699 files changed, 17081 insertions, 15368 deletions
diff --git a/.clippy.toml b/.clippy.toml index 815c94732ed7..137f41d203de 100644 --- a/.clippy.toml +++ b/.clippy.toml @@ -7,5 +7,5 @@ check-private-items = true disallowed-macros = [ # The `clippy::dbg_macro` lint only works with `std::dbg!`, thus we simulate # it here, see: https://github.com/rust-lang/rust-clippy/issues/11303. - { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool" }, + { path = "kernel::dbg", reason = "the `dbg!` macro is intended as a debugging tool", allow-invalid = true }, ] @@ -447,6 +447,8 @@ Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net> Luca Weiss <luca@lucaweiss.eu> <luca@z3ntu.xyz> Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com> Luo Jie <quic_luoj@quicinc.com> <luoj@codeaurora.org> +Lance Yang <lance.yang@linux.dev> <ioworker0@gmail.com> +Lance Yang <lance.yang@linux.dev> <mingzhe.yang@ly.com> Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org> Maharaja Kennadyrajan <quic_mkenna@quicinc.com> <mkenna@codeaurora.org> @@ -483,6 +485,7 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu> Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net> Matthieu CASTET <castet.matthieu@free.fr> Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com> +Mattijs Korpershoek <mkorpershoek@kernel.org> <mkorpershoek@baylibre.com> Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com> Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting> Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com> @@ -749,6 +752,7 @@ Tvrtko Ursulin <tursulin@ursulin.net> <tvrtko@ursulin.net> Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws> Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com> Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de> +Uwe Kleine-König <u.kleine-koenig@baylibre.com> <ukleinek@baylibre.com> Uwe Kleine-König <u.kleine-koenig@pengutronix.de> Uwe Kleine-König <ukleinek@strlen.de> Uwe Kleine-König <ukl@pengutronix.de> diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon index 9bce281314df..cb207c79680d 100644 --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon @@ -111,7 +111,7 @@ Description: RO. Package current voltage in millivolt. What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp2_input Date: March 2025 -KernelVersion: 6.14 +KernelVersion: 6.15 Contact: intel-xe@lists.freedesktop.org Description: RO. Package temperature in millidegree Celsius. @@ -119,7 +119,7 @@ Description: RO. Package temperature in millidegree Celsius. What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon<i>/temp3_input Date: March 2025 -KernelVersion: 6.14 +KernelVersion: 6.15 Contact: intel-xe@lists.freedesktop.org Description: RO. VRAM temperature in millidegree Celsius. diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst index d0502691dfa1..240fee618e06 100644 --- a/Documentation/admin-guide/thunderbolt.rst +++ b/Documentation/admin-guide/thunderbolt.rst @@ -296,6 +296,39 @@ information is missing. To recover from this mode, one needs to flash a valid NVM image to the host controller in the same way it is done in the previous chapter. +Tunneling events +---------------- +The driver sends ``KOBJ_CHANGE`` events to userspace when there is a +tunneling change in the ``thunderbolt_domain``. 
The notification carries +following environment variables:: + + TUNNEL_EVENT=<EVENT> + TUNNEL_DETAILS=0:12 <-> 1:20 (USB3) + +Possible values for ``<EVENT>`` are: + + activated + The tunnel was activated (created). + + changed + There is a change in this tunnel. For example bandwidth allocation was + changed. + + deactivated + The tunnel was torn down. + + low bandwidth + The tunnel is not getting optimal bandwidth. + + insufficient bandwidth + There is not enough bandwidth for the current tunnel requirements. + +The ``TUNNEL_DETAILS`` is only provided if the tunnel is known. For +example, in case of Firmware Connection Manager this is missing or does +not provide full tunnel information. In case of Software Connection Manager +this includes full tunnel details. The format currently matches what the +driver uses when logging. This may change over time. + Networking over Thunderbolt cable --------------------------------- Thunderbolt technology allows software communication between two hosts diff --git a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml index 517a4ac1bea3..e365413732e7 100644 --- a/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml +++ b/Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Mediatek's Keypad Controller maintainers: - - Mattijs Korpershoek <mkorpershoek@baylibre.com> + - Mattijs Korpershoek <mkorpershoek@kernel.org> allOf: - $ref: /schemas/input/matrix-keymap.yaml# diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index 45819b235800..a2d4c626f659 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -74,19 +74,17 @@ properties: - rev-rmii - moca - # RX and TX delays are added by the MAC when required + # RX and TX delays are provided by the PCB. See below - rgmii - # RGMII with internal RX and TX delays provided by the PHY, - # the MAC should not add the RX or TX delays in this case + # RX and TX delays are not provided by the PCB. This is the most + # frequent case. See below - rgmii-id - # RGMII with internal RX delay provided by the PHY, the MAC - # should not add an RX delay in this case + # TX delay is provided by the PCB. See below - rgmii-rxid - # RGMII with internal TX delay provided by the PHY, the MAC - # should not add an TX delay in this case + # RX delay is provided by the PCB. See below - rgmii-txid - rtbi - smii @@ -286,4 +284,89 @@ allOf: additionalProperties: true +# Informative +# =========== +# +# 'phy-modes' & 'phy-connection-type' properties 'rgmii', 'rgmii-id', +# 'rgmii-rxid', and 'rgmii-txid' are frequently used wrongly by +# developers. This informative section clarifies their usage. +# +# The RGMII specification requires a 2ns delay between the data and +# clock signals on the RGMII bus. How this delay is implemented is not +# specified. +# +# One option is to make the clock traces on the PCB longer than the +# data traces. A sufficiently difference in length can provide the 2ns +# delay. If both the RX and TX delays are implemented in this manner, +# 'rgmii' should be used, so indicating the PCB adds the delays. +# +# If the PCB does not add these delays via extra long traces, +# 'rgmii-id' should be used. 
Here, 'id' refers to 'internal delay', +# where either the MAC or PHY adds the delay. +# +# If only one of the two delays are implemented via extra long clock +# lines, either 'rgmii-rxid' or 'rgmii-txid' should be used, +# indicating the MAC or PHY should implement one of the delays +# internally, while the PCB implements the other delay. +# +# Device Tree describes hardware, and in this case, it describes the +# PCB between the MAC and the PHY, if the PCB implements delays or +# not. +# +# In practice, very few PCBs make use of extra long clock lines. Hence +# any RGMII phy mode other than 'rgmii-id' is probably wrong, and is +# unlikely to be accepted during review without details provided in +# the commit description and comments in the .dts file. +# +# When the PCB does not implement the delays, the MAC or PHY must. As +# such, this is software configuration, and so not described in Device +# Tree. +# +# The following describes how Linux implements the configuration of +# the MAC and PHY to add these delays when the PCB does not. As stated +# above, developers often get this wrong, and the aim of this section +# is reduce the frequency of these errors by Linux developers. Other +# users of the Device Tree may implement it differently, and still be +# consistent with both the normative and informative description +# above. +# +# By default in Linux, when using phylib/phylink, the MAC is expected +# to read the 'phy-mode' from Device Tree, not implement any delays, +# and pass the value to the PHY. The PHY will then implement delays as +# specified by the 'phy-mode'. The PHY should always be reconfigured +# to implement the needed delays, replacing any setting performed by +# strapping or the bootloader, etc. +# +# Experience to date is that all PHYs which implement RGMII also +# implement the ability to add or not add the needed delays. Hence +# this default is expected to work in all cases. Ignoring this default +# is likely to be questioned by Reviews, and require a strong argument +# to be accepted. +# +# There are a small number of cases where the MAC has hard coded +# delays which cannot be disabled. The 'phy-mode' only describes the +# PCB. The inability to disable the delays in the MAC does not change +# the meaning of 'phy-mode'. It does however mean that a 'phy-mode' of +# 'rgmii' is now invalid, it cannot be supported, since both the PCB +# and the MAC and PHY adding delays cannot result in a functional +# link. Thus the MAC should report a fatal error for any modes which +# cannot be supported. When the MAC implements the delay, it must +# ensure that the PHY does not also implement the same delay. So it +# must modify the phy-mode it passes to the PHY, removing the delay it +# has added. Failure to remove the delay will result in a +# non-functioning link. +# +# Sometimes there is a need to fine tune the delays. Often the MAC or +# PHY can perform this fine tuning. In the MAC node, the Device Tree +# properties 'rx-internal-delay-ps' and 'tx-internal-delay-ps' should +# be used to indicate fine tuning performed by the MAC. The values +# expected here are small. A value of 2000ps, i.e 2ns, and a phy-mode +# of 'rgmii' will not be accepted by Reviewers. +# +# If the PHY is to perform fine tuning, the properties +# 'rx-internal-delay-ps' and 'tx-internal-delay-ps' in the PHY node +# should be used. When the PHY is implementing delays, e.g. 'rgmii-id' +# these properties should have a value near to 2000ps. If the PCB is +# implementing delays, e.g. 
'rgmii', a small value can be used to fine +# tune the delay added by the PCB. ... diff --git a/Documentation/devicetree/bindings/net/wireless/realtek,rtl8188e.yaml b/Documentation/devicetree/bindings/net/wireless/realtek,rtl8188e.yaml new file mode 100644 index 000000000000..2769731e0708 --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/realtek,rtl8188e.yaml @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/wireless/realtek,rtl8188e.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek RTL8188E USB WiFi + +maintainers: + - J. Neuschäfer <j.ne@posteo.net> + +description: + Realtek RTL8188E is a family of USB-connected 2.4 GHz WiFi modules. + +allOf: + - $ref: /schemas/usb/usb-device.yaml# + +properties: + compatible: + const: usbbda,179 # RTL8188ETV + + reg: true + + vdd-supply: + description: + Regulator for the 3V3 supply. + +required: + - compatible + - reg + - vdd-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + usb { + #address-cells = <1>; + #size-cells = <0>; + + wifi: wifi@1 { + compatible = "usbbda,179"; + reg = <1>; + vdd-supply = <&vcc3v3>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml index b9e33a7429b0..4e208cb7f6c6 100644 --- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml @@ -190,4 +190,19 @@ examples: sound-dai = <&vamacro 0>; }; }; + + usb-dai-link { + link-name = "USB Playback"; + cpu { + sound-dai = <&q6afedai USB_RX>; + }; + + codec { + sound-dai = <&usbdai USB_RX>; + }; + + platform { + sound-dai = <&q6routing>; + }; + }; }; diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml index bccd00a1ddd0..53d00ca643b3 100644 --- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml +++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml @@ -56,19 +56,18 @@ properties: enum: - snps,dw-apb-ssi - snps,dwc-ssi-1.01a - - description: Microsemi Ocelot/Jaguar2 SoC SPI Controller - items: - - enum: - - mscc,ocelot-spi - - mscc,jaguar2-spi - - const: snps,dw-apb-ssi - description: Microchip Sparx5 SoC SPI Controller const: microchip,sparx5-spi - description: Amazon Alpine SPI Controller const: amazon,alpine-dw-apb-ssi - - description: Renesas RZ/N1 SPI Controller + - description: Vendor controllers which use snps,dw-apb-ssi as fallback items: - - const: renesas,rzn1-spi + - enum: + - mscc,ocelot-spi + - mscc,jaguar2-spi + - renesas,rzn1-spi + - sophgo,sg2042-spi + - thead,th1520-spi - const: snps,dw-apb-ssi - description: Intel Keem Bay SPI Controller const: intel,keembay-ssi @@ -88,10 +87,6 @@ properties: - renesas,r9a06g032-spi # RZ/N1D - renesas,r9a06g033-spi # RZ/N1S - const: renesas,rzn1-spi # RZ/N1 - - description: T-HEAD TH1520 SoC SPI Controller - items: - - const: thead,th1520-spi - - const: snps,dw-apb-ssi reg: minItems: 1 diff --git a/Documentation/devicetree/bindings/usb/chipidea,usb2-common.yaml b/Documentation/devicetree/bindings/usb/chipidea,usb2-common.yaml index d2a7d2ecf48a..10020af15afc 100644 --- a/Documentation/devicetree/bindings/usb/chipidea,usb2-common.yaml +++ b/Documentation/devicetree/bindings/usb/chipidea,usb2-common.yaml @@ -42,6 +42,9 @@ properties: phy_type: true + iommus: + maxItems: 1 + itc-setting: description: 
interrupt threshold control register control, the setting should be diff --git a/Documentation/devicetree/bindings/usb/chipidea,usb2-imx.yaml b/Documentation/devicetree/bindings/usb/chipidea,usb2-imx.yaml index 8f6136f5d72e..51014955ab3c 100644 --- a/Documentation/devicetree/bindings/usb/chipidea,usb2-imx.yaml +++ b/Documentation/devicetree/bindings/usb/chipidea,usb2-imx.yaml @@ -41,6 +41,7 @@ properties: - fsl,imx8mm-usb - fsl,imx8mn-usb - fsl,imx93-usb + - fsl,imx95-usb - const: fsl,imx7d-usb - const: fsl,imx27-usb - items: @@ -54,7 +55,11 @@ properties: maxItems: 1 interrupts: - maxItems: 1 + minItems: 1 + items: + - description: USB controller interrupt or combine USB controller + and wakeup interrupts. + - description: Wakeup interrupt clocks: minItems: 1 @@ -191,6 +196,7 @@ allOf: contains: enum: - fsl,imx93-usb + - fsl,imx95-usb then: properties: clocks: @@ -238,6 +244,22 @@ allOf: maxItems: 1 clock-names: false + # imx95 soc use two interrupts + - if: + properties: + compatible: + contains: + enum: + - fsl,imx95-usb + then: + properties: + interrupts: + minItems: 2 + else: + properties: + interrupts: + maxItems: 1 + unevaluatedProperties: false examples: diff --git a/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml b/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml index 0a6e7ac1b37e..019435540df0 100644 --- a/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml +++ b/Documentation/devicetree/bindings/usb/fsl,usbmisc.yaml @@ -34,6 +34,7 @@ properties: - fsl,imx8mm-usbmisc - fsl,imx8mn-usbmisc - fsl,imx8ulp-usbmisc + - fsl,imx95-usbmisc - const: fsl,imx7d-usbmisc - const: fsl,imx6q-usbmisc - items: @@ -45,7 +46,10 @@ properties: maxItems: 1 reg: - maxItems: 1 + minItems: 1 + items: + - description: Base and length of the Wrapper module register + - description: Base and length of the HSIO Block Control register '#index-cells': const: 1 @@ -56,6 +60,23 @@ required: - compatible - reg +allOf: + # imx95 soc needs use HSIO Block Control + - if: + properties: + compatible: + contains: + enum: + - fsl,imx95-usbmisc + then: + properties: + reg: + minItems: 2 + else: + properties: + reg: + maxItems: 1 + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml index 223f2abd5e59..508d958e698c 100644 --- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml +++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml @@ -86,6 +86,7 @@ properties: - nuvoton,npcm845-ehci - ti,ehci-omap - usb-ehci + - via,vt8500-ehci reg: minItems: 1 diff --git a/Documentation/devicetree/bindings/usb/parade,ps5511.yaml b/Documentation/devicetree/bindings/usb/parade,ps5511.yaml new file mode 100644 index 000000000000..10d002f09db8 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/parade,ps5511.yaml @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/parade,ps5511.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Parade PS5511 4+1 Port USB 3.2 Gen 1 Hub Controller + +maintainers: + - Pin-yen Lin <treapking@chromium.org> + +properties: + compatible: + enum: + - usb1da0,5511 + - usb1da0,55a1 + + reset-gpios: + items: + - description: GPIO specifier for RESETB pin. 
+ + vddd11-supply: + description: + 1V1 power supply to the hub + + vdd33-supply: + description: + 3V3 power supply to the hub + + peer-hub: true + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + patternProperties: + '^port@': + $ref: /schemas/graph.yaml#/properties/port + + properties: + reg: + minimum: 1 + maximum: 5 + +additionalProperties: + properties: + reg: + minimum: 1 + maximum: 5 + +required: + - peer-hub + +allOf: + - $ref: usb-hub.yaml# + - if: + not: + properties: + compatible: + enum: + - usb1da0,55a1 + then: + properties: + ports: + properties: + port@5: false + + patternProperties: + '^.*@5$': false + +examples: + - | + usb { + #address-cells = <1>; + #size-cells = <0>; + + /* 2.0 hub on port 1 */ + hub_2_0: hub@1 { + compatible = "usb1da0,55a1"; + reg = <1>; + peer-hub = <&hub_3_0>; + #address-cells = <1>; + #size-cells = <0>; + /* USB 2.0 device on port 5 */ + device@5 { + reg = <5>; + compatible = "usb123,4567"; + }; + }; + + /* 3.0 hub on port 2 */ + hub_3_0: hub@2 { + compatible = "usb1da0,5511"; + reg = <2>; + peer-hub = <&hub_2_0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + /* Type-A connector on port 3 */ + port@3 { + reg = <3>; + endpoint { + remote-endpoint = <&usb_a0_ss>; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml index 935d57f5d26f..aeb33667818e 100644 --- a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml +++ b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml @@ -11,8 +11,11 @@ maintainers: properties: compatible: - enum: - - parade,ps8830 + oneOf: + - items: + - const: parade,ps8833 + - const: parade,ps8830 + - const: parade,ps8830 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml index 64137c1619a6..a792434c59db 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml @@ -4,11 +4,22 @@ $id: http://devicetree.org/schemas/usb/qcom,dwc3.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Qualcomm SuperSpeed DWC3 USB SoC controller +title: Legacy Qualcomm SuperSpeed DWC3 USB SoC controller maintainers: - Wesley Cheng <quic_wcheng@quicinc.com> +# Use the combined qcom,snps-dwc3 instead +deprecated: true + +select: + properties: + compatible: + contains: + const: qcom,dwc3 + required: + - compatible + properties: compatible: items: @@ -55,6 +66,7 @@ properties: - qcom,sm8450-dwc3 - qcom,sm8550-dwc3 - qcom,sm8650-dwc3 + - qcom,sm8750-dwc3 - qcom,x1e80100-dwc3 - qcom,x1e80100-dwc3-mp - const: qcom,dwc3 @@ -354,6 +366,7 @@ allOf: - qcom,sm8450-dwc3 - qcom,sm8550-dwc3 - qcom,sm8650-dwc3 + - qcom,sm8750-dwc3 then: properties: clocks: @@ -497,6 +510,7 @@ allOf: - qcom,sm8450-dwc3 - qcom,sm8550-dwc3 - qcom,sm8650-dwc3 + - qcom,sm8750-dwc3 then: properties: interrupts: diff --git a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml new file mode 100644 index 000000000000..8dac5eba61b4 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml @@ -0,0 +1,622 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/qcom,snps-dwc3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm SuperSpeed DWC3 USB SoC controller + +maintainers: + - Wesley Cheng <quic_wcheng@quicinc.com> + 
+description: + Describes the Qualcomm USB block, based on Synopsys DWC3. + +select: + properties: + compatible: + contains: + const: qcom,snps-dwc3 + required: + - compatible + +properties: + compatible: + items: + - enum: + - qcom,ipq4019-dwc3 + - qcom,ipq5018-dwc3 + - qcom,ipq5332-dwc3 + - qcom,ipq5424-dwc3 + - qcom,ipq6018-dwc3 + - qcom,ipq8064-dwc3 + - qcom,ipq8074-dwc3 + - qcom,ipq9574-dwc3 + - qcom,msm8953-dwc3 + - qcom,msm8994-dwc3 + - qcom,msm8996-dwc3 + - qcom,msm8998-dwc3 + - qcom,qcm2290-dwc3 + - qcom,qcs404-dwc3 + - qcom,qcs615-dwc3 + - qcom,qcs8300-dwc3 + - qcom,qdu1000-dwc3 + - qcom,sa8775p-dwc3 + - qcom,sar2130p-dwc3 + - qcom,sc7180-dwc3 + - qcom,sc7280-dwc3 + - qcom,sc8180x-dwc3 + - qcom,sc8180x-dwc3-mp + - qcom,sc8280xp-dwc3 + - qcom,sc8280xp-dwc3-mp + - qcom,sdm660-dwc3 + - qcom,sdm670-dwc3 + - qcom,sdm845-dwc3 + - qcom,sdx55-dwc3 + - qcom,sdx65-dwc3 + - qcom,sdx75-dwc3 + - qcom,sm4250-dwc3 + - qcom,sm6115-dwc3 + - qcom,sm6125-dwc3 + - qcom,sm6350-dwc3 + - qcom,sm6375-dwc3 + - qcom,sm8150-dwc3 + - qcom,sm8250-dwc3 + - qcom,sm8350-dwc3 + - qcom,sm8450-dwc3 + - qcom,sm8550-dwc3 + - qcom,sm8650-dwc3 + - qcom,x1e80100-dwc3 + - const: qcom,snps-dwc3 + + reg: + maxItems: 1 + + power-domains: + maxItems: 1 + + required-opps: + maxItems: 1 + + clocks: + description: | + Several clocks are used, depending on the variant. Typical ones are:: + - cfg_noc:: System Config NOC clock. + - core:: Master/Core clock, has to be >= 125 MHz for SS operation and >= + 60MHz for HS operation. + - iface:: System bus AXI clock. + - sleep:: Sleep clock, used for wakeup when USB3 core goes into low + power mode (U3). + - mock_utmi:: Mock utmi clock needed for ITP/SOF generation in host + mode. Its frequency should be 19.2MHz. + minItems: 1 + maxItems: 9 + + clock-names: + minItems: 1 + maxItems: 9 + + dma-coherent: true + + iommus: + maxItems: 1 + + resets: + maxItems: 1 + + interconnects: + maxItems: 2 + + interconnect-names: + items: + - const: usb-ddr + - const: apps-usb + + interrupts: + description: | + Different types of interrupts are used based on HS PHY used on target: + - dwc_usb3: Core DWC3 interrupt + - pwr_event: Used for wakeup based on other power events. + - hs_phy_irq: Apart from DP/DM/QUSB2 PHY interrupts, there is + hs_phy_irq which is not triggered by default and its + functionality is mutually exclusive to that of + {dp/dm}_hs_phy_irq and qusb2_phy_irq. + - qusb2_phy: SoCs with QUSB2 PHY do not have separate DP/DM IRQs and + expose only a single IRQ whose behavior can be modified + by the QUSB2PHY_INTR_CTRL register. The required DPSE/ + DMSE configuration is done in QUSB2PHY_INTR_CTRL register + of PHY address space. + - {dp/dm}_hs_phy_irq: These IRQ's directly reflect changes on the DP/ + DM pads of the SoC. These are used for wakeup + only on SoCs with non-QUSB2 targets with + exception of SDM670/SDM845/SM6350. + - ss_phy_irq: Used for remote wakeup in Super Speed mode of operation. + minItems: 3 + maxItems: 19 + + interrupt-names: + minItems: 3 + maxItems: 19 + + qcom,select-utmi-as-pipe-clk: + description: + If present, disable USB3 pipe_clk requirement. + Used when dwc3 operates without SSPHY and only + HS/FS/LS modes are supported. 
+ type: boolean + + wakeup-source: true + +# Required child node: + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + - interrupt-names + +allOf: + - $ref: snps,dwc3-common.yaml# + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq4019-dwc3 + - qcom,ipq5332-dwc3 + then: + properties: + clocks: + maxItems: 3 + clock-names: + items: + - const: core + - const: sleep + - const: mock_utmi + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq8064-dwc3 + then: + properties: + clocks: + items: + - description: Master/Core clock, has to be >= 125 MHz + for SS operation and >= 60MHz for HS operation. + clock-names: + items: + - const: core + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq9574-dwc3 + - qcom,msm8953-dwc3 + - qcom,msm8996-dwc3 + - qcom,msm8998-dwc3 + - qcom,qcs8300-dwc3 + - qcom,sa8775p-dwc3 + - qcom,sc7180-dwc3 + - qcom,sc7280-dwc3 + - qcom,sdm670-dwc3 + - qcom,sdm845-dwc3 + - qcom,sdx55-dwc3 + - qcom,sdx65-dwc3 + - qcom,sdx75-dwc3 + - qcom,sm6350-dwc3 + then: + properties: + clocks: + maxItems: 5 + clock-names: + items: + - const: cfg_noc + - const: core + - const: iface + - const: sleep + - const: mock_utmi + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq6018-dwc3 + then: + properties: + clocks: + minItems: 3 + maxItems: 4 + clock-names: + oneOf: + - items: + - const: core + - const: sleep + - const: mock_utmi + - items: + - const: cfg_noc + - const: core + - const: sleep + - const: mock_utmi + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq8074-dwc3 + - qcom,qdu1000-dwc3 + then: + properties: + clocks: + maxItems: 4 + clock-names: + items: + - const: cfg_noc + - const: core + - const: sleep + - const: mock_utmi + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq5018-dwc3 + - qcom,msm8994-dwc3 + - qcom,qcs404-dwc3 + then: + properties: + clocks: + maxItems: 4 + clock-names: + items: + - const: core + - const: iface + - const: sleep + - const: mock_utmi + + - if: + properties: + compatible: + contains: + enum: + - qcom,sc8280xp-dwc3 + - qcom,sc8280xp-dwc3-mp + - qcom,x1e80100-dwc3 + - qcom,x1e80100-dwc3-mp + then: + properties: + clocks: + maxItems: 9 + clock-names: + items: + - const: cfg_noc + - const: core + - const: iface + - const: sleep + - const: mock_utmi + - const: noc_aggr + - const: noc_aggr_north + - const: noc_aggr_south + - const: noc_sys + + - if: + properties: + compatible: + contains: + enum: + - qcom,sdm660-dwc3 + then: + properties: + clocks: + minItems: 4 + maxItems: 5 + clock-names: + oneOf: + - items: + - const: cfg_noc + - const: core + - const: iface + - const: sleep + - const: mock_utmi + - items: + - const: cfg_noc + - const: core + - const: sleep + - const: mock_utmi + + - if: + properties: + compatible: + contains: + enum: + - qcom,qcm2290-dwc3 + - qcom,qcs615-dwc3 + - qcom,sar2130p-dwc3 + - qcom,sc8180x-dwc3 + - qcom,sc8180x-dwc3-mp + - qcom,sm6115-dwc3 + - qcom,sm6125-dwc3 + - qcom,sm8150-dwc3 + - qcom,sm8250-dwc3 + - qcom,sm8450-dwc3 + - qcom,sm8550-dwc3 + - qcom,sm8650-dwc3 + then: + properties: + clocks: + minItems: 6 + clock-names: + items: + - const: cfg_noc + - const: core + - const: iface + - const: sleep + - const: mock_utmi + - const: xo + + - if: + properties: + compatible: + contains: + enum: + - qcom,sm8350-dwc3 + then: + properties: + clocks: + minItems: 5 + maxItems: 6 + clock-names: + minItems: 5 + items: + - const: cfg_noc + - const: core + - const: iface + - const: sleep + - const: mock_utmi + - 
const: xo + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq5018-dwc3 + - qcom,ipq6018-dwc3 + - qcom,ipq8074-dwc3 + - qcom,msm8953-dwc3 + - qcom,msm8998-dwc3 + then: + properties: + interrupts: + minItems: 3 + maxItems: 4 + interrupt-names: + minItems: 3 + items: + - const: dwc_usb3 + - const: pwr_event + - const: qusb2_phy + - const: ss_phy_irq + + - if: + properties: + compatible: + contains: + enum: + - qcom,msm8996-dwc3 + - qcom,qcs404-dwc3 + - qcom,sdm660-dwc3 + - qcom,sm6115-dwc3 + - qcom,sm6125-dwc3 + then: + properties: + interrupts: + minItems: 4 + maxItems: 5 + interrupt-names: + minItems: 4 + items: + - const: dwc_usb3 + - const: pwr_event + - const: qusb2_phy + - const: hs_phy_irq + - const: ss_phy_irq + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq5332-dwc3 + then: + properties: + interrupts: + maxItems: 4 + interrupt-names: + items: + - const: dwc_usb3 + - const: pwr_event + - const: dp_hs_phy_irq + - const: dm_hs_phy_irq + + - if: + properties: + compatible: + contains: + enum: + - qcom,x1e80100-dwc3 + then: + properties: + interrupts: + maxItems: 5 + interrupt-names: + items: + - const: dwc_usb3 + - const: pwr_event + - const: dp_hs_phy_irq + - const: dm_hs_phy_irq + - const: ss_phy_irq + + - if: + properties: + compatible: + contains: + enum: + - qcom,ipq4019-dwc3 + - qcom,ipq8064-dwc3 + - qcom,msm8994-dwc3 + - qcom,qcs615-dwc3 + - qcom,qcs8300-dwc3 + - qcom,qdu1000-dwc3 + - qcom,sa8775p-dwc3 + - qcom,sc7180-dwc3 + - qcom,sc7280-dwc3 + - qcom,sc8180x-dwc3 + - qcom,sc8280xp-dwc3 + - qcom,sdm670-dwc3 + - qcom,sdm845-dwc3 + - qcom,sdx55-dwc3 + - qcom,sdx65-dwc3 + - qcom,sdx75-dwc3 + - qcom,sm4250-dwc3 + - qcom,sm6350-dwc3 + - qcom,sm8150-dwc3 + - qcom,sm8250-dwc3 + - qcom,sm8350-dwc3 + - qcom,sm8450-dwc3 + - qcom,sm8550-dwc3 + - qcom,sm8650-dwc3 + then: + properties: + interrupts: + minItems: 5 + maxItems: 6 + interrupt-names: + minItems: 5 + items: + - const: dwc_usb3 + - const: pwr_event + - const: hs_phy_irq + - const: dp_hs_phy_irq + - const: dm_hs_phy_irq + - const: ss_phy_irq + + - if: + properties: + compatible: + contains: + enum: + - qcom,sc8180x-dwc3-mp + - qcom,x1e80100-dwc3-mp + then: + properties: + interrupts: + minItems: 11 + maxItems: 11 + interrupt-names: + items: + - const: dwc_usb3 + - const: pwr_event_1 + - const: pwr_event_2 + - const: hs_phy_1 + - const: hs_phy_2 + - const: dp_hs_phy_1 + - const: dm_hs_phy_1 + - const: dp_hs_phy_2 + - const: dm_hs_phy_2 + - const: ss_phy_1 + - const: ss_phy_2 + + - if: + properties: + compatible: + contains: + enum: + - qcom,sc8280xp-dwc3-mp + then: + properties: + interrupts: + minItems: 19 + maxItems: 19 + interrupt-names: + items: + - const: dwc_usb3 + - const: pwr_event_1 + - const: pwr_event_2 + - const: pwr_event_3 + - const: pwr_event_4 + - const: hs_phy_1 + - const: hs_phy_2 + - const: hs_phy_3 + - const: hs_phy_4 + - const: dp_hs_phy_1 + - const: dm_hs_phy_1 + - const: dp_hs_phy_2 + - const: dm_hs_phy_2 + - const: dp_hs_phy_3 + - const: dm_hs_phy_3 + - const: dp_hs_phy_4 + - const: dm_hs_phy_4 + - const: ss_phy_1 + - const: ss_phy_2 + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,gcc-sdm845.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + soc { + #address-cells = <2>; + #size-cells = <2>; + + usb@a600000 { + compatible = "qcom,sdm845-dwc3", "qcom,snps-dwc3"; + reg = <0 0x0a600000 0 0x100000>; + + clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>, + <&gcc 
GCC_USB30_PRIM_MASTER_CLK>, + <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>, + <&gcc GCC_USB30_PRIM_SLEEP_CLK>, + <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>; + clock-names = "cfg_noc", + "core", + "iface", + "sleep", + "mock_utmi"; + + assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_PRIM_MASTER_CLK>; + assigned-clock-rates = <19200000>, <150000000>; + + interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 489 IRQ_TYPE_EDGE_BOTH>, + <GIC_SPI 488 IRQ_TYPE_EDGE_BOTH>, + <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "dwc_usb3", "pwr_event", "hs_phy_irq", + "dp_hs_phy_irq", "dm_hs_phy_irq", "ss_phy_irq"; + + power-domains = <&gcc USB30_PRIM_GDSC>; + + resets = <&gcc GCC_USB30_PRIM_BCR>; + + iommus = <&apps_smmu 0x740 0>; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; + phys = <&usb_1_hsphy>, <&usb_1_ssphy>; + phy-names = "usb2-phy", "usb3-phy"; + }; + }; +... diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml index 6577a61cc075..a020afaf2d6e 100644 --- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml +++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml @@ -10,7 +10,7 @@ maintainers: - Matthias Kaehlcke <mka@chromium.org> allOf: - - $ref: usb-device.yaml# + - $ref: usb-hub.yaml# properties: compatible: @@ -19,61 +19,35 @@ properties: - usbbda,5411 - usbbda,411 - reg: true - - '#address-cells': - const: 1 - - '#size-cells': - const: 0 - vdd-supply: description: phandle to the regulator that provides power to the hub. - peer-hub: - $ref: /schemas/types.yaml#/definitions/phandle - description: - phandle to the peer hub on the controller. + peer-hub: true ports: $ref: /schemas/graph.yaml#/properties/ports - properties: - port@1: - $ref: /schemas/graph.yaml#/properties/port - description: - 1st downstream facing USB port - - port@2: + patternProperties: + '^port@': $ref: /schemas/graph.yaml#/properties/port - description: - 2nd downstream facing USB port - port@3: - $ref: /schemas/graph.yaml#/properties/port - description: - 3rd downstream facing USB port + properties: + reg: + minimum: 1 + maximum: 4 - port@4: - $ref: /schemas/graph.yaml#/properties/port - description: - 4th downstream facing USB port - -patternProperties: - '^.*@[1-4]$': - description: The hard wired USB devices - type: object - $ref: /schemas/usb/usb-device.yaml - additionalProperties: true +additionalProperties: + properties: + reg: + minimum: 1 + maximum: 4 required: - peer-hub - compatible - reg -additionalProperties: false - examples: - | usb { diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml index 980f325341d4..6f4d41ba6ca7 100644 --- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml +++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml @@ -27,6 +27,7 @@ properties: - renesas,usbhs-r9a07g044 # RZ/G2{L,LC} - renesas,usbhs-r9a07g054 # RZ/V2L - renesas,usbhs-r9a08g045 # RZ/G3S + - renesas,usbhs-r9a09g057 # RZ/V2H(P) - const: renesas,rzg2l-usbhs - items: @@ -127,11 +128,7 @@ allOf: properties: compatible: contains: - enum: - - renesas,usbhs-r9a07g043 - - renesas,usbhs-r9a07g044 - - renesas,usbhs-r9a07g054 - - renesas,usbhs-r9a08g045 + const: renesas,rzg2l-usbhs then: properties: interrupts: diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml 
b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml index 256bee2a03ca..6d39e5066944 100644 --- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml +++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml @@ -14,11 +14,13 @@ properties: oneOf: - enum: - google,gs101-dwusb3 + - samsung,exynos2200-dwusb3 - samsung,exynos5250-dwusb3 - samsung,exynos5433-dwusb3 - samsung,exynos7-dwusb3 - samsung,exynos7870-dwusb3 - samsung,exynos850-dwusb3 + - samsung,exynosautov920-dwusb3 - items: - const: samsung,exynos990-dwusb3 - const: samsung,exynos850-dwusb3 @@ -83,6 +85,19 @@ allOf: properties: compatible: contains: + const: samsung,exynos2200-dwusb3 + then: + properties: + clocks: + maxItems: 1 + clock-names: + items: + - const: link_aclk + + - if: + properties: + compatible: + contains: const: samsung,exynos5250-dwusb3 then: properties: @@ -165,6 +180,21 @@ allOf: required: - vdd10-supply + - if: + properties: + compatible: + contains: + const: samsung,exynosautov920-dwusb3 + then: + properties: + clocks: + minItems: 2 + maxItems: 2 + clock-names: + items: + - const: ref + - const: susp_clk + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/usb/smsc,usb3503.yaml b/Documentation/devicetree/bindings/usb/smsc,usb3503.yaml index 6156dc26e65c..18e35122dc1f 100644 --- a/Documentation/devicetree/bindings/usb/smsc,usb3503.yaml +++ b/Documentation/devicetree/bindings/usb/smsc,usb3503.yaml @@ -106,54 +106,54 @@ additionalProperties: false examples: - | - i2c { - #address-cells = <1>; - #size-cells = <0>; - - usb-hub@8 { - compatible = "smsc,usb3503"; - reg = <0x08>; - connect-gpios = <&gpx3 0 1>; - disabled-ports = <2 3>; - intn-gpios = <&gpx3 4 1>; - reset-gpios = <&gpx3 5 1>; - initial-mode = <1>; - clocks = <&clks 80>; - clock-names = "refclk"; - }; - }; + i2c { + #address-cells = <1>; + #size-cells = <0>; + + usb-hub@8 { + compatible = "smsc,usb3503"; + reg = <0x08>; + connect-gpios = <&gpx3 0 1>; + disabled-ports = <2 3>; + intn-gpios = <&gpx3 4 1>; + reset-gpios = <&gpx3 5 1>; + initial-mode = <1>; + clocks = <&clks 80>; + clock-names = "refclk"; + }; + }; - | - i2c { - #address-cells = <1>; - #size-cells = <0>; - - usb-hub@8 { - compatible = "smsc,usb3803"; - reg = <0x08>; - connect-gpios = <&gpx3 0 1>; - disabled-ports = <2 3>; - intn-gpios = <&gpx3 4 1>; - reset-gpios = <&gpx3 5 1>; - bypass-gpios = <&gpx3 6 1>; - initial-mode = <3>; - clocks = <&clks 80>; - clock-names = "refclk"; - }; - }; + i2c { + #address-cells = <1>; + #size-cells = <0>; + + usb-hub@8 { + compatible = "smsc,usb3803"; + reg = <0x08>; + connect-gpios = <&gpx3 0 1>; + disabled-ports = <2 3>; + intn-gpios = <&gpx3 4 1>; + reset-gpios = <&gpx3 5 1>; + bypass-gpios = <&gpx3 6 1>; + initial-mode = <3>; + clocks = <&clks 80>; + clock-names = "refclk"; + }; + }; - | - #include <dt-bindings/gpio/gpio.h> - - usb-hub { - /* I2C is not connected */ - compatible = "smsc,usb3503"; - initial-mode = <1>; /* initialize in HUB mode */ - disabled-ports = <1>; - intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ - reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */ - connect-gpios = <&pio 4 17 GPIO_ACTIVE_HIGH>; /* PE17 */ - refclk-frequency = <19200000>; - }; + #include <dt-bindings/gpio/gpio.h> + + usb-hub { + /* I2C is not connected */ + compatible = "smsc,usb3503"; + initial-mode = <1>; /* initialize in HUB mode */ + disabled-ports = <1>; + intn-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */ + reset-gpios = <&pio 4 16 GPIO_ACTIVE_LOW>; /* PE16 */ + connect-gpios = <&pio 
4 17 GPIO_ACTIVE_HIGH>; /* PE17 */ + refclk-frequency = <19200000>; + }; ... diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml index 71249b6ba616..6c0b8b653824 100644 --- a/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml +++ b/Documentation/devicetree/bindings/usb/snps,dwc3-common.yaml @@ -390,6 +390,12 @@ properties: maximum: 8 default: 1 + connector: + $ref: /schemas/connector/usb-connector.yaml# + description: Connector for dual role switch + type: object + unevaluatedProperties: false + port: $ref: /schemas/graph.yaml#/properties/port description: diff --git a/Documentation/devicetree/bindings/usb/ti,usb8041.yaml b/Documentation/devicetree/bindings/usb/ti,usb8041.yaml index bce730a5e237..5e3eae9c2961 100644 --- a/Documentation/devicetree/bindings/usb/ti,usb8041.yaml +++ b/Documentation/devicetree/bindings/usb/ti,usb8041.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/usb/ti,usb8041.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: TI USB8041 USB 3.0 hub controller +title: TI USB8041 and USB8044 USB 3.0 hub controllers maintainers: - Alexander Stein <alexander.stein@ew.tq-group.com> @@ -17,6 +17,8 @@ properties: enum: - usb451,8140 - usb451,8142 + - usb451,8440 + - usb451,8442 reg: true diff --git a/Documentation/devicetree/bindings/usb/usb-device.yaml b/Documentation/devicetree/bindings/usb/usb-device.yaml index c67695681033..09fceb469f10 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.yaml +++ b/Documentation/devicetree/bindings/usb/usb-device.yaml @@ -28,7 +28,8 @@ description: | properties: compatible: - pattern: "^usb[0-9a-f]{1,4},[0-9a-f]{1,4}$" + contains: + pattern: "^usb[0-9a-f]{1,4},[0-9a-f]{1,4}$" description: Device nodes or combined nodes. "usbVID,PID", where VID is the vendor id and PID the product id. The textual representation of VID and PID shall be in lower case diff --git a/Documentation/devicetree/bindings/usb/usb-hub.yaml b/Documentation/devicetree/bindings/usb/usb-hub.yaml new file mode 100644 index 000000000000..5238ab105763 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/usb-hub.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/usb-hub.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Generic USB Hub + +maintainers: + - Pin-yen Lin <treapking@chromium.org> + +allOf: + - $ref: usb-device.yaml# + +properties: + '#address-cells': + const: 1 + + peer-hub: + $ref: /schemas/types.yaml#/definitions/phandle + description: + phandle to the peer hub on the controller. 
+ + ports: + $ref: /schemas/graph.yaml#/properties/ports + description: + The downstream facing USB ports + + patternProperties: + "^port@[1-9a-f][0-9a-f]*$": + $ref: /schemas/graph.yaml#/properties/port + +patternProperties: + '^.*@[1-9a-f][0-9a-f]*$': + description: The hard wired USB devices + type: object + $ref: /schemas/usb/usb-device.yaml + additionalProperties: true + +required: + - compatible + - reg + +additionalProperties: true + +examples: + - | + usb { + #address-cells = <1>; + #size-cells = <0>; + + /* 2.0 hub on port 1 */ + hub_2_0: hub@1 { + compatible = "usb123,4567"; + reg = <1>; + peer-hub = <&hub_3_0>; + #address-cells = <1>; + #size-cells = <0>; + /* USB 2.0 device on port 5 */ + device@5 { + reg = <5>; + compatible = "usb765,4321"; + }; + }; + + /* 3.0 hub on port 2 */ + hub_3_0: hub@2 { + compatible = "usb123,abcd"; + reg = <2>; + peer-hub = <&hub_2_0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + /* Type-A connector on port 3 */ + port@3 { + reg = <3>; + endpoint { + remote-endpoint = <&usb_a0_ss>; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/usb-switch.yaml b/Documentation/devicetree/bindings/usb/usb-switch.yaml index da76118e73a5..896201912630 100644 --- a/Documentation/devicetree/bindings/usb/usb-switch.yaml +++ b/Documentation/devicetree/bindings/usb/usb-switch.yaml @@ -26,11 +26,24 @@ properties: type: boolean port: - $ref: /schemas/graph.yaml#/properties/port + $ref: /schemas/graph.yaml#/$defs/port-base description: A port node to link the device to a TypeC controller for the purpose of handling altmode muxing and orientation switching. + properties: + endpoint: + $ref: /schemas/graph.yaml#/$defs/endpoint-base + unevaluatedProperties: false + properties: + data-lanes: + $ref: /schemas/types.yaml#/definitions/uint32-array + minItems: 1 + maxItems: 8 + uniqueItems: true + items: + maximum: 8 + ports: $ref: /schemas/graph.yaml#/properties/ports properties: diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 655d8d10fe24..c650cd3dcb80 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -89,8 +89,10 @@ definitions: doc: Group of short_detected states - name: phy-upstream-type - enum-name: + enum-name: phy-upstream + header: linux/ethtool.h type: enum + name-prefix: phy-upstream entries: [ mac, phy ] - name: tcp-data-split diff --git a/Documentation/sound/soc/index.rst b/Documentation/sound/soc/index.rst index e57df2dab2fd..8bed8f8f48da 100644 --- a/Documentation/sound/soc/index.rst +++ b/Documentation/sound/soc/index.rst @@ -18,3 +18,4 @@ The documentation is spilt into the following sections:- jack dpcm codec-to-codec + usb diff --git a/Documentation/sound/soc/usb.rst b/Documentation/sound/soc/usb.rst new file mode 100644 index 000000000000..94c12f9d9dd1 --- /dev/null +++ b/Documentation/sound/soc/usb.rst @@ -0,0 +1,482 @@ +================ +ASoC USB support +================ + +Overview +======== +In order to leverage the existing USB sound device support in ALSA, the +ASoC USB APIs are introduced to allow the subsystems to exchange +configuration information. + +One potential use case would be to support USB audio offloading, which is +an implementation that allows for an alternate power-optimized path in the audio +subsystem to handle the transfer of audio data over the USB bus. This would +let the main processor to stay in lower power modes for longer duration. 
The +following is an example design of how the ASoC and ALSA pieces can be connected +together to achieve this: + +:: + + USB | ASoC + | _________________________ + | | ASoC Platform card | + | |_________________________| + | | | + | ___V____ ____V____ + | |ASoC BE | |ASoC FE | + | |DAI LNK | |DAI LNK | + | |________| |_________| + | ^ ^ ^ + | | |________| + | ___V____ | + | |SoC-USB | | + ________ ________ | | | + |USB SND |<--->|USBSND |<------------>|________| | + |(card.c)| |offld |<---------- | + |________| |________|___ | | | + ^ ^ | | | ____________V_________ + | | | | | |IPC | + __ V_______________V_____ | | | |______________________| + |USB SND (endpoint.c) | | | | ^ + |_________________________| | | | | + ^ | | | ___________V___________ + | | | |->|audio DSP | + ___________V_____________ | | |_______________________| + |XHCI HCD |<- | + |_________________________| | + + +SoC USB driver +============== +Structures +---------- +``struct snd_soc_usb`` + + - ``list``: list head for SND SoC struct list + - ``component``: reference to ASoC component + - ``connection_status_cb``: callback to notify connection events + - ``update_offload_route_info``: callback to fetch selected USB sound card/PCM + device + - ``priv_data``: driver data + +The snd_soc_usb structure can be referenced using the ASoC platform card +device, or a USB device (udev->dev). This is created by the ASoC BE DAI +link, and the USB sound entity will be able to pass information to the +ASoC BE DAI link using this structure. + +``struct snd_soc_usb_device`` + + - ``card_idx``: sound card index associated with USB sound device + - ``chip_idx``: USB sound chip array index + - ``cpcm_idx``: capture pcm device indexes associated with the USB sound device + - ``ppcm_idx``: playback pcm device indexes associated with the USB sound device + - ``num_playback``: number of playback streams + - ``num_capture``: number of capture streams + - ``list``: list head for the USB sound device list + +The struct snd_soc_usb_device is created by the USB sound offload driver. +This will carry basic parameters/limitations that will be used to +determine the possible offloading paths for this USB audio device. + +Functions +--------- +.. code-block:: rst + + int snd_soc_usb_find_supported_format(int card_idx, + struct snd_pcm_hw_params *params, int direction) +.. + + - ``card_idx``: the index into the USB sound chip array. + - ``params``: Requested PCM parameters from the USB DPCM BE DAI link + - ``direction``: capture or playback + +**snd_soc_usb_find_supported_format()** ensures that the requested audio profile +being requested by the external DSP is supported by the USB device. + +Returns 0 on success, and -EOPNOTSUPP on failure. + +.. code-block:: rst + + int snd_soc_usb_connect(struct device *usbdev, struct snd_soc_usb_device *sdev) +.. + + - ``usbdev``: the usb device that was discovered + - ``sdev``: capabilities of the device + +**snd_soc_usb_connect()** notifies the ASoC USB DCPM BE DAI link of a USB +audio device detection. This can be utilized in the BE DAI +driver to keep track of available USB audio devices. This is intended +to be called by the USB offload driver residing in USB SND. + +Returns 0 on success, negative error code on failure. + +.. code-block:: rst + + int snd_soc_usb_disconnect(struct device *usbdev, struct snd_soc_usb_device *sdev) +.. 
+ + - ``usbdev``: the usb device that was removed + - ``sdev``: capabilities to free + +**snd_soc_usb_disconnect()** notifies the ASoC USB DCPM BE DAI link of a USB +audio device removal. This is intended to be called by the USB offload +driver that resides in USB SND. + +.. code-block:: rst + + void *snd_soc_usb_find_priv_data(struct device *usbdev) +.. + + - ``usbdev``: the usb device to reference to find private data + +**snd_soc_usb_find_priv_data()** fetches the private data saved to the SoC USB +device. + +Returns pointer to priv_data on success, NULL on failure. + +.. code-block:: rst + + int snd_soc_usb_setup_offload_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack) +.. + + - ``component``: ASoC component to add the jack + - ``jack``: jack component to populate + +**snd_soc_usb_setup_offload_jack()** is a helper to add a sound jack control to +the platform sound card. This will allow for consistent naming to be used on +designs that support USB audio offloading. Additionally, this will enable the +jack to notify of changes. + +Returns 0 on success, negative otherwise. + +.. code-block:: rst + + int snd_soc_usb_update_offload_route(struct device *dev, int card, int pcm, + int direction, enum snd_soc_usb_kctl path, + long *route) +.. + + - ``dev``: USB device to look up offload path mapping + - ``card``: USB sound card index + - ``pcm``: USB sound PCM device index + - ``direction``: direction to fetch offload routing information + - ``path``: kcontrol selector - pcm device or card index + - ``route``: mapping of sound card and pcm indexes for the offload path. This is + an array of two integers that will carry the card and pcm device indexes + in that specific order. This can be used as the array for the kcontrol + output. + +**snd_soc_usb_update_offload_route()** calls a registered callback to the USB BE DAI +link to fetch the information about the mapped ASoC devices for executing USB audio +offload for the device. ``route`` may be a pointer to a kcontrol value output array, +which carries values when the kcontrol is read. + +Returns 0 on success, negative otherwise. + +.. code-block:: rst + + struct snd_soc_usb *snd_soc_usb_allocate_port(struct snd_soc_component *component, + void *data); +.. + + - ``component``: DPCM BE DAI link component + - ``data``: private data + +**snd_soc_usb_allocate_port()** allocates a SoC USB device and populates standard +parameters that is used for further operations. + +Returns a pointer to struct soc_usb on success, negative on error. + +.. code-block:: rst + + void snd_soc_usb_free_port(struct snd_soc_usb *usb); +.. + + - ``usb``: SoC USB device to free + +**snd_soc_usb_free_port()** frees a SoC USB device. + +.. code-block:: rst + + void snd_soc_usb_add_port(struct snd_soc_usb *usb); +.. + + - ``usb``: SoC USB device to add + +**snd_soc_usb_add_port()** add an allocated SoC USB device to the SOC USB framework. +Once added, this device can be referenced by further operations. + +.. code-block:: rst + + void snd_soc_usb_remove_port(struct snd_soc_usb *usb); +.. + + - ``usb``: SoC USB device to remove + +**snd_soc_usb_remove_port()** removes a SoC USB device from the SoC USB framework. +After removing a device, any SOC USB operations would not be able to reference the +device removed. + +How to Register to SoC USB +-------------------------- +The ASoC DPCM USB BE DAI link is the entity responsible for allocating and +registering the SoC USB device on the component bind. 
Likewise, it will +also be responsible for freeing the allocated resources. An example can +be shown below: + +.. code-block:: rst + + static int q6usb_component_probe(struct snd_soc_component *component) + { + ... + data->usb = snd_soc_usb_allocate_port(component, 1, &data->priv); + if (!data->usb) + return -ENOMEM; + + usb->connection_status_cb = q6usb_alsa_connection_cb; + + ret = snd_soc_usb_add_port(usb); + if (ret < 0) { + dev_err(component->dev, "failed to add usb port\n"); + goto free_usb; + } + ... + } + + static void q6usb_component_remove(struct snd_soc_component *component) + { + ... + snd_soc_usb_remove_port(data->usb); + snd_soc_usb_free_port(data->usb); + } + + static const struct snd_soc_component_driver q6usb_dai_component = { + .probe = q6usb_component_probe, + .remove = q6usb_component_remove, + .name = "q6usb-dai-component", + ... + }; +.. + +BE DAI links can pass along vendor specific information as part of the +call to allocate the SoC USB device. This will allow any BE DAI link +parameters or settings to be accessed by the USB offload driver that +resides in USB SND. + +USB Audio Device Connection Flow +-------------------------------- +USB devices can be hotplugged into the USB ports at any point in time. +The BE DAI link should be aware of the current state of the physical USB +port, i.e. if there are any USB devices with audio interface(s) connected. +connection_status_cb() can be used to notify the BE DAI link of any change. + +This is called whenever there is a USB SND interface bind or remove event, +using snd_soc_usb_connect() or snd_soc_usb_disconnect(): + +.. code-block:: rst + + static void qc_usb_audio_offload_probe(struct snd_usb_audio *chip) + { + ... + snd_soc_usb_connect(usb_get_usb_backend(udev), sdev); + ... + } + + static void qc_usb_audio_offload_disconnect(struct snd_usb_audio *chip) + { + ... + snd_soc_usb_disconnect(usb_get_usb_backend(chip->dev), dev->sdev); + ... + } +.. + +In order to account for conditions where driver or device existence is +not guaranteed, USB SND exposes snd_usb_rediscover_devices() to resend the +connect events for any identified USB audio interfaces. Consider the +the following situation: + + **usb_audio_probe()** + | --> USB audio streams allocated and saved to usb_chip[] + | --> Propagate connect event to USB offload driver in USB SND + | --> **snd_soc_usb_connect()** exits as USB BE DAI link is not ready + + BE DAI link component probe + | --> DAI link is probed and SoC USB port is allocated + | --> The USB audio device connect event is missed + +To ensure connection events are not missed, **snd_usb_rediscover_devices()** +is executed when the SoC USB device is registered. Now, when the BE DAI +link component probe occurs, the following highlights the sequence: + + BE DAI link component probe + | --> DAI link is probed and SoC USB port is allocated + | --> SoC USB device added, and **snd_usb_rediscover_devices()** runs + + **snd_usb_rediscover_devices()** + | --> Traverses through usb_chip[] and for non-NULL entries issue + | **connection_status_cb()** + +In the case where the USB offload driver is unbound, while USB SND is ready, +the **snd_usb_rediscover_devices()** is called during module init. This allows +for the offloading path to also be enabled with the following flow: + + **usb_audio_probe()** + | --> USB audio streams allocated and saved to usb_chip[] + | --> Propagate connect event to USB offload driver in USB SND + | --> USB offload driver **NOT** ready! 
+ + BE DAI link component probe + | --> DAI link is probed and SoC USB port is allocated + | --> No USB connect event due to missing USB offload driver + + USB offload driver probe + | --> **qc_usb_audio_offload_init()** + | --> Calls **snd_usb_rediscover_devices()** to notify of devices + +USB Offload Related Kcontrols +============================= +Details +------- +A set of kcontrols can be utilized by applications to help select the proper sound +devices to enable USB audio offloading. SoC USB exposes the get_offload_dev() +callback that designs can use to ensure that the proper indices are returned to the +application. + +Implementation +-------------- + +**Example:** + + **Sound Cards**: + + :: + + 0 [SM8250MTPWCD938]: sm8250 - SM8250-MTP-WCD9380-WSA8810-VA-D + SM8250-MTP-WCD9380-WSA8810-VA-DMIC + 1 [Seri ]: USB-Audio - Plantronics Blackwire 3225 Seri + Plantronics Plantronics Blackwire + 3225 Seri at usb-xhci-hcd.1.auto-1.1, + full sp + 2 [C320M ]: USB-Audio - Plantronics C320-M + Plantronics Plantronics C320-M at usb-xhci-hcd.1.auto-1.2, full speed + + **PCM Devices**: + + :: + + card 0: SM8250MTPWCD938 [SM8250-MTP-WCD9380-WSA8810-VA-D], device 0: MultiMedia1 (*) [] + Subdevices: 1/1 + Subdevice #0: subdevice #0 + card 0: SM8250MTPWCD938 [SM8250-MTP-WCD9380-WSA8810-VA-D], device 1: MultiMedia2 (*) [] + Subdevices: 1/1 + Subdevice #0: subdevice #0 + card 1: Seri [Plantronics Blackwire 3225 Seri], device 0: USB Audio [USB Audio] + Subdevices: 1/1 + Subdevice #0: subdevice #0 + card 2: C320M [Plantronics C320-M], device 0: USB Audio [USB Audio] + Subdevices: 1/1 + Subdevice #0: subdevice #0 + + **USB Sound Card** - card#1: + + :: + + USB Offload Playback Card Route PCM#0 -1 (range -1->32) + USB Offload Playback PCM Route PCM#0 -1 (range -1->255) + + **USB Sound Card** - card#2: + + :: + + USB Offload Playback Card Route PCM#0 0 (range -1->32) + USB Offload Playback PCM Route PCM#0 1 (range -1->255) + +The above example shows a scenario where the system has one ASoC platform card +(card#0) and two USB sound devices connected (card#1 and card#2). When reading +the available kcontrols for each USB audio device, the following kcontrols lists +the mapped offload card and pcm device indexes for the specific USB device: + + ``USB Offload Playback Card Route PCM#*`` + + ``USB Offload Playback PCM Route PCM#*`` + +The kcontrol is indexed, because a USB audio device could potentially have +several PCM devices. The above kcontrols are defined as: + + - ``USB Offload Playback Card Route PCM#`` **(R)**: Returns the ASoC platform sound + card index for a mapped offload path. The output **"0"** (card index) signifies + that there is an available offload path for the USB SND device through card#0. + If **"-1"** is seen, then no offload path is available for the USB SND device. + This kcontrol exists for each USB audio device that exists in the system, and + its expected to derive the current status of offload based on the output value + for the kcontrol along with the PCM route kcontrol. + + - ``USB Offload Playback PCM Route PCM#`` **(R)**: Returns the ASoC platform sound + PCM device index for a mapped offload path. The output **"1"** (PCM device index) + signifies that there is an available offload path for the USB SND device through + PCM device#0. If **"-1"** is seen, then no offload path is available for the USB\ + SND device. 
+
+USB Offload Playback Route Kcontrol
+-----------------------------------
+In order to allow for vendor-specific implementations of audio offloading
+device selection, the SoC USB layer exposes the following:
+
+.. code-block:: c
+
+  int (*update_offload_route_info)(struct snd_soc_component *component,
+                                   int card, int pcm, int direction,
+                                   enum snd_soc_usb_kctl path,
+                                   long *route)
+..
+
+This callback backs the **USB Offload Playback Card Route PCM#** and **USB
+Offload Playback PCM Route PCM#** kcontrols.
+
+When userspace issues a get call on these kcontrols, SoC USB executes the
+registered callback in the DPCM BE DAI link (an illustrative callback sketch
+is shown at the end of this document).
+
+**Callback Registration:**
+
+.. code-block:: c
+
+  static int q6usb_component_probe(struct snd_soc_component *component)
+  {
+      ...
+      usb = snd_soc_usb_allocate_port(component, 1, &data->priv);
+      if (IS_ERR(usb))
+          return -ENOMEM;
+
+      usb->connection_status_cb = q6usb_alsa_connection_cb;
+      usb->update_offload_route_info = q6usb_get_offload_dev;
+
+      ret = snd_soc_usb_add_port(usb);
+..
+
+Existing USB Sound Kcontrol
+---------------------------
+With the introduction of USB offload support, the USB offload kcontrols above
+are added to the pre-existing list of kcontrols identified by the USB sound
+framework. These kcontrols are still the main controls used to modify
+characteristics pertaining to the USB audio device.
+
+  ::
+
+    Number of controls: 10
+    ctl   type  num  name                                    value
+    0     INT   2    Capture Channel Map                     0, 0 (range 0->36)
+    1     INT   2    Playback Channel Map                    0, 0 (range 0->36)
+    2     BOOL  1    Headset Capture Switch                  On
+    3     INT   1    Headset Capture Volume                  10 (range 0->13)
+    4     BOOL  1    Sidetone Playback Switch                On
+    5     INT   1    Sidetone Playback Volume                4096 (range 0->8192)
+    6     BOOL  1    Headset Playback Switch                 On
+    7     INT   2    Headset Playback Volume                 20, 20 (range 0->24)
+    8     INT   1    USB Offload Playback Card Route PCM#0   0 (range -1->32)
+    9     INT   1    USB Offload Playback PCM Route PCM#0    1 (range -1->255)
+
+Since USB audio device controls are handled over the USB control endpoint, use
+the existing mechanisms present in the USB mixer to set parameters, such as
+volume.
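
For completeness, a possible shape of the BE DAI link side of
``update_offload_route_info()`` is sketched below. Only the prototype and the
``q6usb_get_offload_dev`` name come from the text above; the helper
``q6usb_device_is_mapped()``, the ``struct q6usb_port_data`` fields and the
``SND_SOC_USB_KCTL_*`` enumerator names are illustrative assumptions, not the
actual q6usb implementation.

.. code-block:: c

  static int q6usb_get_offload_dev(struct snd_soc_component *component,
                                   int card, int pcm, int direction,
                                   enum snd_soc_usb_kctl path, long *route)
  {
      struct q6usb_port_data *data = dev_get_drvdata(component->dev);

      /* Default: no offload path for this USB card/PCM/direction. */
      *route = -1;

      if (direction != SNDRV_PCM_STREAM_PLAYBACK)
          return 0;

      /* Hypothetical helper: is this USB device the one currently
       * backed by the offload-capable BE DAI link? */
      if (!q6usb_device_is_mapped(data, card, pcm))
          return 0;

      /*
       * 'path' selects which kcontrol is being read; the enumerator
       * names below are placeholders, not the real definitions.
       */
      if (path == SND_SOC_USB_KCTL_CARD_ROUTE)
          *route = data->platform_card_idx;   /* e.g. card#0 */
      else
          *route = data->platform_pcm_idx;    /* e.g. MultiMedia2 */

      return 0;
  }

The important property is the fallback: anything the BE DAI link cannot map
must report ``-1`` so that userspace falls back to the regular, non-offloaded
USB PCM.
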
diff --git a/MAINTAINERS b/MAINTAINERS index 3cbf9ac0d83f..81a0a537a7bf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2519,6 +2519,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git F: arch/arm/boot/dts/nxp/imx/ F: arch/arm/boot/dts/nxp/mxs/ F: arch/arm64/boot/dts/freescale/ +X: Documentation/devicetree/bindings/media/i2c/ X: arch/arm64/boot/dts/freescale/fsl-* X: arch/arm64/boot/dts/freescale/qoriq-* X: drivers/media/i2c/ @@ -8726,6 +8727,7 @@ M: Chao Yu <chao@kernel.org> R: Yue Hu <zbestahu@gmail.com> R: Jeffle Xu <jefflexu@linux.alibaba.com> R: Sandeep Dhavale <dhavale@google.com> +R: Hongbo Li <lihongbo22@huawei.com> L: linux-erofs@lists.ozlabs.org S: Maintained W: https://erofs.docs.kernel.org @@ -10967,6 +10969,7 @@ M: Pengyu Luo <mitltlatltl@gmail.com> S: Maintained F: Documentation/devicetree/bindings/platform/huawei,gaokun-ec.yaml F: drivers/platform/arm64/huawei-gaokun-ec.c +F: drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c F: include/linux/platform_data/huawei-gaokun-ec.h HUGETLB SUBSYSTEM @@ -11235,7 +11238,6 @@ S: Maintained F: drivers/i2c/busses/i2c-cht-wc.c I2C/SMBUS ISMT DRIVER -M: Seth Heasley <seth.heasley@intel.com> M: Neil Horman <nhorman@tuxdriver.com> L: linux-i2c@vger.kernel.org F: Documentation/i2c/busses/i2c-ismt.rst @@ -12093,7 +12095,7 @@ F: drivers/crypto/intel/keembay/ocs-hcu.c F: drivers/crypto/intel/keembay/ocs-hcu.h INTEL LA JOLLA COVE ADAPTER (LJCA) USB I/O EXPANDER DRIVERS -M: Wentong Wu <wentong.wu@intel.com> +M: Lixu Zhang <lixu.zhang@intel.com> M: Sakari Ailus <sakari.ailus@linux.intel.com> S: Maintained F: drivers/gpio/gpio-ljca.c @@ -15071,7 +15073,7 @@ F: Documentation/devicetree/bindings/media/mediatek-jpeg-*.yaml F: drivers/media/platform/mediatek/jpeg/ MEDIATEK KEYPAD DRIVER -M: Mattijs Korpershoek <mkorpershoek@baylibre.com> +M: Mattijs Korpershoek <mkorpershoek@kernel.org> S: Supported F: Documentation/devicetree/bindings/input/mediatek,mt6779-keypad.yaml F: drivers/input/keyboard/mt6779-keypad.c @@ -15494,24 +15496,45 @@ F: Documentation/mm/ F: include/linux/gfp.h F: include/linux/gfp_types.h F: include/linux/memfd.h -F: include/linux/memory.h F: include/linux/memory_hotplug.h F: include/linux/memory-tiers.h F: include/linux/mempolicy.h F: include/linux/mempool.h F: include/linux/memremap.h -F: include/linux/mm.h -F: include/linux/mm_*.h F: include/linux/mmzone.h F: include/linux/mmu_notifier.h F: include/linux/pagewalk.h -F: include/linux/rmap.h F: include/trace/events/ksm.h F: mm/ F: tools/mm/ F: tools/testing/selftests/mm/ N: include/linux/page[-_]* +MEMORY MANAGEMENT - CORE +M: Andrew Morton <akpm@linux-foundation.org> +M: David Hildenbrand <david@redhat.com> +R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> +R: Liam R. 
Howlett <Liam.Howlett@oracle.com> +R: Vlastimil Babka <vbabka@suse.cz> +R: Mike Rapoport <rppt@kernel.org> +R: Suren Baghdasaryan <surenb@google.com> +R: Michal Hocko <mhocko@suse.com> +L: linux-mm@kvack.org +S: Maintained +W: http://www.linux-mm.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: include/linux/memory.h +F: include/linux/mm.h +F: include/linux/mm_*.h +F: include/linux/mmdebug.h +F: include/linux/pagewalk.h +F: mm/Kconfig +F: mm/debug.c +F: mm/init-mm.c +F: mm/memory.c +F: mm/pagewalk.c +F: mm/util.c + MEMORY MANAGEMENT - EXECMEM M: Andrew Morton <akpm@linux-foundation.org> M: Mike Rapoport <rppt@kernel.org> @@ -15545,6 +15568,19 @@ F: mm/page_alloc.c F: include/linux/gfp.h F: include/linux/compaction.h +MEMORY MANAGEMENT - RMAP (REVERSE MAPPING) +M: Andrew Morton <akpm@linux-foundation.org> +M: David Hildenbrand <david@redhat.com> +M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> +R: Rik van Riel <riel@surriel.com> +R: Liam R. Howlett <Liam.Howlett@oracle.com> +R: Vlastimil Babka <vbabka@suse.cz> +R: Harry Yoo <harry.yoo@oracle.com> +L: linux-mm@kvack.org +S: Maintained +F: include/linux/rmap.h +F: mm/rmap.c + MEMORY MANAGEMENT - SECRETMEM M: Andrew Morton <akpm@linux-foundation.org> M: Mike Rapoport <rppt@kernel.org> @@ -15553,6 +15589,30 @@ S: Maintained F: include/linux/secretmem.h F: mm/secretmem.c +MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE) +M: Andrew Morton <akpm@linux-foundation.org> +M: David Hildenbrand <david@redhat.com> +R: Zi Yan <ziy@nvidia.com> +R: Baolin Wang <baolin.wang@linux.alibaba.com> +R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> +R: Liam R. Howlett <Liam.Howlett@oracle.com> +R: Nico Pache <npache@redhat.com> +R: Ryan Roberts <ryan.roberts@arm.com> +R: Dev Jain <dev.jain@arm.com> +L: linux-mm@kvack.org +S: Maintained +W: http://www.linux-mm.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: Documentation/admin-guide/mm/transhuge.rst +F: include/linux/huge_mm.h +F: include/linux/khugepaged.h +F: include/trace/events/huge_memory.h +F: mm/huge_memory.c +F: mm/khugepaged.c +F: tools/testing/selftests/mm/khugepaged.c +F: tools/testing/selftests/mm/split_huge_page_test.c +F: tools/testing/selftests/mm/transhuge-stress.c + MEMORY MANAGEMENT - USERFAULTFD M: Andrew Morton <akpm@linux-foundation.org> R: Peter Xu <peterx@redhat.com> @@ -22738,9 +22798,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git F: Documentation/devicetree/bindings/sound/ F: Documentation/sound/soc/ F: include/dt-bindings/sound/ +F: include/sound/cs-amp-lib.h +F: include/sound/cs35l* +F: include/sound/cs4271.h +F: include/sound/cs42l* +F: include/sound/madera-pdata.h F: include/sound/soc* F: include/sound/sof.h F: include/sound/sof/ +F: include/sound/wm*.h F: include/trace/events/sof*.h F: include/uapi/sound/asoc.h F: sound/soc/ @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 15 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc6 NAME = Baby Opossum Posse # *DOCUMENTATION* @@ -1052,13 +1052,6 @@ NOSTDINC_FLAGS += -nostdinc # perform bounds checking. KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3) -#Currently, disable -Wstringop-overflow for GCC 11, globally. 
-KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow) -KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) - -#Currently, disable -Wunterminated-string-initialization as broken -KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization) - # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += -fno-strict-overflow diff --git a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi index f2386dcb9ff2..dda4fa91b2f2 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6ul-imx6ull-opos6ul.dtsi @@ -40,6 +40,9 @@ reg = <1>; interrupt-parent = <&gpio4>; interrupts = <16 IRQ_TYPE_LEVEL_LOW>; + micrel,led-mode = <1>; + clocks = <&clks IMX6UL_CLK_ENET_REF>; + clock-names = "rmii-ref"; status = "okay"; }; }; diff --git a/arch/arm64/boot/dts/arm/morello.dtsi b/arch/arm64/boot/dts/arm/morello.dtsi index 0bab0b3ea969..5bc1c725dc86 100644 --- a/arch/arm64/boot/dts/arm/morello.dtsi +++ b/arch/arm64/boot/dts/arm/morello.dtsi @@ -44,7 +44,7 @@ next-level-cache = <&l2_0>; clocks = <&scmi_dvfs 0>; - l2_0: l2-cache-0 { + l2_0: l2-cache { compatible = "cache"; cache-level = <2>; /* 8 ways set associative */ @@ -53,13 +53,6 @@ cache-sets = <2048>; cache-unified; next-level-cache = <&l3_0>; - - l3_0: l3-cache { - compatible = "cache"; - cache-level = <3>; - cache-size = <0x100000>; - cache-unified; - }; }; }; @@ -78,7 +71,7 @@ next-level-cache = <&l2_1>; clocks = <&scmi_dvfs 0>; - l2_1: l2-cache-1 { + l2_1: l2-cache { compatible = "cache"; cache-level = <2>; /* 8 ways set associative */ @@ -105,7 +98,7 @@ next-level-cache = <&l2_2>; clocks = <&scmi_dvfs 1>; - l2_2: l2-cache-2 { + l2_2: l2-cache { compatible = "cache"; cache-level = <2>; /* 8 ways set associative */ @@ -132,7 +125,7 @@ next-level-cache = <&l2_3>; clocks = <&scmi_dvfs 1>; - l2_3: l2-cache-3 { + l2_3: l2-cache { compatible = "cache"; cache-level = <2>; /* 8 ways set associative */ @@ -143,6 +136,13 @@ next-level-cache = <&l3_0>; }; }; + + l3_0: l3-cache { + compatible = "cache"; + cache-level = <3>; + cache-size = <0x100000>; + cache-unified; + }; }; firmware { diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi index 7251ad3a0017..b46566f3ce20 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi @@ -144,6 +144,19 @@ startup-delay-us = <20000>; }; + reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc { + compatible = "regulator-gpio"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc2_vsel>; + gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <1800000>; + states = <1800000 0x1>, + <3300000 0x0>; + regulator-name = "PMIC_USDHC_VSELECT"; + vin-supply = <®_nvcc_sd>; + }; + reserved-memory { #address-cells = <2>; #size-cells = <2>; @@ -269,7 +282,7 @@ "SODIMM_19", "", "", - "", + "PMIC_USDHC_VSELECT", "", "", "", @@ -785,6 +798,7 @@ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>; pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>; vmmc-supply = <®_usdhc2_vmmc>; + vqmmc-supply = <®_usdhc2_vqmmc>; }; &wdog1 { @@ -1206,13 +1220,17 @@ <MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */ }; + pinctrl_usdhc2_vsel: usdhc2vselgrp { + fsl,pins = + <MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x10>; /* PMIC_USDHC_VSELECT */ + }; + /* * Note: Due 
to ERR050080 we use discrete external on-module resistors pulling-up to the * on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here. */ pinctrl_usdhc2: usdhc2grp { fsl,pins = - <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */ <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */ <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */ @@ -1223,7 +1241,6 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { fsl,pins = - <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>, @@ -1234,7 +1251,6 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { fsl,pins = - <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>, @@ -1246,7 +1262,6 @@ /* Avoid backfeeding with removed card power */ pinctrl_usdhc2_sleep: usdhc2slpgrp { fsl,pins = - <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x0>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>, diff --git a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi index a1b75c9068b2..dc0ccd723c6d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-nominal.dtsi @@ -24,6 +24,20 @@ fsl,operating-mode = "nominal"; }; +&gpu2d { + assigned-clocks = <&clk IMX8MP_CLK_GPU2D_CORE>; + assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>; + assigned-clock-rates = <800000000>; +}; + +&gpu3d { + assigned-clocks = <&clk IMX8MP_CLK_GPU3D_CORE>, + <&clk IMX8MP_CLK_GPU3D_SHADER_CORE>; + assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>, + <&clk IMX8MP_SYS_PLL1_800M>; + assigned-clock-rates = <800000000>, <800000000>; +}; + &pgc_hdmimix { assigned-clocks = <&clk IMX8MP_CLK_HDMI_AXI>, <&clk IMX8MP_CLK_HDMI_APB>; @@ -46,6 +60,18 @@ assigned-clock-rates = <600000000>, <300000000>; }; +&pgc_mlmix { + assigned-clocks = <&clk IMX8MP_CLK_ML_CORE>, + <&clk IMX8MP_CLK_ML_AXI>, + <&clk IMX8MP_CLK_ML_AHB>; + assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>, + <&clk IMX8MP_SYS_PLL1_800M>, + <&clk IMX8MP_SYS_PLL1_800M>; + assigned-clock-rates = <800000000>, + <800000000>, + <300000000>; +}; + &media_blk_ctrl { assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>, <&clk IMX8MP_CLK_MEDIA_APB>, diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi index 9bb26b466a06..59f057ba6fa7 100644 --- a/arch/arm64/boot/dts/freescale/imx95.dtsi +++ b/arch/arm64/boot/dts/freescale/imx95.dtsi @@ -1626,7 +1626,7 @@ reg = <0 0x4c300000 0 0x10000>, <0 0x60100000 0 0xfe00000>, <0 0x4c360000 0 0x10000>, - <0 0x4c340000 0 0x2000>; + <0 0x4c340000 0 0x4000>; reg-names = "dbi", "config", "atu", "app"; ranges = <0x81000000 0x0 0x00000000 0x0 0x6ff00000 0 0x00100000>, <0x82000000 0x0 0x10000000 0x9 0x10000000 0 0x10000000>; @@ -1673,7 +1673,7 @@ reg = <0 0x4c300000 0 0x10000>, <0 0x4c360000 0 0x1000>, <0 0x4c320000 0 0x1000>, - <0 0x4c340000 0 0x2000>, + <0 0x4c340000 0 0x4000>, <0 0x4c370000 0 0x10000>, <0x9 0 1 0>; reg-names = "dbi","atu", "dbi2", "app", "dma", "addr_space"; @@ -1700,7 +1700,7 @@ reg = <0 0x4c380000 0 0x10000>, <8 0x80100000 0 0xfe00000>, <0 0x4c3e0000 0 0x10000>, - <0 0x4c3c0000 0 0x2000>; + <0 0x4c3c0000 0 0x4000>; reg-names = "dbi", "config", "atu", "app"; ranges = <0x81000000 0 0x00000000 
0x8 0x8ff00000 0 0x00100000>, <0x82000000 0 0x10000000 0xa 0x10000000 0 0x10000000>; @@ -1749,7 +1749,7 @@ reg = <0 0x4c380000 0 0x10000>, <0 0x4c3e0000 0 0x1000>, <0 0x4c3a0000 0 0x1000>, - <0 0x4c3c0000 0 0x2000>, + <0 0x4c3c0000 0 0x4000>, <0 0x4c3f0000 0 0x10000>, <0xa 0 1 0>; reg-names = "dbi", "atu", "dbi2", "app", "dma", "addr_space"; diff --git a/arch/arm64/boot/dts/st/stm32mp211.dtsi b/arch/arm64/boot/dts/st/stm32mp211.dtsi index 6dd1377f3e1d..bf888d60cd4f 100644 --- a/arch/arm64/boot/dts/st/stm32mp211.dtsi +++ b/arch/arm64/boot/dts/st/stm32mp211.dtsi @@ -116,11 +116,11 @@ }; intc: interrupt-controller@4ac10000 { - compatible = "arm,cortex-a7-gic"; + compatible = "arm,gic-400"; reg = <0x4ac10000 0x0 0x1000>, - <0x4ac20000 0x0 0x2000>, - <0x4ac40000 0x0 0x2000>, - <0x4ac60000 0x0 0x2000>; + <0x4ac20000 0x0 0x20000>, + <0x4ac40000 0x0 0x20000>, + <0x4ac60000 0x0 0x20000>; #interrupt-cells = <3>; interrupt-controller; }; diff --git a/arch/arm64/boot/dts/st/stm32mp231.dtsi b/arch/arm64/boot/dts/st/stm32mp231.dtsi index 8820d219a33e..75697acd1345 100644 --- a/arch/arm64/boot/dts/st/stm32mp231.dtsi +++ b/arch/arm64/boot/dts/st/stm32mp231.dtsi @@ -1201,13 +1201,12 @@ }; intc: interrupt-controller@4ac10000 { - compatible = "arm,cortex-a7-gic"; + compatible = "arm,gic-400"; reg = <0x4ac10000 0x1000>, - <0x4ac20000 0x2000>, - <0x4ac40000 0x2000>, - <0x4ac60000 0x2000>; + <0x4ac20000 0x20000>, + <0x4ac40000 0x20000>, + <0x4ac60000 0x20000>; #interrupt-cells = <3>; - #address-cells = <1>; interrupt-controller; }; }; diff --git a/arch/arm64/boot/dts/st/stm32mp251.dtsi b/arch/arm64/boot/dts/st/stm32mp251.dtsi index f3c6cdfd7008..87110f91e489 100644 --- a/arch/arm64/boot/dts/st/stm32mp251.dtsi +++ b/arch/arm64/boot/dts/st/stm32mp251.dtsi @@ -115,14 +115,13 @@ }; intc: interrupt-controller@4ac00000 { - compatible = "arm,cortex-a7-gic"; + compatible = "arm,gic-400"; #interrupt-cells = <3>; - #address-cells = <1>; interrupt-controller; reg = <0x0 0x4ac10000 0x0 0x1000>, - <0x0 0x4ac20000 0x0 0x2000>, - <0x0 0x4ac40000 0x0 0x2000>, - <0x0 0x4ac60000 0x0 0x2000>; + <0x0 0x4ac20000 0x0 0x20000>, + <0x0 0x4ac40000 0x0 0x20000>, + <0x0 0x4ac60000 0x0 0x20000>; }; psci { diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index ebceaae3c749..d40e427ddad9 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -52,7 +52,7 @@ mrs x0, id_aa64mmfr1_el1 ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4 cbz x0, .Lskip_hcrx_\@ - mov_q x0, HCRX_HOST_FLAGS + mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM) /* Enable GCS if supported */ mrs_s x1, SYS_ID_AA64PFR1_EL1 diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 974d72b5905b..e9c8a581e16f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -100,9 +100,8 @@ HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA) #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) -#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) +#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO) -#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM) #define MPAMHCR_HOST_FLAGS 0 /* TCR_EL2 Registers bits */ diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h index 92a2b59a9f3d..3322c7047d84 100644 --- a/arch/arm64/include/asm/vdso/gettimeofday.h +++ 
b/arch/arm64/include/asm/vdso/gettimeofday.h @@ -99,6 +99,19 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, return res; } +#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) +static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void) +{ + const struct vdso_time_data *ret = &vdso_u_time_data; + + /* Work around invalid absolute relocations */ + OPTIMIZER_HIDE_VAR(ret); + + return ret; +} +#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data +#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */ + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 9c4d6d552b25..4c46d80aa64b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -114,7 +114,14 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NC DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS); -bool arm64_use_ng_mappings = false; +/* + * arm64_use_ng_mappings must be placed in the .data section, otherwise it + * ends up in the .bss section where it is initialized in early_map_kernel() + * after the MMU (with the idmap) was enabled. create_init_idmap() - which + * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG - + * may end up generating an incorrect idmap page table attributes. + */ +bool arm64_use_ng_mappings __read_mostly = false; EXPORT_SYMBOL(arm64_use_ng_mappings); DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors; diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index b198dde79e59..b607f6dfc5e6 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -879,10 +879,12 @@ static u8 spectre_bhb_loop_affected(void) static const struct midr_range spectre_bhb_k132_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), + {}, }; static const struct midr_range spectre_bhb_k38_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), + {}, }; static const struct midr_range spectre_bhb_k32_list[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index b741ea6aefa5..96f625dc7256 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -235,6 +235,8 @@ static inline void __deactivate_traps_mpam(void) static inline void __activate_traps_common(struct kvm_vcpu *vcpu) { + struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); + /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); @@ -245,11 +247,8 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) * EL1 instead of being trapped to EL2. 
*/ if (system_supports_pmuv3()) { - struct kvm_cpu_context *hctxt; - write_sysreg(0, pmselr_el0); - hctxt = host_data_ptr(host_ctxt); ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0); write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0); vcpu_set_flag(vcpu, PMUSERENR_ON_CPU); @@ -269,6 +268,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) hcrx &= ~clr; } + ctxt_sys_reg(hctxt, HCRX_EL2) = read_sysreg_s(SYS_HCRX_EL2); write_sysreg_s(hcrx, SYS_HCRX_EL2); } @@ -278,19 +278,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) { + struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); + write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2); write_sysreg(0, hstr_el2); if (system_supports_pmuv3()) { - struct kvm_cpu_context *hctxt; - - hctxt = host_data_ptr(host_ctxt); write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0); vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU); } if (cpus_have_final_cap(ARM64_HAS_HCX)) - write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); + write_sysreg_s(ctxt_sys_reg(hctxt, HCRX_EL2), SYS_HCRX_EL2); __deactivate_traps_hfgxtr(vcpu); __deactivate_traps_mpam(); diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 2a5284f749b4..e80f3ebd3e2a 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -503,7 +503,7 @@ int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id) { int ret; - if (!addr_is_memory(addr)) + if (!range_is_memory(addr, addr + size)) return -EPERM; ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt, diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index ed363aa3027e..50aa8dbcae75 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -429,23 +429,27 @@ u64 __vgic_v3_get_gic_config(void) /* * To check whether we have a MMIO-based (GICv2 compatible) * CPU interface, we need to disable the system register - * view. To do that safely, we have to prevent any interrupt - * from firing (which would be deadly). + * view. * - * Note that this only makes sense on VHE, as interrupts are - * already masked for nVHE as part of the exception entry to - * EL2. - */ - if (has_vhe()) - flags = local_daif_save(); - - /* * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates * that to be able to set ICC_SRE_EL1.SRE to 0, all the * interrupt overrides must be set. You've got to love this. + * + * As we always run VHE with HCR_xMO set, no extra xMO + * manipulation is required in that case. + * + * To safely disable SRE, we have to prevent any interrupt + * from firing (which would be deadly). This only makes sense + * on VHE, as interrupts are already masked for nVHE as part + * of the exception entry to EL2. */ - sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO); - isb(); + if (has_vhe()) { + flags = local_daif_save(); + } else { + sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO); + isb(); + } + write_gicreg(0, ICC_SRE_EL1); isb(); @@ -453,11 +457,13 @@ u64 __vgic_v3_get_gic_config(void) write_gicreg(sre, ICC_SRE_EL1); isb(); - sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0); - isb(); - if (has_vhe()) + if (has_vhe()) { local_daif_restore(flags); + } else { + sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0); + isb(); + } val = (val & ICC_SRE_EL1_SRE) ? 
0 : (1ULL << 63); val |= read_gicreg(ICH_VTR_EL2); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 754f2fe0cc67..eeda92330ade 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, return -EFAULT; } + if (!is_protected_kvm_enabled()) + memcache = &vcpu->arch.mmu_page_cache; + else + memcache = &vcpu->arch.pkvm_memcache; + /* * Permission faults just need to update the existing leaf entry, * and so normally don't require allocations from the memcache. The @@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (!fault_is_perm || (logging_active && write_fault)) { int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu); - if (!is_protected_kvm_enabled()) { - memcache = &vcpu->arch.mmu_page_cache; + if (!is_protected_kvm_enabled()) ret = kvm_mmu_topup_memory_cache(memcache, min_pages); - } else { - memcache = &vcpu->arch.pkvm_memcache; + else ret = topup_hyp_memcache(memcache, min_pages); - } + if (ret) return ret; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 005ad28f7306..5dde9285afc8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1945,6 +1945,12 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, if ((hw_val & mpam_mask) == (user_val & mpam_mask)) user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + /* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */ + if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) || + !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) || + (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val))) + return -EINVAL; + return set_id_reg(vcpu, rd, user_val); } diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h index 0992cad9c632..c7d75807d13f 100644 --- a/arch/mips/include/asm/idle.h +++ b/arch/mips/include/asm/idle.h @@ -6,11 +6,10 @@ #include <linux/linkage.h> extern void (*cpu_wait)(void); -extern void r4k_wait(void); -extern asmlinkage void __r4k_wait(void); +extern asmlinkage void r4k_wait(void); extern void r4k_wait_irqoff(void); -static inline int using_rollback_handler(void) +static inline int using_skipover_handler(void) { return cpu_wait == r4k_wait; } diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 85fa9962266a..ef72c46b5568 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs, /* Query offset/name of register from its name/offset */ extern int regs_query_register_offset(const char *name); -#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) +#define MAX_REG_OFFSET \ + (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) /** * regs_get_register() - get register value from its offset diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a572ce36a24f..08c0a01d9a29 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -104,48 +104,59 @@ handle_vcei: __FINIT - .align 5 /* 32 byte rollback region */ -LEAF(__r4k_wait) - .set push - .set noreorder - /* start of rollback region */ - LONG_L t0, TI_FLAGS($28) - nop - andi t0, _TIF_NEED_RESCHED - bnez t0, 1f - nop - nop - nop -#ifdef CONFIG_CPU_MICROMIPS - nop - nop - nop - nop -#endif + .section .cpuidle.text,"ax" + /* Align to 32 bytes for the maximum idle interrupt region size. */ + .align 5 +LEAF(r4k_wait) + /* Keep the ISA bit clear for calculations on local labels here. 
*/ +0: .fill 0 + /* Start of idle interrupt region. */ + local_irq_enable + /* + * If an interrupt lands here, before going idle on the next + * instruction, we must *NOT* go idle since the interrupt could + * have set TIF_NEED_RESCHED or caused a timer to need resched. + * Fall through -- see skipover_handler below -- and have the + * idle loop take care of things. + */ +1: .fill 0 + /* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */ + .if 1b - 0b > 32 + .error "overlong idle interrupt region" + .elseif 1b - 0b > 8 + .align 4 + .endif +2: .fill 0 + .equ r4k_wait_idle_size, 2b - 0b + /* End of idle interrupt region; size has to be a power of 2. */ .set MIPS_ISA_ARCH_LEVEL_RAW +r4k_wait_insn: wait - /* end of rollback region (the region size must be power of two) */ -1: +r4k_wait_exit: + .set mips0 + local_irq_disable jr ra - nop - .set pop - END(__r4k_wait) + END(r4k_wait) + .previous - .macro BUILD_ROLLBACK_PROLOGUE handler - FEXPORT(rollback_\handler) + .macro BUILD_SKIPOVER_PROLOGUE handler + FEXPORT(skipover_\handler) .set push .set noat MFC0 k0, CP0_EPC - PTR_LA k1, __r4k_wait - ori k0, 0x1f /* 32 byte rollback region */ - xori k0, 0x1f + /* Subtract/add 2 to let the ISA bit propagate through the mask. */ + PTR_LA k1, r4k_wait_insn - 2 + ori k0, r4k_wait_idle_size - 2 + .set noreorder bne k0, k1, \handler + PTR_ADDIU k0, r4k_wait_exit - r4k_wait_insn + 2 + .set reorder MTC0 k0, CP0_EPC .set pop .endm .align 5 -BUILD_ROLLBACK_PROLOGUE handle_int +BUILD_SKIPOVER_PROLOGUE handle_int NESTED(handle_int, PT_SIZE, sp) .cfi_signal_frame #ifdef CONFIG_TRACE_IRQFLAGS @@ -265,7 +276,7 @@ NESTED(except_vec_ejtag_debug, 0, sp) * This prototype is copied to ebase + n*IntCtl.VS and patched * to invoke the handler */ -BUILD_ROLLBACK_PROLOGUE except_vec_vi +BUILD_SKIPOVER_PROLOGUE except_vec_vi NESTED(except_vec_vi, 0, sp) SAVE_SOME docfi=1 SAVE_AT docfi=1 diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 5abc8b7340f8..80e8a04a642e 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -35,13 +35,6 @@ static void __cpuidle r3081_wait(void) write_c0_conf(cfg | R30XX_CONF_HALT); } -void __cpuidle r4k_wait(void) -{ - raw_local_irq_enable(); - __r4k_wait(); - raw_local_irq_disable(); -} - /* * This variant is preferable as it allows testing need_resched and going to * sleep depending on the outcome atomically. 
Unfortunately the "It is diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index e85bd087467e..cc26d56f3ab6 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -332,6 +332,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) mips_cps_cluster_bootcfg = kcalloc(nclusters, sizeof(*mips_cps_cluster_bootcfg), GFP_KERNEL); + if (!mips_cps_cluster_bootcfg) + goto err_out; if (nclusters > 1) mips_cm_update_property(); @@ -348,6 +350,8 @@ static void __init cps_prepare_cpus(unsigned int max_cpus) mips_cps_cluster_bootcfg[cl].core_power = kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long), GFP_KERNEL); + if (!mips_cps_cluster_bootcfg[cl].core_power) + goto err_out; /* Allocate VPE boot configuration structs */ for (c = 0; c < ncores; c++) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 39e248d0ed59..8ec1e185b35c 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -77,7 +77,7 @@ #include "access-helper.h" extern void check_wait(void); -extern asmlinkage void rollback_handle_int(void); +extern asmlinkage void skipover_handle_int(void); extern asmlinkage void handle_int(void); extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); @@ -2066,7 +2066,7 @@ void *set_vi_handler(int n, vi_handler_t addr) { extern const u8 except_vec_vi[]; extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; - extern const u8 rollback_except_vec_vi[]; + extern const u8 skipover_except_vec_vi[]; unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; @@ -2095,7 +2095,7 @@ void *set_vi_handler(int n, vi_handler_t addr) change_c0_srsmap(0xf << n*4, 0 << n*4); } - vec_start = using_rollback_handler() ? rollback_except_vec_vi : + vec_start = using_skipover_handler() ? skipover_except_vec_vi : except_vec_vi; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) ori_offset = except_vec_vi_ori - vec_start + 2; @@ -2426,8 +2426,8 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(EXCCODE_INT, using_rollback_handler() ? - rollback_handle_int : handle_int); + set_except_vector(EXCCODE_INT, using_skipover_handler() ? + skipover_handle_int : handle_int); set_except_vector(EXCCODE_MOD, handle_tlbm); set_except_vector(EXCCODE_TLBL, handle_tlbl); set_except_vector(EXCCODE_TLBS, handle_tlbs); diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 34495446e051..71829cb7bc81 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -97,9 +97,19 @@ handle_fpe(struct pt_regs *regs) memcpy(regs->fr, frcopy, sizeof regs->fr); if (signalcode != 0) { - force_sig_fault(signalcode >> 24, signalcode & 0xffffff, - (void __user *) regs->iaoq[0]); - return -1; + int sig = signalcode >> 24; + + if (sig == SIGFPE) { + /* + * Clear floating point trap bit to avoid trapping + * again on the first floating-point instruction in + * the userspace signal handler. + */ + regs->fr[0] &= ~(1ULL << 38); + } + force_sig_fault(sig, signalcode & 0xffffff, + (void __user *) regs->iaoq[0]); + return -1; } return signalcode ? -1 : 0; diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index 1db60fe13802..3d8dc822282a 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -234,10 +234,8 @@ fi # suppress some warnings in recent ld versions nowarn="-z noexecstack" -if ! 
ld_is_lld; then - if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then - nowarn="$nowarn --no-warn-rwx-segments" - fi +if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then + nowarn="$nowarn --no-warn-rwx-segments" fi platformo=$object/"$platform".o diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 34a5aec4908f..126bf3b06ab7 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -258,10 +258,6 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, break; } } - if (i == hdr->e_shnum) { - pr_err("%s: doesn't contain __patchable_function_entries.\n", me->name); - return -ENOEXEC; - } #endif pr_debug("Looks like a total of %lu stubs, max\n", relocs); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 311e2112d782..9f764bc42b8c 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -976,7 +976,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start, return 0; } - +#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) { if (radix_enabled()) @@ -984,6 +984,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) return false; } +#endif int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, unsigned long addr, unsigned long next) @@ -1120,6 +1121,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in pmd_t *pmd; pte_t *pte; + /* + * Make sure we align the start vmemmap addr so that we calculate + * the correct start_pfn in altmap boundary check to decided whether + * we should use altmap or RAM based backing memory allocation. Also + * the address need to be aligned for set_pte operation. + + * If the start addr is already PMD_SIZE aligned we will try to use + * a pmd mapping. We don't want to be too aggressive here beacause + * that will cause more allocations in RAM. So only if the namespace + * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping. + */ + + start = ALIGN_DOWN(start, PAGE_SIZE); for (addr = start; addr < end; addr = next) { next = pmd_addr_end(addr, end); @@ -1145,8 +1159,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in * in altmap block allocation failures, in which case * we fallback to RAM for vmemmap allocation. */ - if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) || - altmap_cross_boundary(altmap, addr, PMD_SIZE))) { + if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap && + altmap_cross_boundary(altmap, addr, PMD_SIZE))) { /* * make sure we don't create altmap mappings * covering things outside the device. 
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index 3fbe0295ce14..95d7ba73d43d 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -17,7 +17,7 @@ config PPC_POWERNV select MMU_NOTIFIER select FORCE_SMP select ARCH_SUPPORTS_PER_VMA_LOCK - select PPC_RADIX_BROADCAST_TLBIE + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config OPAL_PRD diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index a934c2a262f6..fa3c2fff082a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -23,7 +23,7 @@ config PPC_PSERIES select FORCE_SMP select SWIOTLB select ARCH_SUPPORTS_PER_VMA_LOCK - select PPC_RADIX_BROADCAST_TLBIE + select PPC_RADIX_BROADCAST_TLBIE if PPC_RADIX_MMU default y config PARAVIRT diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 7c244de77180..15d8f75902f8 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -275,6 +275,9 @@ long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg) unsigned long pmm; u8 pmlen; + if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + return -EINVAL; + if (is_compat_thread(ti)) return -EINVAL; @@ -330,6 +333,9 @@ long get_tagged_addr_ctrl(struct task_struct *task) struct thread_info *ti = task_thread_info(task); long ret = 0; + if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) + return -EINVAL; + if (is_compat_thread(ti)) return -EINVAL; diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 8ff8e8b36524..9c83848797a7 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -198,47 +198,57 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re DO_ERROR_INFO(do_trap_load_fault, SIGSEGV, SEGV_ACCERR, "load access fault"); -asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) +enum misaligned_access_type { + MISALIGNED_STORE, + MISALIGNED_LOAD, +}; +static const struct { + const char *type_str; + int (*handler)(struct pt_regs *regs); +} misaligned_handler[] = { + [MISALIGNED_STORE] = { + .type_str = "Oops - store (or AMO) address misaligned", + .handler = handle_misaligned_store, + }, + [MISALIGNED_LOAD] = { + .type_str = "Oops - load address misaligned", + .handler = handle_misaligned_load, + }, +}; + +static void do_trap_misaligned(struct pt_regs *regs, enum misaligned_access_type type) { + irqentry_state_t state; + if (user_mode(regs)) { irqentry_enter_from_user_mode(regs); + local_irq_enable(); + } else { + state = irqentry_nmi_enter(regs); + } - if (handle_misaligned_load(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - load address misaligned"); + if (misaligned_handler[type].handler(regs)) + do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, + misaligned_handler[type].type_str); + if (user_mode(regs)) { + local_irq_disable(); irqentry_exit_to_user_mode(regs); } else { - irqentry_state_t state = irqentry_nmi_enter(regs); - - if (handle_misaligned_load(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - load address misaligned"); - irqentry_nmi_exit(regs, state); } } -asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs) +asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) { - if (user_mode(regs)) { - irqentry_enter_from_user_mode(regs); - - if (handle_misaligned_store(regs)) - 
do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - store (or AMO) address misaligned"); - - irqentry_exit_to_user_mode(regs); - } else { - irqentry_state_t state = irqentry_nmi_enter(regs); - - if (handle_misaligned_store(regs)) - do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc, - "Oops - store (or AMO) address misaligned"); + do_trap_misaligned(regs, MISALIGNED_LOAD); +} - irqentry_nmi_exit(regs, state); - } +asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs) +{ + do_trap_misaligned(regs, MISALIGNED_STORE); } + DO_ERROR_INFO(do_trap_store_fault, SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault"); DO_ERROR_INFO(do_trap_ecall_s, diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 4354c87c0376..77c788660223 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -88,6 +88,13 @@ #define INSN_MATCH_C_FSWSP 0xe002 #define INSN_MASK_C_FSWSP 0xe003 +#define INSN_MATCH_C_LHU 0x8400 +#define INSN_MASK_C_LHU 0xfc43 +#define INSN_MATCH_C_LH 0x8440 +#define INSN_MASK_C_LH 0xfc43 +#define INSN_MATCH_C_SH 0x8c00 +#define INSN_MASK_C_SH 0xfc43 + #define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4) #if defined(CONFIG_64BIT) @@ -268,7 +275,7 @@ static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset, int __ret; \ \ if (user_mode(regs)) { \ - __ret = __get_user(insn, (type __user *) insn_addr); \ + __ret = get_user(insn, (type __user *) insn_addr); \ } else { \ insn = *(type *)insn_addr; \ __ret = 0; \ @@ -431,6 +438,13 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs) fp = 1; len = 4; #endif + } else if ((insn & INSN_MASK_C_LHU) == INSN_MATCH_C_LHU) { + len = 2; + insn = RVC_RS2S(insn) << SH_RD; + } else if ((insn & INSN_MASK_C_LH) == INSN_MATCH_C_LH) { + len = 2; + shift = 8 * (sizeof(ulong) - len); + insn = RVC_RS2S(insn) << SH_RD; } else { regs->epc = epc; return -1; @@ -530,6 +544,9 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs) len = 4; val.data_ulong = GET_F32_RS2C(insn, regs); #endif + } else if ((insn & INSN_MASK_C_SH) == INSN_MATCH_C_SH) { + len = 2; + val.data_ulong = GET_RS2S(insn, regs); } else { regs->epc = epc; return -1; diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 60d684c76c58..02635bac91f1 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -77,6 +77,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) memcpy(cntx, reset_cntx, sizeof(*cntx)); spin_unlock(&vcpu->arch.reset_cntx_lock); + memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr)); + kvm_riscv_vcpu_fp_reset(vcpu); kvm_riscv_vcpu_vector_reset(vcpu); diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 6f2c9ce1b154..24b22f6a9e99 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -38,7 +38,6 @@ CONFIG_USER_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_EXPERT=y -# CONFIG_SYSFS_SYSCALL is not set CONFIG_PROFILING=y CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y @@ -92,7 +91,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y CONFIG_SLUB_STATS=y @@ -395,6 +393,9 @@ CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m @@ -405,6 +406,9 @@ 
CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m CONFIG_NET_ACT_GATE=m CONFIG_NET_TC_SKB_EXT=y CONFIG_DNS_RESOLVER=y @@ -628,8 +632,16 @@ CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=y +CONFIG_VDPA=m +CONFIG_VDPA_SIM=m +CONFIG_VDPA_SIM_NET=m +CONFIG_VDPA_SIM_BLOCK=m +CONFIG_VDPA_USER=m +CONFIG_MLX5_VDPA_NET=m +CONFIG_VP_VDPA=m CONFIG_VHOST_NET=m CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_VDPA=m CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -654,7 +666,6 @@ CONFIG_NILFS2_FS=m CONFIG_BCACHEFS_FS=y CONFIG_BCACHEFS_QUOTA=y CONFIG_BCACHEFS_POSIX_ACL=y -CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FS_ENCRYPTION=y CONFIG_FS_VERITY=y @@ -724,11 +735,10 @@ CONFIG_NLS_UTF8=m CONFIG_DLM=m CONFIG_UNICODE=y CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_KEY_NOTIFICATIONS=y CONFIG_SECURITY=y -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_LOCKDOWN_LSM=y @@ -741,6 +751,8 @@ CONFIG_IMA=y CONFIG_IMA_DEFAULT_HASH_SHA256=y CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y +CONFIG_FORTIFY_SOURCE=y +CONFIG_HARDENED_USERCOPY=y CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set @@ -756,7 +768,6 @@ CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_ARIA=m CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m @@ -801,7 +812,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_DES_S390=m -CONFIG_CRYPTO_CHACHA_S390=m CONFIG_CRYPTO_HMAC_S390=m CONFIG_ZCRYPT=m CONFIG_PKEY=m @@ -812,9 +822,9 @@ CONFIG_PKEY_UV=m CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_CRYPTO_KRB5=m +CONFIG_CRYPTO_KRB5_SELFTESTS=y CONFIG_CORDIC=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m CONFIG_RANDOM32_SELFTEST=y CONFIG_XZ_DEC_MICROLZMA=y CONFIG_DMA_CMA=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index f18a7d97ac21..2b8b42d569bc 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -36,7 +36,6 @@ CONFIG_USER_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_EXPERT=y -# CONFIG_SYSFS_SYSCALL is not set CONFIG_PROFILING=y CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y @@ -86,7 +85,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y -CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_BUCKETS=y # CONFIG_COMPAT_BRK is not set @@ -385,6 +383,9 @@ CONFIG_CLS_U32_MARK=y CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m @@ -395,6 +396,9 @@ CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m CONFIG_NET_ACT_GATE=m CONFIG_NET_TC_SKB_EXT=y CONFIG_DNS_RESOLVER=y @@ -618,8 +622,16 @@ CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=y +CONFIG_VDPA=m +CONFIG_VDPA_SIM=m +CONFIG_VDPA_SIM_NET=m +CONFIG_VDPA_SIM_BLOCK=m +CONFIG_VDPA_USER=m +CONFIG_MLX5_VDPA_NET=m +CONFIG_VP_VDPA=m CONFIG_VHOST_NET=m CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_VDPA=m 
CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -641,7 +653,6 @@ CONFIG_NILFS2_FS=m CONFIG_BCACHEFS_FS=m CONFIG_BCACHEFS_QUOTA=y CONFIG_BCACHEFS_POSIX_ACL=y -CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y CONFIG_FS_ENCRYPTION=y CONFIG_FS_VERITY=y @@ -711,6 +722,7 @@ CONFIG_NLS_UTF8=m CONFIG_DLM=m CONFIG_UNICODE=y CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_KEY_NOTIFICATIONS=y CONFIG_SECURITY=y @@ -742,7 +754,6 @@ CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_ARIA=m CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m @@ -788,7 +799,6 @@ CONFIG_CRYPTO_SHA3_512_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_DES_S390=m -CONFIG_CRYPTO_CHACHA_S390=m CONFIG_CRYPTO_HMAC_S390=m CONFIG_ZCRYPT=m CONFIG_PKEY=m @@ -799,10 +809,10 @@ CONFIG_PKEY_UV=m CONFIG_CRYPTO_PAES_S390=m CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_CRYPTO_KRB5=m +CONFIG_CRYPTO_KRB5_SELFTESTS=y CONFIG_CORDIC=m CONFIG_PRIME_NUMBERS=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m CONFIG_XZ_DEC_MICROLZMA=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 853b2326a171..8163c1702720 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -70,7 +70,6 @@ CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_INFO_DWARF4=y CONFIG_DEBUG_FS=y CONFIG_PANIC_ON_OOPS=y -# CONFIG_SCHED_DEBUG is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set # CONFIG_FTRACE is not set diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dd291c9ad6a6..9980c17ba22d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -602,7 +602,8 @@ SYM_CODE_START(stack_invalid) stmg %r0,%r7,__PT_R0(%r11) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_R8(64,%r11),0(%r14) - stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 + GET_LC %r2 + mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs jg kernel_stack_invalid diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 9a929bbcc397..241f7251c873 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c @@ -428,6 +428,8 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data) return; } zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state); + if (IS_ERR(zdev)) + return; list_add_tail(&zdev->entry, scan_list); } diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 3a08f9029a3f..1c6e0ae41b0c 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -55,6 +55,7 @@ do { \ goto err_label; \ } \ *((type *)dst) = get_unaligned((type *)(src)); \ + barrier(); \ current->thread.segv_continue = NULL; \ } while (0) @@ -66,6 +67,7 @@ do { \ if (__faulted) \ goto err_label; \ put_unaligned(*((type *)src), (type *)(dst)); \ + barrier(); \ current->thread.segv_continue = NULL; \ } while (0) diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index ce073150dc20..ef2272e92a43 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -225,20 +225,20 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, panic("Failed to sync kernel TLBs: %d", err); goto out; } - else if (current->mm == NULL) { - if (current->pagefault_disabled) { - if (!mc) { - 
show_regs(container_of(regs, struct pt_regs, regs)); - panic("Segfault with pagefaults disabled but no mcontext"); - } - if (!current->thread.segv_continue) { - show_regs(container_of(regs, struct pt_regs, regs)); - panic("Segfault without recovery target"); - } - mc_set_rip(mc, current->thread.segv_continue); - current->thread.segv_continue = NULL; - goto out; + else if (current->pagefault_disabled) { + if (!mc) { + show_regs(container_of(regs, struct pt_regs, regs)); + panic("Segfault with pagefaults disabled but no mcontext"); } + if (!current->thread.segv_continue) { + show_regs(container_of(regs, struct pt_regs, regs)); + panic("Segfault without recovery target"); + } + mc_set_rip(mc, current->thread.segv_continue); + current->thread.segv_continue = NULL; + goto out; + } + else if (current->mm == NULL) { show_regs(container_of(regs, struct pt_regs, regs)); panic("Segfault with no mm"); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4b9f378e05f6..5873c9e39919 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2368,6 +2368,7 @@ config STRICT_SIGALTSTACK_SIZE config CFI_AUTO_DEFAULT bool "Attempt to use FineIBT by default at boot time" depends on FINEIBT + depends on !RUST || RUSTC_VERSION >= 108800 default y help Attempt to use FineIBT by default at boot time. If enabled, diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c index f676156d9f3d..0e9f84ab4bdc 100644 --- a/arch/x86/boot/compressed/mem.c +++ b/arch/x86/boot/compressed/mem.c @@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void) void arch_accept_memory(phys_addr_t start, phys_addr_t end) { - static bool sevsnp; - /* Platform-specific memory-acceptance call goes here */ if (early_is_tdx_guest()) { if (!tdx_accept_memory(start, end)) panic("TDX: Failed to accept memory\n"); - } else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) { - sevsnp = true; + } else if (early_is_sevsnp_guest()) { snp_accept_memory(start, end); } else { error("Cannot accept memory: unknown platform\n"); diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c index 89ba168f4f0f..0003e4416efd 100644 --- a/arch/x86/boot/compressed/sev.c +++ b/arch/x86/boot/compressed/sev.c @@ -645,3 +645,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt) sev_verify_cbit(top_level_pgt); } + +bool early_is_sevsnp_guest(void) +{ + static bool sevsnp; + + if (sevsnp) + return true; + + if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) + return false; + + sevsnp = true; + + if (!snp_vmpl) { + unsigned int eax, ebx, ecx, edx; + + /* + * CPUID Fn8000_001F_EAX[28] - SVSM support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax & BIT(28)) { + struct msr m; + + /* Obtain the address of the calling area to use */ + boot_rdmsr(MSR_SVSM_CAA, &m); + boot_svsm_caa = (void *)m.q; + boot_svsm_caa_pa = m.q; + + /* + * The real VMPL level cannot be discovered, but the + * memory acceptance routines make no use of that so + * any non-zero value suffices here. 
+ */ + snp_vmpl = U8_MAX; + } + } + return true; +} diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h index 4e463f33186d..d3900384b8ab 100644 --- a/arch/x86/boot/compressed/sev.h +++ b/arch/x86/boot/compressed/sev.h @@ -13,12 +13,14 @@ bool sev_snp_enabled(void); void snp_accept_memory(phys_addr_t start, phys_addr_t end); u64 sev_get_status(void); +bool early_is_sevsnp_guest(void); #else static inline bool sev_snp_enabled(void) { return false; } static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { } static inline u64 sev_get_status(void) { return 0; } +static inline bool early_is_sevsnp_guest(void) { return false; } #endif diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 3a4f031d2f44..139ad80d1df3 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -754,7 +754,7 @@ void x86_pmu_enable_all(int added) } } -static inline int is_x86_event(struct perf_event *event) +int is_x86_event(struct perf_event *event) { int i; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 00dfe487bd00..c5f385413392 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4395,7 +4395,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) arr[pebs_enable] = (struct perf_guest_switch_msr){ .msr = MSR_IA32_PEBS_ENABLE, .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, - .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, + .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable, }; if (arr[pebs_enable].host) { diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 18c3ab579b8b..9b20acc0e932 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2379,8 +2379,25 @@ __intel_pmu_pebs_last_event(struct perf_event *event, */ intel_pmu_save_and_restart_reload(event, count); } - } else - intel_pmu_save_and_restart(event); + } else { + /* + * For a non-precise event, it's possible the + * counters-snapshotting records a positive value for the + * overflowed event. Then the HW auto-reload mechanism + * reset the counter to 0 immediately, because the + * pebs_event_reset is cleared if the PERF_X86_EVENT_AUTO_RELOAD + * is not set. The counter backwards may be observed in a + * PMI handler. + * + * Since the event value has been updated when processing the + * counters-snapshotting record, only needs to set the new + * period for the counter. + */ + if (is_pebs_counter_event_group(event)) + static_call(x86_pmu_set_period)(event); + else + intel_pmu_save_and_restart(event); + } } static __always_inline void diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 2c0ce0e9545e..46d120597bab 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -110,14 +110,21 @@ static inline bool is_topdown_event(struct perf_event *event) return is_metric_event(event) || is_slots_event(event); } +int is_x86_event(struct perf_event *event); + +static inline bool check_leader_group(struct perf_event *leader, int flags) +{ + return is_x86_event(leader) ? 
!!(leader->hw.flags & flags) : false; +} + static inline bool is_branch_counters_group(struct perf_event *event) { - return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; + return check_leader_group(event->group_leader, PERF_X86_EVENT_BRANCH_COUNTERS); } static inline bool is_pebs_counter_event_group(struct perf_event *event) { - return event->group_leader->hw.flags & PERF_X86_EVENT_PEBS_CNTR; + return check_leader_group(event->group_leader, PERF_X86_EVENT_PEBS_CNTR); } struct amd_nb { diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 695e569159c1..be7cddc414e4 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -17,10 +17,12 @@ struct ucode_cpu_info { void load_ucode_bsp(void); void load_ucode_ap(void); void microcode_bsp_resume(void); +bool __init microcode_loader_disabled(void); #else static inline void load_ucode_bsp(void) { } static inline void load_ucode_ap(void) { } static inline void microcode_bsp_resume(void) { } +static inline bool __init microcode_loader_disabled(void) { return false; } #endif extern unsigned long initrd_start_early; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 4a10d35e70aa..96cb992d50ef 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -1098,15 +1098,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz static int __init save_microcode_in_initrd(void) { - unsigned int cpuid_1_eax = native_cpuid_eax(1); struct cpuinfo_x86 *c = &boot_cpu_data; struct cont_desc desc = { 0 }; + unsigned int cpuid_1_eax; enum ucode_state ret; struct cpio_data cp; - if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) + if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) return 0; + cpuid_1_eax = native_cpuid_eax(1); + if (!find_blobs_in_containers(&cp)) return -EINVAL; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b3658d11e7b6..079f046ee26d 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -41,8 +41,8 @@ #include "internal.h" -static struct microcode_ops *microcode_ops; -bool dis_ucode_ldr = true; +static struct microcode_ops *microcode_ops; +static bool dis_ucode_ldr = false; bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); module_param(force_minrev, bool, S_IRUSR | S_IWUSR); @@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void) u32 lvl, dummy, i; u32 *levels; + if (x86_cpuid_vendor() != X86_VENDOR_AMD) + return false; + native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); levels = final_levels; @@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void) return false; } -static bool __init check_loader_disabled_bsp(void) +bool __init microcode_loader_disabled(void) { - static const char *__dis_opt_str = "dis_ucode_ldr"; - const char *cmdline = boot_command_line; - const char *option = __dis_opt_str; + if (dis_ucode_ldr) + return true; /* - * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not - * completely accurate as xen pv guests don't see that CPUID bit set but - * that's good enough as they don't land on the BSP path anyway. + * Disable when: + * + * 1) The CPU does not support CPUID. + * + * 2) Bit 31 in CPUID[1]:ECX is clear + * The bit is reserved for hypervisor use. 
This is still not + * completely accurate as XEN PV guests don't see that CPUID bit + * set, but that's good enough as they don't land on the BSP + * path anyway. + * + * 3) Certain AMD patch levels are not allowed to be + * overwritten. */ - if (native_cpuid_ecx(1) & BIT(31)) - return true; - - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { - if (amd_check_current_patch_level()) - return true; - } - - if (cmdline_find_option_bool(cmdline, option) <= 0) - dis_ucode_ldr = false; + if (!have_cpuid_p() || + native_cpuid_ecx(1) & BIT(31) || + amd_check_current_patch_level()) + dis_ucode_ldr = true; return dis_ucode_ldr; } @@ -125,7 +130,10 @@ void __init load_ucode_bsp(void) unsigned int cpuid_1_eax; bool intel = true; - if (!have_cpuid_p()) + if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0) + dis_ucode_ldr = true; + + if (microcode_loader_disabled()) return; cpuid_1_eax = native_cpuid_eax(1); @@ -146,9 +154,6 @@ void __init load_ucode_bsp(void) return; } - if (check_loader_disabled_bsp()) - return; - if (intel) load_ucode_intel_bsp(&early_data); else @@ -159,6 +164,11 @@ void load_ucode_ap(void) { unsigned int cpuid_1_eax; + /* + * Can't use microcode_loader_disabled() here - .init section + * hell. It doesn't have to either - the BSP variant must've + * parsed cmdline already anyway. + */ if (dis_ucode_ldr) return; @@ -810,7 +820,7 @@ static int __init microcode_init(void) struct cpuinfo_x86 *c = &boot_cpu_data; int error; - if (dis_ucode_ldr) + if (microcode_loader_disabled()) return -EINVAL; if (c->x86_vendor == X86_VENDOR_INTEL) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 819199bc0119..2a397da43923 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void) if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) return 0; - if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return 0; uci.mc = get_microcode_blob(&uci, true); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 5df621752fef..50a9702ae4e2 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void) return x86_family(eax); } -extern bool dis_ucode_ldr; extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index de001b2146ab..375f2d7f1762 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) *ptr = (unsigned long)ptep + PAGE_OFFSET; #ifdef CONFIG_MICROCODE_INITRD32 - /* Running on a hypervisor? */ - if (native_cpuid_ecx(1) & BIT(31)) - return; - params = (struct boot_params *)__pa_nodebug(&boot_params); if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) return; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index ccdc45e5b759..aa4d0221583c 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -466,10 +466,18 @@ SECTIONS } /* - * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility: + * COMPILE_TEST kernels can be large - CONFIG_KASAN, for example, can cause + * this. 
Let's assume that nobody will be running a COMPILE_TEST kernel and + * let's assert that fuller build coverage is more valuable than being able to + * run a COMPILE_TEST kernel. + */ +#ifndef CONFIG_COMPILE_TEST +/* + * The ASSERT() sync to . is intentional, for binutils 2.14 compatibility: */ . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE), "kernel image bigger than KERNEL_IMAGE_SIZE"); +#endif /* needed for Clang - see arch/x86/entry/entry.S */ PROVIDE(__ref_stack_chk_guard = __stack_chk_guard); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 050a0e229a4d..f2b36d32ef40 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -104,6 +104,9 @@ void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { + if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) + kvm_mmu_free_obsolete_roots(vcpu); + /* * Checking root.hpa is sufficient even when KVM has mirror root. * We can have either: diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 63bb77ee1bb1..8d1b632e33d2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5974,6 +5974,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu) __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu); __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu); } +EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots); static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, int *bytes) @@ -7669,9 +7670,30 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm) } #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES +static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, + int level) +{ + return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG; +} + +static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn, + int level) +{ + lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG; +} + +static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn, + int level) +{ + lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG; +} + bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, struct kvm_gfn_range *range) { + struct kvm_memory_slot *slot = range->slot; + int level; + /* * Zap SPTEs even if the slot can't be mapped PRIVATE. KVM x86 only * supports KVM_MEMORY_ATTRIBUTE_PRIVATE, and so it *seems* like KVM @@ -7686,6 +7708,38 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm))) return false; + if (WARN_ON_ONCE(range->end <= range->start)) + return false; + + /* + * If the head and tail pages of the range currently allow a hugepage, + * i.e. reside fully in the slot and don't have mixed attributes, then + * add each corresponding hugepage range to the ongoing invalidation, + * e.g. to prevent KVM from creating a hugepage in response to a fault + * for a gfn whose attributes aren't changing. Note, only the range + * of gfns whose attributes are being modified needs to be explicitly + * unmapped, as that will unmap any existing hugepages. 
+ */ + for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) { + gfn_t start = gfn_round_for_level(range->start, level); + gfn_t end = gfn_round_for_level(range->end - 1, level); + gfn_t nr_pages = KVM_PAGES_PER_HPAGE(level); + + if ((start != range->start || start + nr_pages > range->end) && + start >= slot->base_gfn && + start + nr_pages <= slot->base_gfn + slot->npages && + !hugepage_test_mixed(slot, start, level)) + kvm_mmu_invalidate_range_add(kvm, start, start + nr_pages); + + if (end == start) + continue; + + if ((end + nr_pages) > range->end && + (end + nr_pages) <= (slot->base_gfn + slot->npages) && + !hugepage_test_mixed(slot, end, level)) + kvm_mmu_invalidate_range_add(kvm, end, end + nr_pages); + } + /* Unmap the old attribute page. */ if (range->arg.attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE) range->attr_filter = KVM_FILTER_SHARED; @@ -7695,23 +7749,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm, return kvm_unmap_gfn_range(kvm, range); } -static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, - int level) -{ - return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG; -} - -static void hugepage_clear_mixed(struct kvm_memory_slot *slot, gfn_t gfn, - int level) -{ - lpage_info_slot(gfn, slot, level)->disallow_lpage &= ~KVM_LPAGE_MIXED_FLAG; -} -static void hugepage_set_mixed(struct kvm_memory_slot *slot, gfn_t gfn, - int level) -{ - lpage_info_slot(gfn, slot, level)->disallow_lpage |= KVM_LPAGE_MIXED_FLAG; -} static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long attrs) diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c index 699e551ec93b..9864c057187d 100644 --- a/arch/x86/kvm/smm.c +++ b/arch/x86/kvm/smm.c @@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm) kvm_mmu_reset_context(vcpu); } +EXPORT_SYMBOL_GPL(kvm_smm_changed); void process_smi(struct kvm_vcpu *vcpu) { diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0bc708ee2788..a7a7dc507336 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3173,9 +3173,14 @@ skip_vmsa_free: kvfree(svm->sev_es.ghcb_sa); } +static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control) +{ + return (((u64)control->exit_code_hi) << 32) | control->exit_code; +} + static void dump_ghcb(struct vcpu_svm *svm) { - struct ghcb *ghcb = svm->sev_es.ghcb; + struct vmcb_control_area *control = &svm->vmcb->control; unsigned int nbits; /* Re-use the dump_invalid_vmcb module parameter */ @@ -3184,18 +3189,24 @@ static void dump_ghcb(struct vcpu_svm *svm) return; } - nbits = sizeof(ghcb->save.valid_bitmap) * 8; + nbits = sizeof(svm->sev_es.valid_bitmap) * 8; - pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); + /* + * Print KVM's snapshot of the GHCB values that were (unsuccessfully) + * used to handle the exit. If the guest has since modified the GHCB + * itself, dumping the raw GHCB won't help debug why KVM was unable to + * handle the VMGEXIT that KVM observed. 
+ */ + pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa); pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code", - ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb)); + kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm)); pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1", - ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb)); + control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm)); pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2", - ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb)); + control->exit_info_2, kvm_ghcb_sw_exit_info_2_is_valid(svm)); pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch", - ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb)); - pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap); + svm->sev_es.sw_scratch, kvm_ghcb_sw_scratch_is_valid(svm)); + pr_err("%-20s%*pb\n", "valid_bitmap", nbits, svm->sev_es.valid_bitmap); } static void sev_es_sync_to_ghcb(struct vcpu_svm *svm) @@ -3266,11 +3277,6 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm) memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } -static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control) -{ - return (((u64)control->exit_code_hi) << 32) | control->exit_code; -} - static int sev_es_validate_vmgexit(struct vcpu_svm *svm) { struct vmcb_control_area *control = &svm->vmcb->control; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d5d0c5c3300b..a89c271a1951 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -607,9 +607,6 @@ static void svm_disable_virtualization_cpu(void) kvm_cpu_svm_disable(); amd_pmu_disable_virt(); - - if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) - msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); } static int svm_enable_virtualization_cpu(void) @@ -687,9 +684,6 @@ static int svm_enable_virtualization_cpu(void) rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi); } - if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) - msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); - return 0; } @@ -1518,6 +1512,63 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu) __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); } +#ifdef CONFIG_CPU_MITIGATIONS +static DEFINE_SPINLOCK(srso_lock); +static atomic_t srso_nr_vms; + +static void svm_srso_clear_bp_spec_reduce(void *ign) +{ + struct svm_cpu_data *sd = this_cpu_ptr(&svm_data); + + if (!sd->bp_spec_reduce_set) + return; + + msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); + sd->bp_spec_reduce_set = false; +} + +static void svm_srso_vm_destroy(void) +{ + if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) + return; + + if (atomic_dec_return(&srso_nr_vms)) + return; + + guard(spinlock)(&srso_lock); + + /* + * Verify a new VM didn't come along, acquire the lock, and increment + * the count before this task acquired the lock. + */ + if (atomic_read(&srso_nr_vms)) + return; + + on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1); +} + +static void svm_srso_vm_init(void) +{ + if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) + return; + + /* + * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0 + * transition, i.e. destroying the last VM, is fully complete, e.g. so + * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs. 
+ */ + if (atomic_inc_not_zero(&srso_nr_vms)) + return; + + guard(spinlock)(&srso_lock); + + atomic_inc(&srso_nr_vms); +} +#else +static void svm_srso_vm_init(void) { } +static void svm_srso_vm_destroy(void) { } +#endif + static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -1550,6 +1601,11 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm))) kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); + if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) && + !sd->bp_spec_reduce_set) { + sd->bp_spec_reduce_set = true; + msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT); + } svm->guest_state_loaded = true; } @@ -2231,6 +2287,10 @@ static int shutdown_interception(struct kvm_vcpu *vcpu) */ if (!sev_es_guest(vcpu->kvm)) { clear_page(svm->vmcb); +#ifdef CONFIG_KVM_SMM + if (is_smm(vcpu)) + kvm_smm_changed(vcpu, false); +#endif kvm_vcpu_reset(vcpu, true); } @@ -5036,6 +5096,8 @@ static void svm_vm_destroy(struct kvm *kvm) { avic_vm_destroy(kvm); sev_vm_destroy(kvm); + + svm_srso_vm_destroy(); } static int svm_vm_init(struct kvm *kvm) @@ -5061,6 +5123,7 @@ static int svm_vm_init(struct kvm *kvm) return ret; } + svm_srso_vm_init(); return 0; } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index d4490eaed55d..f16b068c4228 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -335,6 +335,8 @@ struct svm_cpu_data { u32 next_asid; u32 min_asid; + bool bp_spec_reduce_set; + struct vmcb *save_area; unsigned long save_area_pa; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index df5b99ea1f18..9896fd574bfc 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4597,7 +4597,7 @@ static bool kvm_is_vm_type_supported(unsigned long type) return type < 32 && (kvm_caps.supported_vm_types & BIT(type)); } -static inline u32 kvm_sync_valid_fields(struct kvm *kvm) +static inline u64 kvm_sync_valid_fields(struct kvm *kvm) { return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS; } @@ -11493,7 +11493,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { struct kvm_queued_exception *ex = &vcpu->arch.exception; struct kvm_run *kvm_run = vcpu->run; - u32 sync_valid_fields; + u64 sync_valid_fields; int r; r = kvm_mmu_post_init_vm(vcpu->kvm); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index eb83348f9305..b6d6750e4bd1 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -899,8 +899,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, cond_mitigation(tsk); /* - * Let nmi_uaccess_okay() and finish_asid_transition() - * know that CR3 is changing. + * Indicate that CR3 is about to change. nmi_uaccess_okay() + * and others are sensitive to the window where mm_cpumask(), + * CR3 and cpu_tlbstate.loaded_mm are not all in sync. */ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); barrier(); @@ -1204,8 +1205,16 @@ done: static bool should_flush_tlb(int cpu, void *data) { + struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu); struct flush_tlb_info *info = data; + /* + * Order the 'loaded_mm' and 'is_lazy' against their + * write ordering in switch_mm_irqs_off(). Ensure + * 'is_lazy' is at least as new as 'loaded_mm'. + */ + smp_rmb(); + /* Lazy TLB will get flushed at the next context switch. 
*/ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu)) return false; @@ -1214,8 +1223,15 @@ static bool should_flush_tlb(int cpu, void *data) if (!info->mm) return true; + /* + * While switching, the remote CPU could have state from + * either the prev or next mm. Assume the worst and flush. + */ + if (loaded_mm == LOADED_MM_SWITCHING) + return true; + /* The target mm is loaded, and the CPU is not lazy. */ - if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm) + if (loaded_mm == info->mm) return true; /* In cpumask, but not the loaded mm? Periodically remove by flushing. */ diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h b/arch/x86/um/shared/sysdep/faultinfo_32.h index ab5c8e47049c..9193a7790a71 100644 --- a/arch/x86/um/shared/sysdep/faultinfo_32.h +++ b/arch/x86/um/shared/sysdep/faultinfo_32.h @@ -31,8 +31,8 @@ struct faultinfo { #define ___backtrack_faulted(_faulted) \ asm volatile ( \ - "mov $0, %0\n" \ "movl $__get_kernel_nofault_faulted_%=,%1\n" \ + "mov $0, %0\n" \ "jmp _end_%=\n" \ "__get_kernel_nofault_faulted_%=:\n" \ "mov $1, %0;" \ diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h b/arch/x86/um/shared/sysdep/faultinfo_64.h index 26fb4835d3e9..61e4ca1e0ab5 100644 --- a/arch/x86/um/shared/sysdep/faultinfo_64.h +++ b/arch/x86/um/shared/sysdep/faultinfo_64.h @@ -31,8 +31,8 @@ struct faultinfo { #define ___backtrack_faulted(_faulted) \ asm volatile ( \ - "mov $0, %0\n" \ "movq $__get_kernel_nofault_faulted_%=,%1\n" \ + "mov $0, %0\n" \ "jmp _end_%=\n" \ "__get_kernel_nofault_faulted_%=:\n" \ "mov $1, %0;" \ diff --git a/block/blk.h b/block/blk.h index 328075787814..594eeba7b949 100644 --- a/block/blk.h +++ b/block/blk.h @@ -480,7 +480,8 @@ static inline void blk_zone_update_request_bio(struct request *rq, * the original BIO sector so that blk_zone_write_plug_bio_endio() can * lookup the zone write plug. 
*/ - if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio)) + if (req_op(rq) == REQ_OP_ZONE_APPEND || + bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) bio->bi_iter.bi_sector = rq->__sector; } void blk_zone_write_plug_bio_endio(struct bio *bio); diff --git a/block/ioprio.c b/block/ioprio.c index 73301a261429..f0ee2798539c 100644 --- a/block/ioprio.c +++ b/block/ioprio.c @@ -46,12 +46,8 @@ int ioprio_check_cap(int ioprio) */ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE)) return -EPERM; - fallthrough; - /* rt has prio field too */ - case IOPRIO_CLASS_BE: - if (level >= IOPRIO_NR_LEVELS) - return -EINVAL; break; + case IOPRIO_CLASS_BE: case IOPRIO_CLASS_IDLE: break; case IOPRIO_CLASS_NONE: diff --git a/crypto/scompress.c b/crypto/scompress.c index 36934c78d127..ffeedcf20b0f 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -163,11 +163,10 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) if (ret) goto unlock; } - if (!scomp_scratch_users) { + if (!scomp_scratch_users++) { ret = crypto_scomp_alloc_scratches(); if (ret) - goto unlock; - scomp_scratch_users++; + scomp_scratch_users--; } unlock: mutex_unlock(&scomp_lock); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 5e1d709c6a46..ccaaf6c100c0 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -544,7 +544,7 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->d0i3_entry_vpu_ts); ivpu_dbg(vdev, FW_BOOT, "boot_params.system_time_us = %llu\n", boot_params->system_time_us); - ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = %u\n", + ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", boot_params->power_profile); } @@ -646,7 +646,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->d0i3_residency_time_us = 0; boot_params->d0i3_entry_vpu_ts = 0; if (IVPU_WA(disable_d0i2)) - boot_params->power_profile = 1; + boot_params->power_profile |= BIT(1); boot_params->system_time_us = ktime_to_us(ktime_get_real()); wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index ec9a3629da3a..633160470c93 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev) else vdev->timeout.autosuspend = 100; vdev->timeout.d0i3_entry_msg = 5; - vdev->timeout.state_dump_msg = 10; + vdev->timeout.state_dump_msg = 100; } } diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h index 300f749971d4..d2d82651976d 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs.h @@ -14,7 +14,7 @@ #define PLL_PROFILING_FREQ_DEFAULT 38400000 #define PLL_PROFILING_FREQ_HIGH 400000000 -#define DCT_DEFAULT_ACTIVE_PERCENT 15u +#define DCT_DEFAULT_ACTIVE_PERCENT 30u #define DCT_PERIOD_US 35300u int ivpu_hw_btrs_info_init(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 863e3cd6ace5..b28da35c30b6 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) err_erase_xa: xa_erase(&vdev->submitted_jobs_xa, job->job_id); err_unlock: - mutex_unlock(&vdev->submitted_jobs_lock); mutex_unlock(&file_priv->lock); + mutex_unlock(&vdev->submitted_jobs_lock); ivpu_rpm_put(vdev); return ret; } @@ -874,15 +874,21 @@ int 
ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file * int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_create *args = data; struct ivpu_cmdq *cmdq; + int ret; - if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) return -ENODEV; if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) return -EINVAL; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->lock); cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false); @@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file * mutex_unlock(&file_priv->lock); + ivpu_rpm_put(vdev); + return cmdq ? 0 : -ENOMEM; } @@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_destroy *args = data; struct ivpu_cmdq *cmdq; - u32 cmdq_id; + u32 cmdq_id = 0; int ret; if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) return -ENODEV; + ret = ivpu_rpm_get(vdev); + if (ret < 0) + return ret; + mutex_lock(&file_priv->lock); cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id); if (!cmdq || cmdq->is_legacy) { ret = -ENOENT; - goto err_unlock; + } else { + cmdq_id = cmdq->id; + ivpu_cmdq_destroy(file_priv, cmdq); + ret = 0; } - cmdq_id = cmdq->id; - ivpu_cmdq_destroy(file_priv, cmdq); mutex_unlock(&file_priv->lock); - ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id); - return 0; -err_unlock: - mutex_unlock(&file_priv->lock); + /* Abort any pending jobs only if cmdq was destroyed */ + if (!ret) + ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id); + + ivpu_rpm_put(vdev); + return ret; } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index b5891e91f7ab..ac0e22454596 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -428,16 +428,17 @@ int ivpu_pm_dct_enable(struct ivpu_device *vdev, u8 active_percent) active_us = (DCT_PERIOD_US * active_percent) / 100; inactive_us = DCT_PERIOD_US - active_us; + vdev->pm->dct_active_percent = active_percent; + + ivpu_dbg(vdev, PM, "DCT requested %u%% (D0: %uus, D0i2: %uus)\n", + active_percent, active_us, inactive_us); + ret = ivpu_jsm_dct_enable(vdev, active_us, inactive_us); if (ret) { ivpu_err_ratelimited(vdev, "Failed to enable DCT: %d\n", ret); return ret; } - vdev->pm->dct_active_percent = active_percent; - - ivpu_dbg(vdev, PM, "DCT set to %u%% (D0: %uus, D0i2: %uus)\n", - active_percent, active_us, inactive_us); return 0; } @@ -445,15 +446,16 @@ int ivpu_pm_dct_disable(struct ivpu_device *vdev) { int ret; + vdev->pm->dct_active_percent = 0; + + ivpu_dbg(vdev, PM, "DCT requested to be disabled\n"); + ret = ivpu_jsm_dct_disable(vdev); if (ret) { ivpu_err_ratelimited(vdev, "Failed to disable DCT: %d\n", ret); return ret; } - vdev->pm->dct_active_percent = 0; - - ivpu_dbg(vdev, PM, "DCT disabled\n"); return 0; } @@ -466,7 +468,7 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work) if (ivpu_hw_btrs_dct_get_request(vdev, &enable)) return; - if (vdev->pm->dct_active_percent) + if (enable) ret = ivpu_pm_dct_enable(vdev, DCT_DEFAULT_ACTIVE_PERCENT); else ret = ivpu_pm_dct_disable(vdev); diff --git a/drivers/base/module.c b/drivers/base/module.c index 5bc71bea883a..218aaa096455 100644 --- 
a/drivers/base/module.c +++ b/drivers/base/module.c @@ -42,16 +42,13 @@ int module_add_driver(struct module *mod, const struct device_driver *drv) if (mod) mk = &mod->mkobj; else if (drv->mod_name) { - struct kobject *mkobj; - - /* Lookup built-in module entry in /sys/modules */ - mkobj = kset_find_obj(module_kset, drv->mod_name); - if (mkobj) { - mk = container_of(mkobj, struct module_kobject, kobj); + /* Lookup or create built-in module entry in /sys/modules */ + mk = lookup_or_create_module_kobject(drv->mod_name); + if (mk) { /* remember our module structure */ drv->p->mkobj = mk; - /* kset_find_obj took a reference */ - kobject_put(mkobj); + /* lookup_or_create_module_kobject took a reference */ + kobject_put(&mk->kobj); } } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 1813cfd0c4bd..cfccf3ff36e7 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1440,7 +1440,7 @@ static void platform_shutdown(struct device *_dev) static int platform_dma_configure(struct device *dev) { - struct platform_driver *drv = to_platform_driver(dev->driver); + struct device_driver *drv = READ_ONCE(dev->driver); struct fwnode_handle *fwnode = dev_fwnode(dev); enum dev_dma_attr attr; int ret = 0; @@ -1451,8 +1451,8 @@ static int platform_dma_configure(struct device *dev) attr = acpi_get_dma_attr(to_acpi_device_node(fwnode)); ret = acpi_dma_configure(dev, attr); } - /* @drv may not be valid when we're called from the IOMMU layer */ - if (ret || !dev->driver || drv->driver_managed_dma) + /* @dev->driver may not be valid when we're called from the IOMMU layer */ + if (ret || !drv || to_platform_driver(drv)->driver_managed_dma) return ret; ret = iommu_device_use_default_domain(dev); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 46cba261075f..b8ba7de08753 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -505,6 +505,17 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file) lo->lo_min_dio_size = loop_query_min_dio_size(lo); } +static int loop_check_backing_file(struct file *file) +{ + if (!file->f_op->read_iter) + return -EINVAL; + + if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter) + return -EINVAL; + + return 0; +} + /* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up @@ -526,6 +537,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (!file) return -EBADF; + error = loop_check_backing_file(file); + if (error) + return error; + /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); @@ -963,6 +978,14 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode, if (!file) return -EBADF; + + if ((mode & BLK_OPEN_WRITE) && !file->f_op->write_iter) + return -EINVAL; + + error = loop_check_backing_file(file); + if (error) + return error; + is_loop = is_loop_device(file); /* This is safe, since we have a reference from open(). 
*/ diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 40f971a66d3e..f9032076bc06 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -201,15 +201,10 @@ struct ublk_params_header { static void ublk_stop_dev_unlocked(struct ublk_device *ub); static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq); static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, - struct ublk_queue *ubq, int tag, size_t offset); + const struct ublk_queue *ubq, int tag, size_t offset); static inline unsigned int ublk_req_build_flags(struct request *req); static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq, int tag); -static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub) -{ - return ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY); -} - static inline bool ublk_dev_is_zoned(const struct ublk_device *ub) { return ub->dev_info.flags & UBLK_F_ZONED; @@ -609,14 +604,19 @@ static void ublk_apply_params(struct ublk_device *ub) ublk_dev_param_zoned_apply(ub); } +static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq) +{ + return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY; +} + static inline bool ublk_support_user_copy(const struct ublk_queue *ubq) { - return ubq->flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY); + return ubq->flags & UBLK_F_USER_COPY; } static inline bool ublk_need_map_io(const struct ublk_queue *ubq) { - return !ublk_support_user_copy(ubq); + return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq); } static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) @@ -624,8 +624,11 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq) /* * read()/write() is involved in user copy, so request reference * has to be grabbed + * + * for zero copy, request buffer need to be registered to io_uring + * buffer table, so reference is needed */ - return ublk_support_user_copy(ubq); + return ublk_support_user_copy(ubq) || ublk_support_zero_copy(ubq); } static inline void ublk_init_req_ref(const struct ublk_queue *ubq, @@ -1946,13 +1949,20 @@ static void ublk_io_release(void *priv) } static int ublk_register_io_buf(struct io_uring_cmd *cmd, - struct ublk_queue *ubq, unsigned int tag, + const struct ublk_queue *ubq, unsigned int tag, unsigned int index, unsigned int issue_flags) { struct ublk_device *ub = cmd->file->private_data; + const struct ublk_io *io = &ubq->ios[tag]; struct request *req; int ret; + if (!ublk_support_zero_copy(ubq)) + return -EINVAL; + + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) + return -EINVAL; + req = __ublk_check_and_get_req(ub, ubq, tag, 0); if (!req) return -EINVAL; @@ -1968,8 +1978,17 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd, } static int ublk_unregister_io_buf(struct io_uring_cmd *cmd, + const struct ublk_queue *ubq, unsigned int tag, unsigned int index, unsigned int issue_flags) { + const struct ublk_io *io = &ubq->ios[tag]; + + if (!ublk_support_zero_copy(ubq)) + return -EINVAL; + + if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) + return -EINVAL; + return io_buffer_unregister_bvec(cmd, index, issue_flags); } @@ -2073,7 +2092,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, case UBLK_IO_REGISTER_IO_BUF: return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); case UBLK_IO_UNREGISTER_IO_BUF: - return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags); + return ublk_unregister_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags); case UBLK_IO_FETCH_REQ: ret = 
ublk_fetch(cmd, ubq, io, ub_cmd->addr); if (ret) @@ -2125,13 +2144,10 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, } static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub, - struct ublk_queue *ubq, int tag, size_t offset) + const struct ublk_queue *ubq, int tag, size_t offset) { struct request *req; - if (!ublk_need_req_ref(ubq)) - return NULL; - req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag); if (!req) return NULL; @@ -2245,6 +2261,9 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb, if (!ubq) return ERR_PTR(-EINVAL); + if (!ublk_support_user_copy(ubq)) + return ERR_PTR(-EACCES); + if (tag >= ubq->q_depth) return ERR_PTR(-EINVAL); @@ -2783,13 +2802,18 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header) ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE | UBLK_F_URING_CMD_COMP_IN_TASK; - /* GET_DATA isn't needed any more with USER_COPY */ - if (ublk_dev_is_user_copy(ub)) + /* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */ + if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)) ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA; - /* Zoned storage support requires user copy feature */ + /* + * Zoned storage support requires reuse `ublksrv_io_cmd->addr` for + * returning write_append_lba, which is only allowed in case of + * user copy or zero copy + */ if (ublk_dev_is_zoned(ub) && - (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) { + (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !(ub->dev_info.flags & + (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY)))) { ret = -EINVAL; goto out_free_dev_number; } diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index c1e69fcc9c4f..0a759ea26fd3 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -957,8 +957,10 @@ static int btintel_pcie_recv_event(struct hci_dev *hdev, struct sk_buff *skb) /* This is a debug event that comes from IML and OP image when it * starts execution. There is no need pass this event to stack. 
*/ - if (skb->data[2] == 0x97) + if (skb->data[2] == 0x97) { + hci_recv_diag(hdev, skb); return 0; + } } return hci_recv_frame(hdev, skb); @@ -974,7 +976,6 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, u8 pkt_type; u16 plen; u32 pcie_pkt_type; - struct sk_buff *new_skb; void *pdata; struct hci_dev *hdev = data->hdev; @@ -1051,24 +1052,20 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, bt_dev_dbg(hdev, "pkt_type: 0x%2.2x len: %u", pkt_type, plen); - new_skb = bt_skb_alloc(plen, GFP_ATOMIC); - if (!new_skb) { - bt_dev_err(hdev, "Failed to allocate memory for skb of len: %u", - skb->len); - ret = -ENOMEM; - goto exit_error; - } - - hci_skb_pkt_type(new_skb) = pkt_type; - skb_put_data(new_skb, skb->data, plen); + hci_skb_pkt_type(skb) = pkt_type; hdev->stat.byte_rx += plen; + skb_trim(skb, plen); if (pcie_pkt_type == BTINTEL_PCIE_HCI_EVT_PKT) - ret = btintel_pcie_recv_event(hdev, new_skb); + ret = btintel_pcie_recv_event(hdev, skb); else - ret = hci_recv_frame(hdev, new_skb); + ret = hci_recv_frame(hdev, skb); + skb = NULL; /* skb is freed in the callee */ exit_error: + if (skb) + kfree_skb(skb); + if (ret) hdev->stat.err_rx++; @@ -1202,8 +1199,6 @@ static void btintel_pcie_rx_work(struct work_struct *work) struct btintel_pcie_data *data = container_of(work, struct btintel_pcie_data, rx_work); struct sk_buff *skb; - int err; - struct hci_dev *hdev = data->hdev; if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { /* Unlike usb products, controller will not send hardware @@ -1224,11 +1219,7 @@ static void btintel_pcie_rx_work(struct work_struct *work) /* Process the sk_buf in queue and send to the HCI layer */ while ((skb = skb_dequeue(&data->rx_skb_q))) { - err = btintel_pcie_recv_frame(data, skb); - if (err) - bt_dev_err(hdev, "Failed to send received frame: %d", - err); - kfree_skb(skb); + btintel_pcie_recv_frame(data, skb); } } @@ -1281,10 +1272,8 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data) bt_dev_dbg(hdev, "RXQ: cr_hia: %u cr_tia: %u", cr_hia, cr_tia); /* Check CR_TIA and CR_HIA for change */ - if (cr_tia == cr_hia) { - bt_dev_warn(hdev, "RXQ: no new CD found"); + if (cr_tia == cr_hia) return; - } rxq = &data->rxq; @@ -1320,6 +1309,16 @@ static irqreturn_t btintel_pcie_msix_isr(int irq, void *data) return IRQ_WAKE_THREAD; } +static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data) +{ + return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM]; +} + +static inline bool btintel_pcie_is_txackq_empty(struct btintel_pcie_data *data) +{ + return data->ia.cr_tia[BTINTEL_PCIE_TXQ_NUM] == data->ia.cr_hia[BTINTEL_PCIE_TXQ_NUM]; +} + static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) { struct msix_entry *entry = dev_id; @@ -1351,12 +1350,18 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) btintel_pcie_msix_gp0_handler(data); /* For TX */ - if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) { btintel_pcie_msix_tx_handle(data); + if (!btintel_pcie_is_rxq_empty(data)) + btintel_pcie_msix_rx_handle(data); + } /* For RX */ - if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) + if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1) { btintel_pcie_msix_rx_handle(data); + if (!btintel_pcie_is_txackq_empty(data)) + btintel_pcie_msix_tx_handle(data); + } /* * Before sending the interrupt the HW disables it to prevent a nested diff --git a/drivers/bluetooth/btmtksdio.c 
b/drivers/bluetooth/btmtksdio.c index edd5eead1e93..1d26207b2ba7 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -723,6 +723,10 @@ static int btmtksdio_close(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); + /* Skip btmtksdio_close if BTMTKSDIO_FUNC_ENABLED isn't set */ + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + return 0; + sdio_claim_host(bdev->func); /* Disable interrupt */ @@ -1443,11 +1447,15 @@ static void btmtksdio_remove(struct sdio_func *func) if (!bdev) return; + hdev = bdev->hdev; + + /* Make sure to call btmtksdio_close before removing sdio card */ + if (test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) + btmtksdio_close(hdev); + /* Be consistent the state in btmtksdio_probe */ pm_runtime_get_noresume(bdev->dev); - hdev = bdev->hdev; - sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5012b5ff92c8..a42dedb78e0a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3010,22 +3010,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev) bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err); } -/* - * ==0: not a dump pkt. - * < 0: fails to handle a dump pkt - * > 0: otherwise. - */ +/* Return: 0 on success, negative errno on failure. */ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) { - int ret = 1; + int ret = 0; u8 pkt_type; u8 *sk_ptr; unsigned int sk_len; u16 seqno; u32 dump_size; - struct hci_event_hdr *event_hdr; - struct hci_acl_hdr *acl_hdr; struct qca_dump_hdr *dump_hdr; struct btusb_data *btdata = hci_get_drvdata(hdev); struct usb_device *udev = btdata->udev; @@ -3035,30 +3029,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) sk_len = skb->len; if (pkt_type == HCI_ACLDATA_PKT) { - acl_hdr = hci_acl_hdr(skb); - if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) - return 0; sk_ptr += HCI_ACL_HDR_SIZE; sk_len -= HCI_ACL_HDR_SIZE; - event_hdr = (struct hci_event_hdr *)sk_ptr; - } else { - event_hdr = hci_event_hdr(skb); } - if ((event_hdr->evt != HCI_VENDOR_PKT) - || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) - return 0; - sk_ptr += HCI_EVENT_HDR_SIZE; sk_len -= HCI_EVENT_HDR_SIZE; dump_hdr = (struct qca_dump_hdr *)sk_ptr; - if ((sk_len < offsetof(struct qca_dump_hdr, data)) - || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) - || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) - return 0; - - /*it is dump pkt now*/ seqno = le16_to_cpu(dump_hdr->seqno); if (seqno == 0) { set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); @@ -3132,17 +3110,84 @@ out: return ret; } +/* Return: true if the ACL packet is a dump packet, false otherwise. 
*/ +static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + u8 *sk_ptr; + unsigned int sk_len; + + struct hci_event_hdr *event_hdr; + struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; + + sk_ptr = skb->data; + sk_len = skb->len; + + acl_hdr = hci_acl_hdr(skb); + if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) + return false; + + sk_ptr += HCI_ACL_HDR_SIZE; + sk_len -= HCI_ACL_HDR_SIZE; + event_hdr = (struct hci_event_hdr *)sk_ptr; + + if ((event_hdr->evt != HCI_VENDOR_PKT) || + (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + return false; + + sk_ptr += HCI_EVENT_HDR_SIZE; + sk_len -= HCI_EVENT_HDR_SIZE; + + dump_hdr = (struct qca_dump_hdr *)sk_ptr; + if ((sk_len < offsetof(struct qca_dump_hdr, data)) || + (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + return false; + + return true; +} + +/* Return: true if the event packet is a dump packet, false otherwise. */ +static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + u8 *sk_ptr; + unsigned int sk_len; + + struct hci_event_hdr *event_hdr; + struct qca_dump_hdr *dump_hdr; + + sk_ptr = skb->data; + sk_len = skb->len; + + event_hdr = hci_event_hdr(skb); + + if ((event_hdr->evt != HCI_VENDOR_PKT) + || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + return false; + + sk_ptr += HCI_EVENT_HDR_SIZE; + sk_len -= HCI_EVENT_HDR_SIZE; + + dump_hdr = (struct qca_dump_hdr *)sk_ptr; + if ((sk_len < offsetof(struct qca_dump_hdr, data)) || + (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || + (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + return false; + + return true; +} + static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) { - if (handle_dump_pkt_qca(hdev, skb)) - return 0; + if (acl_pkt_is_dump_qca(hdev, skb)) + return handle_dump_pkt_qca(hdev, skb); return hci_recv_frame(hdev, skb); } static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) { - if (handle_dump_pkt_qca(hdev, skb)) - return 0; + if (evt_pkt_is_dump_qca(hdev, skb)) + return handle_dump_pkt_qca(hdev, skb); return hci_recv_frame(hdev, skb); } diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 39f7c2d736d1..b603c25f3dfa 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void) #ifdef CONFIG_CLKEVT_I8253 void clockevent_i8253_disable(void) { - raw_spin_lock(&i8253_lock); + guard(raw_spinlock_irqsave)(&i8253_lock); /* * Writing the MODE register should stop the counter, according to @@ -132,8 +132,6 @@ void clockevent_i8253_disable(void) outb_p(0, PIT_CH0); outb_p(0x30, PIT_MODE); - - raw_spin_unlock(&i8253_lock); } static int pit_shutdown(struct clock_event_device *evt) diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 924314cdeebc..d26b610e4f24 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -909,8 +909,19 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) pr_warn(FW_WARN "P-state 0 is not max freq\n"); - if (acpi_cpufreq_driver.set_boost) - policy->boost_supported = true; + if (acpi_cpufreq_driver.set_boost) { + if (policy->boost_supported) { + /* + * The firmware may have altered boost state while the + * CPU was offline (for example during a suspend-resume + * cycle). 
+ */ + if (policy->boost_enabled != boost_state(cpu)) + set_boost(policy, policy->boost_enabled); + } else { + policy->boost_supported = true; + } + } return result; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index acf19b0042bb..f45ded62b0e0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -536,14 +536,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); static unsigned int __resolve_freq(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int target_freq, + unsigned int min, unsigned int max, + unsigned int relation) { unsigned int idx; + target_freq = clamp_val(target_freq, min, max); + if (!policy->freq_table) return target_freq; - idx = cpufreq_frequency_table_target(policy, target_freq, relation); + idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); policy->cached_resolved_idx = idx; policy->cached_target_freq = target_freq; return policy->freq_table[idx].frequency; @@ -577,8 +581,7 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, if (unlikely(min > max)) min = max; - return __resolve_freq(policy, clamp_val(target_freq, min, max), - CPUFREQ_RELATION_LE); + return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); @@ -2397,8 +2400,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, if (cpufreq_disabled()) return -ENODEV; - target_freq = clamp_val(target_freq, policy->min, policy->max); - target_freq = __resolve_freq(policy, target_freq, relation); + target_freq = __resolve_freq(policy, target_freq, policy->min, + policy->max, relation); pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", policy->cpu, target_freq, relation, old_target_freq); @@ -2727,8 +2730,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, * compiler optimizations around them because they may be accessed * concurrently by cpufreq_driver_resolve_freq() during the update. */ - WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H)); - new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L); + WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, + new_data.min, new_data.max, + CPUFREQ_RELATION_H)); + new_data.min = __resolve_freq(policy, new_data.min, new_data.min, + new_data.max, CPUFREQ_RELATION_L); WRITE_ONCE(policy->min, new_data.min > policy->max ? 
policy->max : new_data.min); trace_cpu_frequency_limits(policy); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index a7c38b8b3e78..0e65d37c9231 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -76,7 +76,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, return freq_next; } - index = cpufreq_frequency_table_target(policy, freq_next, relation); + index = cpufreq_frequency_table_target(policy, freq_next, policy->min, + policy->max, relation); freq_req = freq_table[index].frequency; freq_reduc = freq_req * od_tuners->powersave_bias / 1000; freq_avg = freq_req - freq_reduc; diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index c03a91502f84..35de513af6c9 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -115,8 +115,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, unsigned int min, + unsigned int max, unsigned int relation) { struct cpufreq_frequency_table optimal = { .driver_data = ~0, @@ -147,7 +147,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, cpufreq_for_each_valid_entry_idx(pos, table, i) { freq = pos->frequency; - if ((freq < policy->min) || (freq > policy->max)) + if (freq < min || freq > max) continue; if (freq == target_freq) { optimal.driver_data = i; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f41ed0b9e610..ba9bf06f1c77 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -598,6 +598,9 @@ static bool turbo_is_disabled(void) { u64 misc_en; + if (!cpu_feature_enabled(X86_FEATURE_IDA)) + return true; + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 3e971f902363..dcd7008fe06b 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -99,7 +99,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) if (status & priv->ecc_stat_ce_mask) { regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset, &err_addr); - if (priv->ecc_uecnt_offset) + if (priv->ecc_cecnt_offset) regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset, &err_count); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, @@ -1005,9 +1005,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, } } - /* Interrupt mode set to every SBERR */ - regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, - ALTR_A10_ECC_INTMODE); /* Enable ECC */ ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + ALTR_A10_ECC_CTRL_OFST)); @@ -2127,6 +2124,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev) return PTR_ERR(edac->ecc_mgr_map); } + /* Set irq mask for DDR SBE to avoid any pending irq before registration */ + regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, + (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0)); + edac->irq_chip.name = pdev->dev.of_node->name; edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h index 3727e72c8c2e..7248d24c4908 100644 --- a/drivers/edac/altera_edac.h +++ b/drivers/edac/altera_edac.h @@ -249,6 
+249,8 @@ struct altr_sdram_mc_data { #define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94 #define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98 #define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1) +#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16) +#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17) #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 19295282de24..fe55613a8ea9 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -299,7 +299,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, import_uuid(&buf->uuid, (u8 *)&rx_buf->uuid); } - ffa_rx_release(); + if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY)) + ffa_rx_release(); mutex_unlock(&drv_info->rx_lock); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 7af01664ce7e..3a5474015f7d 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -255,6 +255,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent, if (!dev) return NULL; + /* Drop the refcnt bumped implicitly by device_find_child */ + put_device(dev); + return to_scmi_dev(dev); } diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 1c75a4c9c371..0390d5ff195e 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -1248,7 +1248,8 @@ static void xfer_put(const struct scmi_protocol_handle *ph, } static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, - struct scmi_xfer *xfer, ktime_t stop) + struct scmi_xfer *xfer, ktime_t stop, + bool *ooo) { struct scmi_info *info = handle_to_scmi_info(cinfo->handle); @@ -1257,7 +1258,7 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, * in case of out-of-order receptions of delayed responses */ return info->desc->ops->poll_done(cinfo, xfer) || - try_wait_for_completion(&xfer->done) || + (*ooo = try_wait_for_completion(&xfer->done)) || ktime_after(ktime_get(), stop); } @@ -1274,15 +1275,17 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, * itself to support synchronous commands replies. */ if (!desc->sync_cmds_completed_on_ret) { + bool ooo = false; + /* * Poll on xfer using transport provided .poll_done(); * assumes no completion interrupt was available. */ ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, - xfer, stop)); - if (ktime_after(ktime_get(), stop)) { + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, + stop, &ooo)); + if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) { dev_err(dev, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig index 0a883091259a..e3c2e38b746d 100644 --- a/drivers/firmware/cirrus/Kconfig +++ b/drivers/firmware/cirrus/Kconfig @@ -6,14 +6,11 @@ config FW_CS_DSP config FW_CS_DSP_KUNIT_TEST_UTILS tristate - depends on KUNIT && REGMAP - select FW_CS_DSP config FW_CS_DSP_KUNIT_TEST tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS - depends on KUNIT && REGMAP + depends on KUNIT && REGMAP && FW_CS_DSP default KUNIT_ALL_TESTS - select FW_CS_DSP select FW_CS_DSP_KUNIT_TEST_UTILS help This builds KUnit tests for cs_dsp. 
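Aside on the arm_scmi polling change above: scmi_xfer_done_no_timeout() now reports, through the new ooo flag, whether try_wait_for_completion() consumed an out-of-order completion, and scmi_wait_for_reply() re-checks poll_done() instead of only comparing ktime against the deadline, so a reply that lands in the final polling iteration is no longer misreported as a timeout. Below is a minimal userspace sketch of that pattern, not kernel code: the pthread/usleep scaffolding, the helper names (responder, try_wait_completion, now_ms, hw_ready, completion_done) and the timings are illustrative assumptions only.

/*
 * Sketch (assumption: userspace analogue, not the SCMI implementation).
 * A poller waits for either a polled condition or an async completion,
 * bounded by a deadline. Deciding "timed out" purely from the clock can
 * race with a completion consumed in the final iteration; instead,
 * remember whether the completion was consumed and re-check the polled
 * condition before declaring a timeout.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static atomic_bool completion_done;	/* analogue of xfer->done */
static atomic_bool hw_ready;		/* analogue of the transport poll state */

static bool poll_done(void)
{
	return atomic_load(&hw_ready);
}

/* Consume the completion if it has fired, like try_wait_for_completion(). */
static bool try_wait_completion(void)
{
	bool expected = true;

	return atomic_compare_exchange_strong(&completion_done, &expected, false);
}

static void *responder(void *arg)
{
	(void)arg;
	usleep(50 * 1000);		/* out-of-order reply arrives after 50 ms */
	atomic_store(&completion_done, true);
	return NULL;
}

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

int main(void)
{
	pthread_t thr;
	uint64_t deadline = now_ms() + 100;	/* 100 ms timeout budget */
	bool ooo = false;			/* completion consumed while polling? */

	pthread_create(&thr, NULL, responder, NULL);

	/* Spin until the polled condition, the completion, or the deadline. */
	while (!poll_done() &&
	       !(ooo = try_wait_completion()) &&
	       now_ms() < deadline)
		usleep(1000);

	/*
	 * Do not decide from the clock alone: the completion may have been
	 * consumed in the very iteration in which the deadline also expired.
	 */
	if (!ooo && !poll_done())
		printf("timed out\n");
	else
		printf("reply received (ooo=%d)\n", ooo);

	pthread_join(thr, NULL);
	return 0;
}

The point of the shape of the final check is that the timeout verdict is derived from the same conditions that terminate the loop, rather than from a second, later reading of the clock.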
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2cba2b6ebe1c..f01925ed8176 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -188,7 +188,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS bool "Enable refcount backtrace history in the DP MST helpers" depends on STACKTRACE_SUPPORT select STACKDEPOT - depends on DRM_KMS_HELPER + select DRM_KMS_HELPER depends on DEBUG_KERNEL depends on EXPERT help diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c index c98c647f981d..54cde090c3f4 100644 --- a/drivers/gpu/drm/adp/adp_drv.c +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -121,7 +121,6 @@ struct adp_drv_private { dma_addr_t mask_iova; int be_irq; int fe_irq; - spinlock_t irq_lock; struct drm_pending_vblank_event *event; }; @@ -288,6 +287,7 @@ static void adp_crtc_atomic_enable(struct drm_crtc *crtc, writel(BIT(0), adp->be + ADBE_BLEND_EN3); writel(BIT(0), adp->be + ADBE_BLEND_BYPASS); writel(BIT(0), adp->be + ADBE_BLEND_EN4); + drm_crtc_vblank_on(crtc); } static void adp_crtc_atomic_disable(struct drm_crtc *crtc, @@ -310,6 +310,7 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { u32 frame_num = 1; + unsigned long flags; struct adp_drv_private *adp = crtc_to_adp(crtc); struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, crtc); u64 new_size = ALIGN(new_state->mode.hdisplay * @@ -330,13 +331,19 @@ static void adp_crtc_atomic_flush(struct drm_crtc *crtc, } writel(ADBE_FIFO_SYNC | frame_num, adp->be + ADBE_FIFO); //FIXME: use adbe flush interrupt - spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { - drm_crtc_vblank_get(crtc); - adp->event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + + crtc->state->event = NULL; + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, event); + else + adp->event = event; + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } - crtc->state->event = NULL; - spin_unlock_irq(&crtc->dev->event_lock); } static const struct drm_crtc_funcs adp_crtc_funcs = { @@ -482,8 +489,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg) u32 int_status; u32 int_ctl; - spin_lock(&adp->irq_lock); - int_status = readl(adp->fe + ADP_INT_STATUS); if (int_status & ADP_INT_STATUS_VBLANK) { drm_crtc_handle_vblank(&adp->crtc); @@ -501,7 +506,6 @@ static irqreturn_t adp_fe_irq(int irq, void *arg) writel(int_status, adp->fe + ADP_INT_STATUS); - spin_unlock(&adp->irq_lock); return IRQ_HANDLED; } @@ -512,8 +516,7 @@ static int adp_drm_bind(struct device *dev) struct adp_drv_private *adp = to_adp(drm); int err; - adp_disable_vblank(adp); - writel(ADP_CTRL_FIFO_ON | ADP_CTRL_VBLANK_ON, adp->fe + ADP_CTRL); + writel(ADP_CTRL_FIFO_ON, adp->fe + ADP_CTRL); adp->next_bridge = drmm_of_get_bridge(&adp->drm, dev->of_node, 0, 0); if (IS_ERR(adp->next_bridge)) { @@ -567,8 +570,6 @@ static int adp_probe(struct platform_device *pdev) if (IS_ERR(adp)) return PTR_ERR(adp); - spin_lock_init(&adp->irq_lock); - dev_set_drvdata(&pdev->dev, &adp->drm); err = adp_parse_of(pdev, adp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ef6e78224fdf..c3641331d4de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1614,11 +1614,9 @@ static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_cap #if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) bool amdgpu_acpi_is_s3_active(struct 
amdgpu_device *adev); bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); -void amdgpu_choose_low_power_state(struct amdgpu_device *adev); #else static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } -static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { } #endif void amdgpu_register_gpu_instance(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index b7f8f2ff143d..707e131f89d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1533,22 +1533,4 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) #endif /* CONFIG_AMD_PMC */ } -/** - * amdgpu_choose_low_power_state - * - * @adev: amdgpu_device_pointer - * - * Choose the target low power state for the GPU - */ -void amdgpu_choose_low_power_state(struct amdgpu_device *adev) -{ - if (adev->in_runpm) - return; - - if (amdgpu_acpi_is_s0ix_active(adev)) - adev->in_s0ix = true; - else if (amdgpu_acpi_is_s3_active(adev)) - adev->in_s3 = true; -} - #endif /* CONFIG_SUSPEND */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7f354cd532dc..f8b3e04d71ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4907,28 +4907,20 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) * @data: data * * This function is called when the system is about to suspend or hibernate. - * It is used to evict resources from the device before the system goes to - * sleep while there is still access to swap. + * It is used to set the appropriate flags so that eviction can be optimized + * in the pm prepare callback. */ static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, void *data) { struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb); - int r; switch (mode) { case PM_HIBERNATION_PREPARE: adev->in_s4 = true; - fallthrough; - case PM_SUSPEND_PREPARE: - r = amdgpu_device_evict_resources(adev); - /* - * This is considered non-fatal at this time because - * amdgpu_device_prepare() will also fatally evict resources. 
- * See https://gitlab.freedesktop.org/drm/amd/-/issues/3781 - */ - if (r) - drm_warn(adev_to_drm(adev), "Failed to evict resources, freeze active processes if problems occur: %d\n", r); + break; + case PM_POST_HIBERNATION: + adev->in_s4 = false; break; } @@ -4949,15 +4941,13 @@ int amdgpu_device_prepare(struct drm_device *dev) struct amdgpu_device *adev = drm_to_adev(dev); int i, r; - amdgpu_choose_low_power_state(adev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* Evict the majority of BOs before starting suspend sequence */ r = amdgpu_device_evict_resources(adev); if (r) - goto unprepare; + return r; flush_delayed_work(&adev->gfx.gfx_off_delay_work); @@ -4968,15 +4958,10 @@ int amdgpu_device_prepare(struct drm_device *dev) continue; r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]); if (r) - goto unprepare; + return r; } return 0; - -unprepare: - adev->in_s0ix = adev->in_s3 = adev->in_s4 = false; - - return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index e6913fcf2c7b..44e120f9f764 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -199,6 +199,11 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, break; case TTM_PL_VRAM: + /* XGMI-accessible memory should never be DMA-mapped */ + if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible( + dma_buf_attach_adev(attach), bo))) + return ERR_PTR(-EINVAL); + r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, bo->tbo.base.size, attach->dev, dir, &sgt); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 24ee4710f807..72c807f5822e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2615,13 +2615,8 @@ static int amdgpu_pmops_freeze(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - int r; - - r = amdgpu_device_resume(drm_dev, true); - adev->in_s4 = false; - return r; + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) @@ -2634,9 +2629,6 @@ static int amdgpu_pmops_poweroff(struct device *dev) static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); - struct amdgpu_device *adev = drm_to_adev(drm_dev); - - adev->in_s4 = false; return amdgpu_device_resume(drm_dev, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index cdcdae7f71ce..83adf81defc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -66,7 +66,6 @@ #define VCN_ENC_CMD_REG_WAIT 0x0000000c #define VCN_AON_SOC_ADDRESS_2_0 0x1f800 -#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define VCN_VID_IP_ADDRESS_2_0 0x0 #define VCN_AON_IP_ADDRESS_2_0 0x30000 diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index f1dc13b3ab38..cbbeadeb53f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -41,7 +41,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a 
register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c index 43195c079748..086a647308df 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c @@ -32,7 +32,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c index fcb8dd2876bc..40940b4ab400 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c @@ -33,7 +33,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, if (!ring || !ring->funcs->emit_wreg) { WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + if (amdgpu_sriov_vf(adev)) { + /* this is fine because SR_IOV doesn't remap the register */ + RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); + } } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c index a88d25a06c29..6ccd31c8bc69 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c @@ -35,7 +35,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. 
+ */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c index 49f7eb4fbd11..2c9239a22f39 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c @@ -32,7 +32,12 @@ static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev, { if (!ring || !ring->funcs->emit_wreg) { WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. + */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c index 2ece3ae75ec1..bed5ef4d8788 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c @@ -360,7 +360,7 @@ static void nbio_v7_11_get_clockgating_state(struct amdgpu_device *adev, *flags |= AMD_CG_SUPPORT_BIF_LS; } -#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) +#define MMIO_REG_HOLE_OFFSET 0x44000 static void nbio_v7_11_set_reg_remap(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 8e7a36f26e9c..b8d835c9e17e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -39,6 +39,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x1fd #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x503 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index d716510b8dd6..3eec1b8feaee 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -39,6 +39,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 22ae1939476f..0b19f0ab4480 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -40,6 +40,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index c6f6392c1c20..1f777c125b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -46,6 +46,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 #define VCN_HARVEST_MMSCH 0 @@ -614,7 +615,8 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, /* VCN global tiling registers */ WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( - VCN, 0, 
regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG), + adev->gfx.config.gb_addr_config, 0, indirect); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 3e176b4b7c69..012f6ea928ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -45,6 +45,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 0x48300 +#define VCN1_AON_SOC_ADDRESS_3_0 0x48000 static const struct amdgpu_hwip_reg_entry vcn_reg_list_4_0_3[] = { SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS), diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index ba603b2246e2..a1171e6152ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -46,6 +46,7 @@ #define VCN_VID_SOC_ADDRESS_2_0 0x1fb00 #define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000) +#define VCN1_AON_SOC_ADDRESS_3_0 (0x48000 + 0x38000) #define VCN_HARVEST_MMSCH 0 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index d99d05f42f1d..b90da3d3e140 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -533,7 +533,8 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, /* VCN global tiling registers */ WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( - VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); + VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG), + adev->gfx.config.gb_addr_config, 0, indirect); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 581d8629b9d9..e0e84ef7f568 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -503,6 +503,52 @@ static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst) } /** + * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode + * + * @vinst: VCN instance + * @new_state: pause state + * + * Pause dpg mode for VCN block + */ +static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, + struct dpg_pause_state *new_state) +{ + struct amdgpu_device *adev = vinst->adev; + uint32_t reg_data = 0; + int vcn_inst; + + vcn_inst = GET_INST(VCN, vinst->inst); + + /* pause/unpause if state is changed */ + if (vinst->pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", + vinst->pause_state.fw_based, new_state->fw_based, + new_state->fw_based ? 
"VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); + reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + } else { + /* unpause DPG, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + } + vinst->pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + + +/** * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode * * @vinst: VCN instance @@ -518,6 +564,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; struct amdgpu_ring *ring; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; int vcn_inst; uint32_t tmp; @@ -582,6 +629,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, if (indirect) amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + /* Pause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); @@ -775,9 +825,13 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) int inst_idx = vinst->inst; uint32_t tmp; int vcn_inst; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; vcn_inst = GET_INST(VCN, inst_idx); + /* Unpause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 536f73131c2d..64df8ca448b3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -673,15 +673,21 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); if (acrtc->dm_irq_params.stream && - acrtc->dm_irq_params.vrr_params.supported && - acrtc->dm_irq_params.freesync_config.state == - VRR_STATE_ACTIVE_VARIABLE) { + acrtc->dm_irq_params.vrr_params.supported) { + bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled; + bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled; + bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; + mod_freesync_handle_v_update(adev->dm.freesync_module, acrtc->dm_irq_params.stream, &acrtc->dm_irq_params.vrr_params); - dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, - &acrtc->dm_irq_params.vrr_params.adjust); + /* update vmin_vmax only if freesync is enabled, or only if PSR and REPLAY are disabled */ + if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) { + dc_stream_adjust_vmin_vmax(adev->dm.dc, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } } /* @@ -12743,7 +12749,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( * Transient states before tunneling is enabled could * lead to this error. We can ignore this for now. 
*/ - if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { + if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", payload->address, payload->length, p_notify->result); @@ -12752,22 +12758,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( goto out; } + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; + if (adev->dm.dmub_notify->aux_reply.command & 0xF0) + /* The reply is stored in the top nibble of the command. */ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; - if (!payload->write && p_notify->aux_reply.length && - (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { - - if (payload->length != p_notify->aux_reply.length) { - DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", - p_notify->aux_reply.length, - payload->address, payload->length); - *operation_result = AUX_RET_ERROR_INVALID_REPLY; - goto out; - } - + if (!payload->write && p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); - } /* success */ ret = p_notify->aux_reply.length; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5198a079b463..8f22ad966543 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -173,6 +173,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int conn_index = aconnector->base.index; guard(mutex)(&hdcp_w->mutex); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); hdcp_w->aconnector[conn_index] = aconnector; memset(&link_adjust, 0, sizeof(link_adjust)); @@ -220,7 +223,6 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int conn_index = aconnector->base.index; guard(mutex)(&hdcp_w->mutex); - hdcp_w->aconnector[conn_index] = aconnector; /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -236,7 +238,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } process_output(hdcp_w); } @@ -254,6 +259,10 @@ void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_inde for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } } process_output(hdcp_w); @@ -488,6 +497,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_work = handle; struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; int link_index = aconnector->dc_link->link_index; + unsigned int conn_index = aconnector->base.index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; @@ -544,7 +554,10 @@ static void update_config(void *handle, struct 
cp_psp_stream_config *config) guard(mutex)(&hdcp_w->mutex); mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); - + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; process_output(hdcp_w); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 7ceedf626d23..074b79fd5822 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -51,6 +51,9 @@ #define PEAK_FACTOR_X1000 1006 +/* + * This function handles both native AUX and I2C-Over-AUX transactions. + */ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -87,15 +90,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (adev->dm.aux_hpd_discon_quirk) { if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && operation_result == AUX_RET_ERROR_HPD_DISCON) { - result = 0; + result = msg->size; operation_result = AUX_RET_SUCCESS; } } - if (payload.write && result >= 0) - result = msg->size; + /* + * result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER + */ + if (payload.write && result >= 0) { + if (result) { + /*one byte indicating partially written bytes. Force 0 to retry*/ + drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + result = 0; + } else if (!payload.reply[0]) + /*I2C_ACK|AUX_ACK*/ + result = msg->size; + } - if (result < 0) + if (result < 0) { switch (operation_result) { case AUX_RET_SUCCESS: break; @@ -114,6 +127,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } + drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + } + + if (payload.reply[0]) + drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + payload.reply[0]); + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index 5d16f36ec95c..ed6584535e89 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -234,7 +234,9 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s if (!result) return false; + DC_FP_START(); result = dml2_build_mode_programming(mode_programming); + DC_FP_END(); if (!result) return false; @@ -277,7 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co mode_support->dml2_instance = dml_init->dml2_instance; dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; + DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); + DC_FP_END(); if (!is_supported) return false; @@ -288,16 +292,12 @@ bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml { bool out = false; - DC_FP_START(); - /* Use dml_validate_only for fast_validate path */ if (fast_validate) out = dml21_check_mode_support(in_dc, context, dml_ctx); else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); - DC_FP_END(); - return out; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 
2061d43b92e1..ab6baf269801 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -973,7 +973,9 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p } } -static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc_state *context, struct scaler_data *out) +static struct scaler_data *get_scaler_data_for_plane( + const struct dc_plane_state *in, + struct dc_state *context) { int i; struct pipe_ctx *temp_pipe = &context->res_ctx.temp_pipe; @@ -994,7 +996,7 @@ static void get_scaler_data_for_plane(const struct dc_plane_state *in, struct dc } ASSERT(i < MAX_PIPES); - memcpy(out, &temp_pipe->plane_res.scl_data, sizeof(*out)); + return &temp_pipe->plane_res.scl_data; } static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, @@ -1057,11 +1059,7 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out const struct dc_plane_state *in, struct dc_state *context, const struct soc_bounding_box_st *soc) { - struct scaler_data *scaler_data = kzalloc(sizeof(*scaler_data), GFP_KERNEL); - if (!scaler_data) - return; - - get_scaler_data_for_plane(in, context, scaler_data); + struct scaler_data *scaler_data = get_scaler_data_for_plane(in, context); out->CursorBPP[location] = dml_cur_32bit; out->CursorWidth[location] = 256; @@ -1126,8 +1124,6 @@ static void populate_dml_plane_cfg_from_plane_state(struct dml_plane_cfg_st *out out->DynamicMetadataTransmittedBytes[location] = 0; out->NumberOfCursors[location] = 1; - - kfree(scaler_data); } static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml2, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 2a59cc61ed8c..944650cb13de 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2114,8 +2114,6 @@ static bool dcn32_resource_construct( #define REG_STRUCT dccg_regs dccg_regs_init(); - DC_FP_START(); - ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_dcn32; @@ -2501,14 +2499,10 @@ static bool dcn32_resource_construct( if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; - DC_FP_END(); - return true; create_fail: - DC_FP_END(); - dcn32_resource_destruct(pool); return false; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 17fc5dc708f4..60e5ac179c15 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -549,7 +549,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method) if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt)) break; - len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery); + len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery); } if (recovery) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index c299cd94d3f7..cf2463090d3a 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -964,6 +964,10 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) struct drm_file *file = f->private_data; struct drm_device *dev = file->minor->dev; struct drm_printer p = drm_seq_file_printer(m); + int idx; + + if (!drm_dev_enter(dev, &idx)) + return; drm_printf(&p, 
"drm-driver:\t%s\n", dev->driver->name); drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id); @@ -983,6 +987,8 @@ void drm_show_fdinfo(struct seq_file *m, struct file *f) if (dev->driver->show_fdinfo) dev->driver->show_fdinfo(&p, file); + + drm_dev_exit(idx); } EXPORT_SYMBOL(drm_show_fdinfo); diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 38431e8360e7..de424e670995 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1469,9 +1469,9 @@ map_pages: } i += 1 << order; num_dma_mapped = i; + range->flags.has_dma_mapping = true; } - range->flags.has_dma_mapping = true; if (zdd) { range->flags.has_devmem_pages = true; range->dpagemap = dpagemap; diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 89e05a5bed1d..a4cd476f9b30 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -404,12 +404,16 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev) u16 height = drm->mode_config.min_height; u16 width = drm->mode_config.min_width; struct mipi_dbi *dbi = &dbidev->dbi; - size_t len = width * height * 2; + const struct drm_format_info *dst_format; + size_t len; int idx; if (!drm_dev_enter(drm, &idx)) return; + dst_format = drm_format_info(dbidev->pixel_format); + len = drm_format_info_min_pitch(dst_format, 0, width) * height; + memset(dbidev->tx_buf, 0, len); mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 02f95108c637..6dc2d31ccb5a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -242,7 +242,7 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp, to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - bool is_mst = intel_dp->is_mst; + bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int bpp_x16, slots = -EINVAL; int dsc_slice_count = 0; int max_dpt_bpp_x16; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 64e9317f58fb..71ee01d9ef64 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1001,6 +1001,10 @@ void intel_rps_dec_waiters(struct intel_rps *rps) if (rps_uses_slpc(rps)) { slpc = rps_to_slpc(rps); + /* Don't decrement num_waiters for req where increment was skipped */ + if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING) + return; + intel_guc_slpc_dec_waiters(slpc); } else { atomic_dec(&rps->num_waiters); @@ -1029,11 +1033,15 @@ void intel_rps_boost(struct i915_request *rq) if (slpc->power_profile == SLPC_POWER_PROFILES_POWER_SAVING) return; - if (slpc->min_freq_softlimit >= slpc->boost_freq) - return; - /* Return if old value is non zero */ if (!atomic_fetch_inc(&slpc->num_waiters)) { + /* + * Skip queuing boost work if frequency is already boosted, + * but still increment num_waiters. 
+ */ + if (slpc->min_freq_softlimit >= slpc->boost_freq) + return; + GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", rq->fence.context, rq->fence.seqno); queue_work(rps_to_gt(rps)->i915->unordered_wq, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h index 9aae779c4da3..4969d3de2bac 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.h @@ -23,6 +23,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp); int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id); void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); +bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); #else static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp) @@ -34,8 +35,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp) return 0; } -#endif +static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp) +{ + return false; +} -bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp); +#endif #endif /*__INTEL_PXP_GSCCS_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 7cc84472cece..edddfc036c6d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -90,7 +90,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) while (!list_empty(&fctx->pending)) { fence = list_entry(fctx->pending.next, typeof(*fence), head); - if (error) + if (error && !dma_fence_is_signaled_locked(&fence->base)) dma_fence_set_error(&fence->base, error); if (nouveau_fence_signal(fence)) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 232b03c1a259..33a37539de57 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = { }, }; -static const struct drm_display_mode auo_g101evn010_mode = { - .clock = 68930, - .hdisplay = 1280, - .hsync_start = 1280 + 82, - .hsync_end = 1280 + 82 + 2, - .htotal = 1280 + 82 + 2 + 84, - .vdisplay = 800, - .vsync_start = 800 + 8, - .vsync_end = 800 + 8 + 2, - .vtotal = 800 + 8 + 2 + 6, +static const struct display_timing auo_g101evn010_timing = { + .pixelclock = { 64000000, 68930000, 85000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 8, 64, 256 }, + .hback_porch = { 8, 64, 256 }, + .hsync_len = { 40, 168, 767 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 4, 8, 100 }, + .vback_porch = { 4, 8, 100 }, + .vsync_len = { 8, 16, 223 }, }; static const struct panel_desc auo_g101evn010 = { - .modes = &auo_g101evn010_mode, - .num_modes = 1, + .timings = &auo_g101evn010_timing, + .num_timings = 1, .bpc = 6, .size = { .width = 216, .height = 135, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index fd4215e2f982..925fbc2cda70 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -216,6 +216,9 @@ static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); KUNIT_EXPECT_NULL(test, shmem->sgt); + ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt); + KUNIT_ASSERT_EQ(test, ret, 0); + ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, 
sgt); KUNIT_ASSERT_EQ(test, ret, 0); diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c index 93c007f18855..9e2d72c447ee 100644 --- a/drivers/gpu/drm/ttm/ttm_backup.c +++ b/drivers/gpu/drm/ttm/ttm_backup.c @@ -8,20 +8,6 @@ #include <linux/swap.h> /* - * Casting from randomized struct file * to struct ttm_backup * is fine since - * struct ttm_backup is never defined nor dereferenced. - */ -static struct file *ttm_backup_to_file(struct ttm_backup *backup) -{ - return (void *)backup; -} - -static struct ttm_backup *ttm_file_to_backup(struct file *file) -{ - return (void *)file; -} - -/* * Need to map shmem indices to handle since a handle value * of 0 means error, following the swp_entry_t convention. */ @@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle) * @backup: The struct backup pointer used to obtain the handle * @handle: The handle obtained from the @backup_page function. */ -void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle) +void ttm_backup_drop(struct file *backup, pgoff_t handle) { loff_t start = ttm_backup_handle_to_shmem_idx(handle); start <<= PAGE_SHIFT; - shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start, + shmem_truncate_range(file_inode(backup), start, start + PAGE_SIZE - 1); } @@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle) * @backup: The struct backup pointer used to back up the page. * @dst: The struct page to copy into. * @handle: The handle returned when the page was backed up. - * @intr: Try to perform waits interruptable or at least killable. + * @intr: Try to perform waits interruptible or at least killable. * * Return: 0 on success, Negative error code on failure, notably * -EINTR if @intr was set to true and a signal is pending. */ -int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, +int ttm_backup_copy_page(struct file *backup, struct page *dst, pgoff_t handle, bool intr) { - struct file *filp = ttm_backup_to_file(backup); - struct address_space *mapping = filp->f_mapping; + struct address_space *mapping = backup->f_mapping; struct folio *from_folio; pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle); @@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, * the folio size- and usage. */ s64 -ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, +ttm_backup_backup_page(struct file *backup, struct page *page, bool writeback, pgoff_t idx, gfp_t page_gfp, gfp_t alloc_gfp) { - struct file *filp = ttm_backup_to_file(backup); - struct address_space *mapping = filp->f_mapping; + struct address_space *mapping = backup->f_mapping; unsigned long handle = 0; struct folio *to_folio; int ret; @@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, * * After a call to this function, it's illegal to use the @backup pointer. */ -void ttm_backup_fini(struct ttm_backup *backup) +void ttm_backup_fini(struct file *backup) { - fput(ttm_backup_to_file(backup)); + fput(backup); } /** @@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail); * * Create a backup utilizing shmem objects. * - * Return: A pointer to a struct ttm_backup on success, + * Return: A pointer to a struct file on success, * an error pointer on error. 
*/ -struct ttm_backup *ttm_backup_shmem_create(loff_t size) +struct file *ttm_backup_shmem_create(loff_t size) { - struct file *filp; - - filp = shmem_file_setup("ttm shmem backup", size, 0); - - return ttm_file_to_backup(filp); + return shmem_file_setup("ttm shmem backup", size, 0); } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 95b86003c50d..5bf3c969907c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1093,7 +1093,8 @@ struct ttm_bo_swapout_walk { struct ttm_lru_walk walk; /** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */ gfp_t gfp_flags; - + /** @hit_low: Whether we should attempt to swap BO's with low watermark threshold */ + /** @evict_low: If we cannot swap a bo when @try_low is false (first pass) */ bool hit_low, evict_low; }; diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 83b10706ba89..c2ea865be657 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated, * if successful, populate the page-table and dma-address arrays. */ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore, - struct ttm_backup *backup, + struct file *backup, const struct ttm_operation_ctx *ctx, struct ttm_pool_alloc_state *alloc) @@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt, pgoff_t start_page, pgoff_t end_page) { struct page **pages = &tt->pages[start_page]; - struct ttm_backup *backup = tt->backup; + struct file *backup = tt->backup; pgoff_t i, nr; for (i = start_page; i < end_page; i += nr, pages += nr) { @@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt) long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt, const struct ttm_backup_flags *flags) { - struct ttm_backup *backup = tt->backup; + struct file *backup = tt->backup; struct page *page; unsigned long handle; gfp_t alloc_gfp; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index df0aa6c4b8b8..698cd4bf5e46 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit); */ int ttm_tt_setup_backup(struct ttm_tt *tt) { - struct ttm_backup *backup = + struct file *backup = ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT); if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))) diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 4a7701a33cf8..eb35482f6fb5 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -744,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) return DRM_GPU_SCHED_STAT_NOMINAL; } -/* If the current address or return address have changed, then the GPU - * has probably made progress and we should delay the reset. This - * could fail if the GPU got in an infinite loop in the CL, but that - * is pretty unlikely outside of an i-g-t testcase. 
- */ +static void +v3d_sched_skip_reset(struct drm_sched_job *sched_job) +{ + struct drm_gpu_scheduler *sched = sched_job->sched; + + spin_lock(&sched->job_list_lock); + list_add(&sched_job->list, &sched->pending_list); + spin_unlock(&sched->job_list_lock); +} + static enum drm_gpu_sched_stat v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 *timedout_ctca, u32 *timedout_ctra) @@ -758,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + /* If the current address or return address have changed, then the GPU + * has probably made progress and we should delay the reset. This + * could fail if the GPU got in an infinite loop in the CL, but that + * is pretty unlikely outside of an i-g-t testcase. + */ if (*timedout_ctca != ctca || *timedout_ctra != ctra) { *timedout_ctca = ctca; *timedout_ctra = ctra; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -800,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) struct v3d_dev *v3d = job->base.v3d; u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver)); - /* If we've made progress, skip reset and let the timer get - * rearmed. + /* If we've made progress, skip reset, add the job to the pending + * list, and let the timer get rearmed. */ if (job->timedout_batches != batches) { job->timedout_batches = batches; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index ef1e5256c56a..0e502feaca81 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -46,8 +46,11 @@ static void read_l3cc_table(struct xe_gt *gt, unsigned int fw_ref, i; u32 reg_val; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { + xe_force_wake_put(gt_to_fw(gt), fw_ref); + KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n"); + } for (i = 0; i < info->num_mocs_regs; i++) { if (!(i & 1)) { diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index f2bb9168967c..e2bb156c71fb 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -52,6 +52,8 @@ struct xe_eu_stall_data_stream { struct xe_gt *gt; struct xe_bo *bo; + /* Lock to protect data buffer pointers */ + struct mutex xecore_buf_lock; struct per_xecore_buf *xecore_buf; struct { bool reported_to_user; @@ -208,6 +210,9 @@ int xe_eu_stall_init(struct xe_gt *gt) struct xe_device *xe = gt_to_xe(gt); int ret; + if (!xe_eu_stall_supported_on_platform(xe)) + return 0; + gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL); if (!gt->eu_stall) { ret = -ENOMEM; @@ -378,7 +383,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream) u16 group, instance; unsigned int xecore; - mutex_lock(>->eu_stall->stream_lock); + mutex_lock(&stream->xecore_buf_lock); for_each_dss_steering(xecore, gt, group, instance) { xecore_buf = &stream->xecore_buf[xecore]; read_ptr = xecore_buf->read; @@ -396,7 +401,7 @@ static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream) set_bit(xecore, stream->data_drop.mask); xecore_buf->write = write_ptr; } - mutex_unlock(>->eu_stall->stream_lock); + mutex_unlock(&stream->xecore_buf_lock); 
return min_data_present; } @@ -511,11 +516,13 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st unsigned int xecore; int ret = 0; + mutex_lock(&stream->xecore_buf_lock); if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) { if (!stream->data_drop.reported_to_user) { stream->data_drop.reported_to_user = true; xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n", XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask); + mutex_unlock(&stream->xecore_buf_lock); return -EIO; } stream->data_drop.reported_to_user = false; @@ -527,6 +534,7 @@ static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *st if (ret || count == total_size) break; } + mutex_unlock(&stream->xecore_buf_lock); return total_size ?: (ret ?: -EAGAIN); } @@ -583,6 +591,7 @@ static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream) { struct xe_gt *gt = stream->gt; + mutex_destroy(&stream->xecore_buf_lock); gt->eu_stall->stream = NULL; kfree(stream); } @@ -718,6 +727,7 @@ static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream, } init_waitqueue_head(&stream->poll_wq); + mutex_init(&stream->xecore_buf_lock); INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn); stream->per_xecore_buf_size = per_xecore_buf_size; stream->sampling_rate_mult = props->sampling_rate_mult; diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h index ed9d0f233566..d1c76e503799 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.h +++ b/drivers/gpu/drm/xe/xe_eu_stall.h @@ -7,6 +7,7 @@ #define __XE_EU_STALL_H__ #include "xe_gt_types.h" +#include "xe_sriov.h" size_t xe_eu_stall_get_per_xecore_buf_size(void); size_t xe_eu_stall_data_record_size(struct xe_device *xe); @@ -19,6 +20,6 @@ int xe_eu_stall_stream_open(struct drm_device *dev, static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe) { - return xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20; + return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20); } #endif diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index fd41113f8572..0bcf97063ff6 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -555,6 +555,28 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc) flush_work(&gsc->work); } +void xe_gsc_stop_prepare(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + int ret; + + if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw)) + return; + + xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC); + + /* + * If the GSC FW load or the proxy init are interrupted, the only way + * to recover it is to do an FLR and reload the GSC from scratch. + * Therefore, let's wait for the init to complete before stopping + * operations. 
The proxy init is the last step, so we can just wait on + * that + */ + ret = xe_gsc_wait_for_proxy_init_done(gsc); + if (ret) + xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n"); +} + /* * wa_14015076503: if the GSC FW is loaded, we need to alert it before doing a * GSC engine reset by writing a notification bit in the GS1 register and then diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h index d99f66c38075..b8b8e0810ad9 100644 --- a/drivers/gpu/drm/xe/xe_gsc.h +++ b/drivers/gpu/drm/xe/xe_gsc.h @@ -16,6 +16,7 @@ struct xe_hw_engine; int xe_gsc_init(struct xe_gsc *gsc); int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc); void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc); +void xe_gsc_stop_prepare(struct xe_gsc *gsc); void xe_gsc_load_start(struct xe_gsc *gsc); void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec); diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c index 8cf70b228ff3..d0519cd6704a 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -71,6 +71,17 @@ bool xe_gsc_proxy_init_done(struct xe_gsc *gsc) HECI1_FWSTS1_PROXY_STATE_NORMAL; } +int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc) +{ + struct xe_gt *gt = gsc_to_gt(gsc); + + /* Proxy init can take up to 500ms, so wait double that for safety */ + return xe_mmio_wait32(>->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE), + HECI1_FWSTS1_CURRENT_STATE, + HECI1_FWSTS1_PROXY_STATE_NORMAL, + USEC_PER_SEC, NULL, false); +} + static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set) { struct xe_gt *gt = gsc_to_gt(gsc); diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h index fdef56995cd4..765602221dbc 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.h +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h @@ -12,6 +12,7 @@ struct xe_gsc; int xe_gsc_proxy_init(struct xe_gsc *gsc); bool xe_gsc_proxy_init_done(struct xe_gsc *gsc); +int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc); int xe_gsc_proxy_start(struct xe_gsc *gsc); int xe_gsc_proxy_request_handler(struct xe_gsc *gsc); diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 10a9e3c72b36..66198cf2662c 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -857,7 +857,7 @@ void xe_gt_suspend_prepare(struct xe_gt *gt) fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - xe_uc_stop_prepare(>->uc); + xe_uc_suspend_prepare(>->uc); xe_force_wake_put(gt_to_fw(gt), fw_ref); } diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c index 2d63a69cbfa3..f7005a3643e6 100644 --- a/drivers/gpu/drm/xe/xe_gt_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c @@ -92,22 +92,23 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p) struct xe_hw_engine *hwe; enum xe_hw_engine_id id; unsigned int fw_ref; + int ret = 0; xe_pm_runtime_get(xe); fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - xe_pm_runtime_put(xe); - xe_force_wake_put(gt_to_fw(gt), fw_ref); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto fw_put; } for_each_hw_engine(hwe, gt, id) xe_hw_engine_print(hwe, p); +fw_put: xe_force_wake_put(gt_to_fw(gt), fw_ref); xe_pm_runtime_put(xe); - return 0; + return ret; } static int powergate_info(struct xe_gt *gt, struct drm_printer *p) diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index c5ad9a0a89c2..0c22b3a36655 100644 --- 
a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -435,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss, XE_MAX_EU_FUSE_BITS) * num_dss; - /* user can issue separate page faults per EU and per CS */ + /* + * user can issue separate page faults per EU and per CS + * + * XXX: Multiplier required as compute UMDs are getting PF queue errors + * without it. Follow up on why this multiplier is required. + */ +#define PF_MULTIPLIER 8 pf_queue->num_dw = - (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW; + (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; +#undef PF_MULTIPLIER pf_queue->gt = gt; pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw, diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index f6d523e4c5fe..9095618648bc 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -359,7 +359,7 @@ static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext, ext->reg = XE_REG(extlist->reg.__reg.addr); ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1); - ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); + ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id); ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id); ext->regname = extlist->name; } diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index f8c128524d9f..24c578e1170e 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -79,7 +79,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm) range = kzalloc(sizeof(*range), GFP_KERNEL); if (!range) - return ERR_PTR(-ENOMEM); + return NULL; INIT_LIST_HEAD(&range->garbage_collector_link); xe_vm_get(gpusvm_to_vm(gpusvm)); @@ -947,3 +947,15 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) return 0; } #endif + +/** + * xe_svm_flush() - SVM flush + * @vm: The VM. + * + * Flush all SVM actions. 
+ */ +void xe_svm_flush(struct xe_vm *vm) +{ + if (xe_vm_in_fault_mode(vm)) + flush_work(&vm->svm.garbage_collector.work); +} diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index e059590e5076..be306fe7aaa4 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -72,6 +72,9 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end); int xe_svm_bo_evict(struct xe_bo *bo); void xe_svm_range_debug(struct xe_svm_range *range, const char *operation); + +void xe_svm_flush(struct xe_vm *vm); + #else static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range) { @@ -124,6 +127,11 @@ static inline void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) { } + +static inline void xe_svm_flush(struct xe_vm *vm) +{ +} + #endif /** diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c index c14bd2282044..3a8751a8b92d 100644 --- a/drivers/gpu/drm/xe/xe_uc.c +++ b/drivers/gpu/drm/xe/xe_uc.c @@ -244,7 +244,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc) void xe_uc_stop_prepare(struct xe_uc *uc) { - xe_gsc_wait_for_worker_completion(&uc->gsc); + xe_gsc_stop_prepare(&uc->gsc); xe_guc_stop_prepare(&uc->guc); } @@ -278,6 +278,12 @@ again: goto again; } +void xe_uc_suspend_prepare(struct xe_uc *uc) +{ + xe_gsc_wait_for_worker_completion(&uc->gsc); + xe_guc_stop_prepare(&uc->guc); +} + int xe_uc_suspend(struct xe_uc *uc) { /* GuC submission not enabled, nothing to do */ diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h index 3813c1ede450..c23e6f5e2514 100644 --- a/drivers/gpu/drm/xe/xe_uc.h +++ b/drivers/gpu/drm/xe/xe_uc.h @@ -18,6 +18,7 @@ int xe_uc_reset_prepare(struct xe_uc *uc); void xe_uc_stop_prepare(struct xe_uc *uc); void xe_uc_stop(struct xe_uc *uc); int xe_uc_start(struct xe_uc *uc); +void xe_uc_suspend_prepare(struct xe_uc *uc); int xe_uc_suspend(struct xe_uc *uc); int xe_uc_sanitize_reset(struct xe_uc *uc); void xe_uc_declare_wedged(struct xe_uc *uc); diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 60303998bd61..367c84b90e9e 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3312,8 +3312,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) } /* Ensure all UNMAPs visible */ - if (xe_vm_in_fault_mode(vm)) - flush_work(&vm->svm.garbage_collector.work); + xe_svm_flush(vm); err = down_write_killable(&vm->lock); if (err) diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs index 17c9660da450..ab0e5a72a059 100644 --- a/drivers/gpu/nova-core/gpu.rs +++ b/drivers/gpu/nova-core/gpu.rs @@ -93,7 +93,7 @@ impl Chipset { // For now, redirect to fmt::Debug for convenience. impl fmt::Display for Chipset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index b3b11be11650..59792e00cecf 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -307,7 +307,7 @@ void __init hv_get_partition_id(void) local_irq_save(flags); output = *this_cpu_ptr(hyperv_pcpu_input_arg); - status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, &output); + status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output); pt_id = output->partition_id; local_irq_restore(flags); @@ -566,9 +566,11 @@ int hv_common_cpu_die(unsigned int cpu) * originally allocated memory is reused in hv_common_cpu_init(). 
*/ - synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail); - kfree(*synic_eventring_tail); - *synic_eventring_tail = NULL; + if (hv_root_partition()) { + synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail); + kfree(*synic_eventring_tail); + *synic_eventring_tail = NULL; + } return 0; } diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 29780f3a7478..0b450e53161e 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -477,4 +477,10 @@ static inline int hv_debug_add_dev_dir(struct hv_device *dev) #endif /* CONFIG_HYPERV_TESTING */ +/* Create and remove sysfs entry for memory mapped ring buffers for a channel */ +int hv_create_ring_sysfs(struct vmbus_channel *channel, + int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, + struct vm_area_struct *vma)); +int hv_remove_ring_sysfs(struct vmbus_channel *channel); + #endif /* _HYPERV_VMBUS_H */ diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 8d3cff42bdbb..e3d51a316316 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1802,6 +1802,26 @@ static ssize_t subchannel_id_show(struct vmbus_channel *channel, } static VMBUS_CHAN_ATTR_RO(subchannel_id); +static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj, + const struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj); + + /* + * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer + * is not NULL. + */ + return channel->mmap_ring_buffer(channel, vma); +} + +static struct bin_attribute chan_attr_ring_buffer = { + .attr = { + .name = "ring", + .mode = 0600, + }, + .mmap = hv_mmap_ring_buffer_wrapper, +}; static struct attribute *vmbus_chan_attrs[] = { &chan_attr_out_mask.attr, &chan_attr_in_mask.attr, @@ -1821,6 +1841,11 @@ static struct attribute *vmbus_chan_attrs[] = { NULL }; +static struct bin_attribute *vmbus_chan_bin_attrs[] = { + &chan_attr_ring_buffer, + NULL +}; + /* * Channel-level attribute_group callback function. Returns the permission for * each attribute, and returns 0 if an attribute is not visible. @@ -1841,9 +1866,34 @@ static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj, return attr->mode; } +static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj, + const struct bin_attribute *attr, int idx) +{ + const struct vmbus_channel *channel = + container_of(kobj, struct vmbus_channel, kobj); + + /* Hide ring attribute if channel's ring_sysfs_visible is set to false */ + if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible) + return 0; + + return attr->attr.mode; +} + +static size_t vmbus_chan_bin_size(struct kobject *kobj, + const struct bin_attribute *bin_attr, int a) +{ + const struct vmbus_channel *channel = + container_of(kobj, struct vmbus_channel, kobj); + + return channel->ringbuffer_pagecount << PAGE_SHIFT; +} + static const struct attribute_group vmbus_chan_group = { .attrs = vmbus_chan_attrs, - .is_visible = vmbus_chan_attr_is_visible + .bin_attrs = vmbus_chan_bin_attrs, + .is_visible = vmbus_chan_attr_is_visible, + .is_bin_visible = vmbus_chan_bin_attr_is_visible, + .bin_size = vmbus_chan_bin_size, }; static const struct kobj_type vmbus_chan_ktype = { @@ -1851,6 +1901,63 @@ static const struct kobj_type vmbus_chan_ktype = { .release = vmbus_chan_release, }; +/** + * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel. 
+ * @channel: Pointer to vmbus_channel structure + * @hv_mmap_ring_buffer: function pointer to be called on mmap of the channel's "ring" sysfs + * node, which maps the ring buffer of that channel. + * The function pointer has the following type: + * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, + * struct vm_area_struct *vma) + * It takes a pointer to the channel and a pointer to the vm_area_struct + * used for mmap as arguments. + * + * The sysfs node for a channel's ring buffer is created along with the other attributes; however, + * its visibility is disabled by default and should only be enabled while the use case that + * needs it is running. + * For example, an HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point + * in time, and the "ring" sysfs node is needed only when uio_hv_generic is bound to that device. To avoid + * exposing the ring buffer by default, this function is responsible for enabling its visibility + * for userspace to use. + * Note: Race conditions with userspace are possible, and creating new use cases for this is + * discouraged. It was added to maintain backward compatibility while solving one of the race + * conditions in uio_hv_generic during sysfs creation. + * + * Returns 0 on success or error code on failure. + */ +int hv_create_ring_sysfs(struct vmbus_channel *channel, + int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel, + struct vm_area_struct *vma)) +{ + struct kobject *kobj = &channel->kobj; + + channel->mmap_ring_buffer = hv_mmap_ring_buffer; + channel->ring_sysfs_visible = true; + + return sysfs_update_group(kobj, &vmbus_chan_group); +} +EXPORT_SYMBOL_GPL(hv_create_ring_sysfs); + +/** + * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel. + * @channel: Pointer to vmbus_channel structure + * + * Hide the "ring" sysfs entry for a channel by changing its is_visible attribute and updating the + * sysfs group. + * + * Returns 0 on success or error code on failure.
+ */ +int hv_remove_ring_sysfs(struct vmbus_channel *channel) +{ + struct kobject *kobj = &channel->kobj; + int ret; + + channel->ring_sysfs_visible = false; + ret = sysfs_update_group(kobj, &vmbus_chan_group); + channel->mmap_ring_buffer = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs); + /* * vmbus_add_channel_kobj - setup a sub-directory under device/channels */ diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index 0d4b3935e687..342d47e67586 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -1380,9 +1380,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev) return 0; rpm_disable: - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 16afb9ca19bb..876791d20ed5 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1454,7 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev) (1000 * omap->speed / 8); } - if (of_property_read_bool(node, "mux-states")) { + if (of_property_present(node, "mux-states")) { struct mux_state *mux_state; mux_state = devm_mux_state_get(&pdev->dev, NULL); diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index 8601b9a8b8e7..5127e58eebc7 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = { BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12), ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X, - BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, - BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), IIO_CHAN_SOFT_TIMESTAMP(7) }; diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c index e8cd21fa77a6..cbac622ef821 100644 --- a/drivers/iio/accel/adxl355_core.c +++ b/drivers/iio/accel/adxl355_core.c @@ -231,7 +231,7 @@ struct adxl355_data { u8 transf_buf[3]; struct { u8 buf[14]; - s64 ts; + aligned_s64 ts; } buffer; } __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index add4053e7a02..0c04b2bb7efb 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -601,18 +601,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr) if (ret) return ret; + st->odr = odr; + /* Activity timers depend on ODR */ ret = _adxl367_set_act_time_ms(st, st->act_time_ms); if (ret) return ret; - ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms); - if (ret) - return ret; - - st->odr = odr; - - return 0; + return _adxl367_set_inact_time_ms(st, st->inact_time_ms); } static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr) diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 48e4282964a0..bf1d3923a181 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -1226,8 +1226,11 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq) if (ret) return ret; - if (device_property_read_bool(dev, "wakeup-source")) - device_init_wakeup(dev, true); + if (device_property_read_bool(dev, 
"wakeup-source")) { + ret = devm_device_init_wakeup(dev); + if (ret) + return dev_err_probe(dev, ret, "Failed to init wakeup\n"); + } return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 18559757f908..7fef2727f89e 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -45,7 +45,7 @@ struct ad7266_state { */ struct { __be16 sample[2]; - s64 timestamp; + aligned_s64 timestamp; } data __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c index 4fcb49fdf566..aef85093eb16 100644 --- a/drivers/iio/adc/ad7380.c +++ b/drivers/iio/adc/ad7380.c @@ -1211,6 +1211,9 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev) struct ad7380_state *st = iio_priv(indio_dev); int ret; + spi_offload_trigger_disable(st->offload, st->offload_trigger); + spi_unoptimize_message(&st->offload_msg); + if (st->seq) { ret = regmap_update_bits(st->regmap, AD7380_REG_ADDR_CONFIG1, @@ -1222,10 +1225,6 @@ static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev) st->seq = false; } - spi_offload_trigger_disable(st->offload, st->offload_trigger); - - spi_unoptimize_message(&st->offload_msg); - return 0; } @@ -1611,11 +1610,25 @@ static int ad7380_write_event_config(struct iio_dev *indio_dev, return ret; } -static int ad7380_get_alert_th(struct ad7380_state *st, +static int ad7380_get_alert_th(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, enum iio_event_direction dir, int *val) { - int ret, tmp; + struct ad7380_state *st = iio_priv(indio_dev); + const struct iio_scan_type *scan_type; + int ret, tmp, shift; + + scan_type = iio_get_current_scan_type(indio_dev, chan); + if (IS_ERR(scan_type)) + return PTR_ERR(scan_type); + + /* + * The register value is 12-bits and is compared to the most significant + * bits of raw value, therefore a shift is required to convert this to + * the same scale as the raw value. 
+ */ + shift = scan_type->realbits - 12; switch (dir) { case IIO_EV_DIR_RISING: @@ -1625,7 +1638,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st, if (ret) return ret; - *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp); + *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp) << shift; return IIO_VAL_INT; case IIO_EV_DIR_FALLING: ret = regmap_read(st->regmap, @@ -1634,7 +1647,7 @@ static int ad7380_get_alert_th(struct ad7380_state *st, if (ret) return ret; - *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp); + *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp) << shift; return IIO_VAL_INT; default: return -EINVAL; @@ -1648,7 +1661,6 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev, enum iio_event_info info, int *val, int *val2) { - struct ad7380_state *st = iio_priv(indio_dev); int ret; switch (info) { @@ -1656,7 +1668,7 @@ static int ad7380_read_event_value(struct iio_dev *indio_dev, if (!iio_device_claim_direct(indio_dev)) return -EBUSY; - ret = ad7380_get_alert_th(st, dir, val); + ret = ad7380_get_alert_th(indio_dev, chan, dir, val); iio_device_release_direct(indio_dev); return ret; diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index 1a314fddd7eb..703556eb7257 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -1236,9 +1236,11 @@ static int ad7616_sw_mode_setup(struct iio_dev *indio_dev) st->write_scale = ad7616_write_scale_sw; st->write_os = &ad7616_write_os_sw; - ret = st->bops->sw_mode_config(indio_dev); - if (ret) - return ret; + if (st->bops->sw_mode_config) { + ret = st->bops->sw_mode_config(indio_dev); + if (ret) + return ret; + } /* Activate Burst mode and SEQEN MODE */ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER, @@ -1268,6 +1270,9 @@ static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev) st->write_scale = ad7606_write_scale_sw; st->write_os = &ad7606_write_os_sw; + if (!st->bops->sw_mode_config) + return 0; + return st->bops->sw_mode_config(indio_dev); } diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 885bf0b68e77..179115e90988 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -131,7 +131,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr) { .tx_buf = &st->d16[0], .len = 2, - .cs_change = 0, + .cs_change = 1, }, { .rx_buf = &st->d16[1], .len = 2, diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 5a863005aca6..5e0be36af0c5 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -168,7 +168,7 @@ struct ad7768_state { union { struct { __be32 chan; - s64 timestamp; + aligned_s64 timestamp; } scan; __be32 d32; u8 d8[2]; diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index a1e48a756a7b..359e26e3f5bc 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -466,7 +466,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct { __le16 values[DLN2_ADC_MAX_CHANNELS]; - int64_t timestamp_space; + aligned_s64 timestamp_space; } data; struct dln2_adc_get_all_vals dev_data; struct dln2_adc *dln2 = iio_priv(indio_dev); diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c index 7fb8b2499a1d..b64a8a407168 100644 --- a/drivers/iio/adc/qcom-spmi-iadc.c +++ b/drivers/iio/adc/qcom-spmi-iadc.c @@ -543,7 +543,9 @@ static int iadc_probe(struct platform_device *pdev) else return ret; } else { - device_init_wakeup(iadc->dev, 1); + ret = devm_device_init_wakeup(iadc->dev); + if (ret) + return 
dev_err_probe(iadc->dev, ret, "Failed to init wakeup\n"); } ret = iadc_update_offset(iadc); diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 9a099df79518..5e28bd28b81a 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -520,15 +520,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev) if (info->reset) rockchip_saradc_reset_controller(info->reset); - /* - * Use a default value for the converter clock. - * This may become user-configurable in the future. - */ - ret = clk_set_rate(info->clk, info->data->clk_rate); - if (ret < 0) - return dev_err_probe(&pdev->dev, ret, - "failed to set adc clk rate\n"); - ret = regulator_enable(info->vref); if (ret < 0) return dev_err_probe(&pdev->dev, ret, @@ -555,6 +546,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev) if (IS_ERR(info->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed to get adc clock\n"); + /* + * Use a default value for the converter clock. + * This may become user-configurable in the future. + */ + ret = clk_set_rate(info->clk, info->data->clk_rate); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, + "failed to set adc clk rate\n"); platform_set_drvdata(pdev, indio_dev); diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c index d0bd94912e0a..e05ce1f12065 100644 --- a/drivers/iio/chemical/pms7003.c +++ b/drivers/iio/chemical/pms7003.c @@ -5,7 +5,6 @@ * Copyright (c) Tomasz Duszynski <tduszyns@gmail.com> */ -#include <linux/unaligned.h> #include <linux/completion.h> #include <linux/device.h> #include <linux/errno.h> @@ -19,6 +18,8 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/serdev.h> +#include <linux/types.h> +#include <linux/unaligned.h> #define PMS7003_DRIVER_NAME "pms7003" @@ -76,7 +77,7 @@ struct pms7003_state { /* Used to construct scan to push to the IIO buffer */ struct { u16 data[3]; /* PM1, PM2P5, PM10 */ - s64 ts; + aligned_s64 ts; } scan; }; diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c index 6f4f2ba2c09d..a7888146188d 100644 --- a/drivers/iio/chemical/sps30.c +++ b/drivers/iio/chemical/sps30.c @@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p) int ret; struct { s32 data[4]; /* PM1, PM2P5, PM4, PM10 */ - s64 ts; + aligned_s64 ts; } scan; mutex_lock(&state->lock); diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index ad1882f608c0..2055a03cbeb1 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -66,6 +66,10 @@ static struct { {HID_USAGE_SENSOR_HUMIDITY, 0, 1000, 0}, {HID_USAGE_SENSOR_HINGE, 0, 0, 17453293}, {HID_USAGE_SENSOR_HINGE, HID_USAGE_SENSOR_UNITS_DEGREES, 0, 17453293}, + + {HID_USAGE_SENSOR_HUMAN_PRESENCE, 0, 1, 0}, + {HID_USAGE_SENSOR_HUMAN_PROXIMITY, 0, 1, 0}, + {HID_USAGE_SENSOR_HUMAN_ATTENTION, 0, 1, 0}, }; static void simple_div(int dividend, int divisor, int *whole, diff --git a/drivers/iio/imu/adis16550.c b/drivers/iio/imu/adis16550.c index b14ea8937c7f..28f0dbd0226c 100644 --- a/drivers/iio/imu/adis16550.c +++ b/drivers/iio/imu/adis16550.c @@ -836,7 +836,7 @@ static irqreturn_t adis16550_trigger_handler(int irq, void *p) u16 dummy; bool valid; struct iio_poll_func *pf = p; - __be32 data[ADIS16550_MAX_SCAN_DATA]; + __be32 data[ADIS16550_MAX_SCAN_DATA] __aligned(8); struct iio_dev *indio_dev = pf->indio_dev; struct adis16550 
*st = iio_priv(indio_dev); struct adis *adis = iio_device_get_drvdata(indio_dev); diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c index a86be5af5ccb..2e4469f30d53 100644 --- a/drivers/iio/imu/bmi270/bmi270_core.c +++ b/drivers/iio/imu/bmi270/bmi270_core.c @@ -918,8 +918,7 @@ static int bmi270_configure_imu(struct bmi270_data *data) FIELD_PREP(BMI270_ACC_CONF_ODR_MSK, BMI270_ACC_CONF_ODR_100HZ) | FIELD_PREP(BMI270_ACC_CONF_BWP_MSK, - BMI270_ACC_CONF_BWP_NORMAL_MODE) | - BMI270_PWR_CONF_ADV_PWR_SAVE_MSK); + BMI270_ACC_CONF_BWP_NORMAL_MODE)); if (ret) return dev_err_probe(dev, ret, "Failed to configure accelerometer"); @@ -927,8 +926,7 @@ static int bmi270_configure_imu(struct bmi270_data *data) FIELD_PREP(BMI270_GYR_CONF_ODR_MSK, BMI270_GYR_CONF_ODR_200HZ) | FIELD_PREP(BMI270_GYR_CONF_BWP_MSK, - BMI270_GYR_CONF_BWP_NORMAL_MODE) | - BMI270_PWR_CONF_ADV_PWR_SAVE_MSK); + BMI270_GYR_CONF_BWP_NORMAL_MODE)); if (ret) return dev_err_probe(dev, ret, "Failed to configure gyroscope"); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 3d3b27f28c9d..273196e647a2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -50,7 +50,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) u16 fifo_count; u32 fifo_period; s64 timestamp; - u8 data[INV_MPU6050_OUTPUT_DATA_SIZE]; + u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(8); size_t i, nb; mutex_lock(&st->lock); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 0a7cd8c1aa33..8a9d2593576a 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -392,6 +392,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK)) return 0; + if (!pattern_len) + pattern_len = ST_LSM6DSX_SAMPLE_SIZE; + fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) * ST_LSM6DSX_CHAN_SIZE; fifo_len = (fifo_len / pattern_len) * pattern_len; @@ -623,6 +626,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw) if (!fifo_len) return 0; + if (!pattern_len) + pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE; + for (read_len = 0; read_len < fifo_len; read_len += pattern_len) { err = st_lsm6dsx_read_block(hw, ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR, diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 4fdcc2acc94e..96c6106b95ee 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -2719,8 +2719,11 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id, } if (device_property_read_bool(dev, "wakeup-source") || - (pdata && pdata->wakeup_source)) - device_init_wakeup(dev, true); + (pdata && pdata->wakeup_source)) { + err = devm_device_init_wakeup(dev); + if (err) + return dev_err_probe(dev, err, "Failed to init wakeup\n"); + } return 0; } diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 76b76d12b388..4c65b32d34ce 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -34,9 +34,9 @@ struct prox_state { struct iio_chan_spec channels[MAX_CHANNELS]; u32 channel2usage[MAX_CHANNELS]; u32 human_presence[MAX_CHANNELS]; - int scale_pre_decml; - int scale_post_decml; - int scale_precision; + int scale_pre_decml[MAX_CHANNELS]; + int scale_post_decml[MAX_CHANNELS]; + int 
scale_precision[MAX_CHANNELS]; unsigned long scan_mask[2]; /* One entry plus one terminator. */ int num_channels; }; @@ -116,13 +116,15 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SCALE: - *val = prox_state->scale_pre_decml; - *val2 = prox_state->scale_post_decml; - ret_type = prox_state->scale_precision; + if (chan->scan_index >= prox_state->num_channels) + return -EINVAL; + + *val = prox_state->scale_pre_decml[chan->scan_index]; + *val2 = prox_state->scale_post_decml[chan->scan_index]; + ret_type = prox_state->scale_precision[chan->scan_index]; break; case IIO_CHAN_INFO_OFFSET: - *val = hid_sensor_convert_exponent( - prox_state->prox_attr[chan->scan_index].unit_expo); + *val = 0; ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: @@ -249,6 +251,10 @@ static int prox_parse_report(struct platform_device *pdev, st->prox_attr[index].size); dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr[index].index, st->prox_attr[index].report_id); + st->scale_precision[index] = + hid_sensor_format_scale(usage_id, &st->prox_attr[index], + &st->scale_pre_decml[index], + &st->scale_post_decml[index]); index++; } diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c index 65b295877b41..393a3d2fbe1d 100644 --- a/drivers/iio/light/opt3001.c +++ b/drivers/iio/light/opt3001.c @@ -788,8 +788,9 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) int ret; bool wake_result_ready_queue = false; enum iio_chan_type chan_type = opt->chip_info->chan_type; + bool ok_to_ignore_lock = opt->ok_to_ignore_lock; - if (!opt->ok_to_ignore_lock) + if (!ok_to_ignore_lock) mutex_lock(&opt->lock); ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION); @@ -826,7 +827,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio) } out: - if (!opt->ok_to_ignore_lock) + if (!ok_to_ignore_lock) mutex_unlock(&opt->lock); if (wake_result_ready_queue) diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h index 9d5c30afa9d6..d62a018eaff3 100644 --- a/drivers/iio/pressure/mprls0025pa.h +++ b/drivers/iio/pressure/mprls0025pa.h @@ -34,16 +34,6 @@ struct iio_dev; struct mpr_data; struct mpr_ops; -/** - * struct mpr_chan - * @pres: pressure value - * @ts: timestamp - */ -struct mpr_chan { - s32 pres; - s64 ts; -}; - enum mpr_func_id { MPR_FUNCTION_A, MPR_FUNCTION_B, @@ -69,6 +59,8 @@ enum mpr_func_id { * reading in a loop until data is ready * @completion: handshake from irq to read * @chan: channel values for buffered mode + * @chan.pres: pressure value + * @chan.ts: timestamp * @buffer: raw conversion data */ struct mpr_data { @@ -87,7 +79,10 @@ struct mpr_data { struct gpio_desc *gpiod_reset; int irq; struct completion completion; - struct mpr_chan chan; + struct { + s32 pres; + aligned_s64 ts; + } chan; u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index c28a7a6dea5f..555a61e2f3fd 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = { struct maxim_thermocouple_data { struct spi_device *spi; const struct maxim_thermocouple_chip *chip; + char tc_type; u8 buffer[16] __aligned(IIO_DMA_MINALIGN); - char tc_type; }; static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, diff --git a/drivers/input/joystick/magellan.c 
b/drivers/input/joystick/magellan.c index d73389af4dd5..7622638e5bb8 100644 --- a/drivers/input/joystick/magellan.c +++ b/drivers/input/joystick/magellan.c @@ -48,7 +48,7 @@ struct magellan { static int magellan_crunch_nibbles(unsigned char *data, int count) { - static unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?"; + static const unsigned char nibbles[16] __nonstring = "0AB3D56GH9:K<MN?"; do { if (data[count] == nibbles[data[count] & 0xf]) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index c33e6f33265b..57a5ff3d1992 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -77,12 +77,13 @@ * xbox d-pads should map to buttons, as is required for DDR pads * but we map them to axes when possible to simplify things */ -#define MAP_DPAD_TO_BUTTONS (1 << 0) -#define MAP_TRIGGERS_TO_BUTTONS (1 << 1) -#define MAP_STICKS_TO_NULL (1 << 2) -#define MAP_SELECT_BUTTON (1 << 3) -#define MAP_PADDLES (1 << 4) -#define MAP_PROFILE_BUTTON (1 << 5) +#define MAP_DPAD_TO_BUTTONS BIT(0) +#define MAP_TRIGGERS_TO_BUTTONS BIT(1) +#define MAP_STICKS_TO_NULL BIT(2) +#define MAP_SHARE_BUTTON BIT(3) +#define MAP_PADDLES BIT(4) +#define MAP_PROFILE_BUTTON BIT(5) +#define MAP_SHARE_OFFSET BIT(6) #define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \ MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL) @@ -135,14 +136,14 @@ static const struct xpad_device { { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE }, - { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */ + { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, - { 0x044f, 0xd01e, "ThrustMaster, Inc. ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, + { 0x044f, 0xd01e, "ThrustMaster, Inc. 
ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX }, { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, @@ -159,7 +160,7 @@ static const struct xpad_device { { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE }, { 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE }, - { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, + { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE }, { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 }, { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 }, { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 }, @@ -205,13 +206,13 @@ static const struct xpad_device { { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 }, { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, - { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, + { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 }, { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, - { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE }, + { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE }, { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, @@ -240,7 +241,7 @@ static const struct xpad_device { { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, - { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE }, + { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, @@ -281,6 +282,7 @@ static const struct xpad_device { { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, + { 0x0f0d, 0x01b2, "HORI Taiko No Tatsujin Drum Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, @@ -353,6 +355,8 @@ static const struct xpad_device { { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for 
Xbox Series X|S", 0, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, + { 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, + { 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, @@ -384,13 +388,16 @@ static const struct xpad_device { { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE }, { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE }, + { 0x2dc8, 0x200f, "8BitDo Ultimate 3-mode Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 }, + { 0x2e24, 0x0423, "Hyperkin DuchesS Xbox One pad", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE }, - { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, + { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, @@ -714,8 +721,10 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on), + XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_led_on), XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth), + XBOXONE_INIT_PKT(0x0f0d, 0x01b2, xboxone_pdp_auth), XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), @@ -1027,7 +1036,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha * The report format was gleaned from * https://github.com/kylelemons/xbox/blob/master/xbox.go */ -static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data) +static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len) { struct input_dev *dev = xpad->dev; bool do_sync = false; @@ -1068,8 +1077,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char /* menu/view buttons */ input_report_key(dev, BTN_START, data[4] & BIT(2)); input_report_key(dev, BTN_SELECT, data[4] & BIT(3)); - if (xpad->mapping & MAP_SELECT_BUTTON) - input_report_key(dev, KEY_RECORD, data[22] & BIT(0)); + if (xpad->mapping & MAP_SHARE_BUTTON) { + if (xpad->mapping & MAP_SHARE_OFFSET) + input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0)); + else + input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0)); + } /* buttons A,B,X,Y */ input_report_key(dev, BTN_A, data[4] & BIT(4)); @@ -1217,7 +1230,7 @@ static 
void xpad_irq_in(struct urb *urb) xpad360w_process_packet(xpad, 0, xpad->idata); break; case XTYPE_XBOXONE: - xpadone_process_packet(xpad, 0, xpad->idata); + xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length); break; default: xpad_process_packet(xpad, 0, xpad->idata); @@ -1944,7 +1957,7 @@ static int xpad_init_input(struct usb_xpad *xpad) xpad->xtype == XTYPE_XBOXONE) { for (i = 0; xpad360_btn[i] >= 0; i++) input_set_capability(input_dev, EV_KEY, xpad360_btn[i]); - if (xpad->mapping & MAP_SELECT_BUTTON) + if (xpad->mapping & MAP_SHARE_BUTTON) input_set_capability(input_dev, EV_KEY, KEY_RECORD); } else { for (i = 0; xpad_btn[i] >= 0; i++) diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c index 5ad6be914160..061d48350df6 100644 --- a/drivers/input/keyboard/mtk-pmic-keys.c +++ b/drivers/input/keyboard/mtk-pmic-keys.c @@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys, u32 value, mask; int error; - kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs; - kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs; + kregs_home = ®s->keys_regs[MTK_PMIC_HOMEKEY_INDEX]; + kregs_pwr = ®s->keys_regs[MTK_PMIC_PWRKEY_INDEX]; error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec", &long_press_debounce); diff --git a/drivers/input/misc/hisi_powerkey.c b/drivers/input/misc/hisi_powerkey.c index d3c293a95d32..d315017324d9 100644 --- a/drivers/input/misc/hisi_powerkey.c +++ b/drivers/input/misc/hisi_powerkey.c @@ -30,7 +30,7 @@ static irqreturn_t hi65xx_power_press_isr(int irq, void *q) { struct input_dev *input = q; - pm_wakeup_event(input->dev.parent, MAX_HELD_TIME); + pm_wakeup_dev_event(input->dev.parent, MAX_HELD_TIME, true); input_report_key(input, KEY_POWER, 1); input_sync(input); diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index 8d7303fc13bc..1cfadd73829f 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -74,9 +74,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int return -1; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + break; + case SND_TONE: + break; + default: + return -1; } if (value > 20 && value < 32767) @@ -109,9 +114,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned return -1; switch (code) { - case SND_BELL: if (value) value = 1000; - case SND_TONE: break; - default: return -1; + case SND_BELL: + if (value) + value = 1000; + break; + case SND_TONE: + break; + default: + return -1; } if (value > 20 && value < 32767) diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 309c360aab55..c5c88a75a019 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -164,6 +164,7 @@ static const char * const topbuttonpad_pnp_ids[] = { #ifdef CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS static const char * const smbus_pnp_ids[] = { /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ + "DLL060d", /* Dell Precision M3800 */ "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN0049", /* Yoga 11e */ @@ -190,11 +191,15 @@ static const char * const smbus_pnp_ids[] = { "LEN2054", /* E480 */ "LEN2055", /* E580 */ "LEN2068", /* T14 Gen 1 */ + "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */ + "SYN3003", /* HP EliteBook 850 G1 */ "SYN3015", /* HP EliteBook 840 G2 */ "SYN3052", /* HP EliteBook 840 G4 */ 
"SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ + "TOS01f6", /* Dynabook Portege X30L-G */ + "TOS0213", /* Dynabook Portege X30-D */ NULL }; #endif diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c index eafe5a9b8964..071b7c9bf566 100644 --- a/drivers/input/touchscreen/cyttsp5.c +++ b/drivers/input/touchscreen/cyttsp5.c @@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on) int rc; SET_CMD_REPORT_TYPE(cmd[0], 0); - SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP); + SET_CMD_REPORT_ID(cmd[0], state); SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER); rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd)); @@ -870,13 +870,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq, ts->input->phys = ts->phys; input_set_drvdata(ts->input, ts); - /* Reset the gpio to be in a reset state */ + /* Assert gpio to be in a reset state */ ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ts->reset_gpio)) { error = PTR_ERR(ts->reset_gpio); dev_err(dev, "Failed to request reset gpio, error %d\n", error); return error; } + + fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */ + gpiod_set_value_cansleep(ts->reset_gpio, 0); /* Need a delay to have device up */ diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c index a94a1997f96b..af0fb38bcfdc 100644 --- a/drivers/input/touchscreen/stmpe-ts.c +++ b/drivers/input/touchscreen/stmpe-ts.c @@ -366,12 +366,7 @@ static struct platform_driver stmpe_ts_driver = { }; module_platform_driver(stmpe_ts_driver); -static const struct of_device_id stmpe_ts_ids[] = { - { .compatible = "st,stmpe-ts", }, - { }, -}; -MODULE_DEVICE_TABLE(of, stmpe_ts_ids); - +MODULE_ALIAS("platform:stmpe-ts"); MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>"); MODULE_DESCRIPTION("STMPEXXX touchscreen driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index dd9e26b7b718..14aa0d77df26 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3664,6 +3664,14 @@ found: while (*uid == '0' && *(uid + 1)) uid++; + if (strlen(hid) >= ACPIHID_HID_LEN) { + pr_err("Invalid command line: hid is too long\n"); + return 1; + } else if (strlen(uid) >= ACPIHID_UID_LEN) { + pr_err("Invalid command line: uid is too long\n"); + return 1; + } + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 9ba596430e7c..980cc6b33c43 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -411,6 +411,12 @@ struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev, return ERR_CAST(smmu_domain); smmu_domain->domain.type = IOMMU_DOMAIN_SVA; smmu_domain->domain.ops = &arm_smmu_sva_domain_ops; + + /* + * Choose page_size as the leaf page size for invalidation when + * ARM_SMMU_FEAT_RANGE_INV is present + */ + smmu_domain->domain.pgsize_bitmap = PAGE_SIZE; smmu_domain->smmu = smmu; ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index b4c21aaed126..48d910399a1b 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3388,6 +3388,7 @@ 
static int arm_smmu_insert_master(struct arm_smmu_device *smmu, mutex_lock(&smmu->streams_mutex); for (i = 0; i < fwspec->num_ids; i++) { struct arm_smmu_stream *new_stream = &master->streams[i]; + struct rb_node *existing; u32 sid = fwspec->ids[i]; new_stream->id = sid; @@ -3398,11 +3399,21 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu, break; /* Insert into SID tree */ - if (rb_find_add(&new_stream->node, &smmu->streams, - arm_smmu_streams_cmp_node)) { - dev_warn(master->dev, "stream %u already in tree\n", - sid); - ret = -EINVAL; + existing = rb_find_add(&new_stream->node, &smmu->streams, + arm_smmu_streams_cmp_node); + if (existing) { + struct arm_smmu_master *existing_master = + rb_entry(existing, struct arm_smmu_stream, node) + ->master; + + /* Bridged PCI devices may end up with duplicated IDs */ + if (existing_master == master) + continue; + + dev_warn(master->dev, + "Aliasing StreamID 0x%x (from %s) unsupported, expect DMA to be broken\n", + sid, dev_name(existing_master->dev)); + ret = -ENODEV; break; } } @@ -4429,6 +4440,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); if (FIELD_GET(IDR3_RIL, reg)) smmu->features |= ARM_SMMU_FEAT_RANGE_INV; + if (FIELD_GET(IDR3_FWB, reg)) + smmu->features |= ARM_SMMU_FEAT_S2FWB; /* IDR5 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b29da2d96d0b..cb0b993bebb4 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3785,6 +3785,22 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) intel_iommu_debugfs_create_dev(info); + return &iommu->iommu; +free_table: + intel_pasid_free_table(dev); +clear_rbtree: + device_rbtree_remove(info); +free: + kfree(info); + + return ERR_PTR(ret); +} + +static void intel_iommu_probe_finalize(struct device *dev) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + /* * The PCIe spec, in its wisdom, declares that the behaviour of the * device is undefined if you enable PASID support after ATS support. @@ -3792,22 +3808,12 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) * we can't yet know if we're ever going to use it. 
*/ if (info->pasid_supported && - !pci_enable_pasid(pdev, info->pasid_supported & ~1)) + !pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1)) info->pasid_enabled = 1; - if (sm_supported(iommu)) + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) iommu_enable_pci_ats(info); iommu_enable_pci_pri(info); - - return &iommu->iommu; -free_table: - intel_pasid_free_table(dev); -clear_rbtree: - device_rbtree_remove(info); -free: - kfree(info); - - return ERR_PTR(ret); } static void intel_iommu_release_device(struct device *dev) @@ -4391,6 +4397,7 @@ const struct iommu_ops intel_iommu_ops = { .domain_alloc_sva = intel_svm_domain_alloc, .domain_alloc_nested = intel_iommu_domain_alloc_nested, .probe_device = intel_iommu_probe_device, + .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .device_group = intel_iommu_device_group, @@ -4432,6 +4439,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx); +/* QM57/QS57 integrated gfx malfunctions with dmar */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx); + /* Broadwell igfx malfunctions with dmar */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx); @@ -4509,7 +4519,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c index 7942d8eb3d00..f772deb9cba5 100644 --- a/drivers/irqchip/irq-qcom-mpm.c +++ b/drivers/irqchip/irq-qcom-mpm.c @@ -227,6 +227,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; + if (pin == GPIO_NO_WAKE_IRQ) + return irq_domain_disconnect_hierarchy(domain, virq); + ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, &qcom_mpm_chip, priv); if (ret) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9c8ed65cd87e..f0b5a6931161 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -68,6 +68,8 @@ #define LIST_DIRTY 1 #define LIST_SIZE 2 +#define SCAN_RESCHED_CYCLE 16 + /*--------------------------------------------------------------*/ /* @@ -2424,7 +2426,12 @@ static void __scan(struct dm_bufio_client *c) atomic_long_dec(&c->need_shrink); freed++; - cond_resched(); + + if (unlikely(freed % SCAN_RESCHED_CYCLE == 0)) { + dm_bufio_unlock(c); + cond_resched(); + dm_bufio_lock(c); + } } } } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 2a283feb3319..cc3d3897ef42 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -5164,7 +5164,7 @@ static void dm_integrity_dtr(struct dm_target *ti) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); BUG_ON(!list_empty(&ic->wait_list)); - if (ic->mode == 'B') + if (ic->mode == 'B' && ic->bitmap_flush_work.work.func) cancel_delayed_work_sync(&ic->bitmap_flush_work); if (ic->metadata_wq) destroy_workqueue(ic->metadata_wq); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c 
index 35100a435c88..6b23e777e10e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -523,8 +523,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv) gfp = GFP_NOIO; } argv = kmalloc_array(new_size, sizeof(*argv), gfp); - if (argv && old_argv) { - memcpy(argv, old_argv, *size * sizeof(*argv)); + if (argv) { + if (old_argv) + memcpy(argv, old_argv, *size * sizeof(*argv)); *size = new_size; } @@ -1049,7 +1050,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * unsigned int min_pool_size = 0, pool_size; struct dm_md_mempools *pools; unsigned int bioset_flags = 0; - bool mempool_needs_integrity = t->integrity_supported; if (unlikely(type == DM_TYPE_NONE)) { DMERR("no table type is set, can't allocate mempools"); @@ -1074,8 +1074,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * per_io_data_size = max(per_io_data_size, ti->per_io_data_size); min_pool_size = max(min_pool_size, ti->num_flush_bios); - - mempool_needs_integrity |= ti->mempool_needs_integrity; } pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); front_pad = roundup(per_io_data_size, @@ -1175,7 +1173,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, t = dm_get_live_table(md, &srcu_idx); if (!t) - return 0; + goto put_live_table; for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); @@ -1186,6 +1184,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, (void *)key); } +put_live_table: dm_put_live_table(md, srcu_idx); return 0; } diff --git a/drivers/media/cec/i2c/Kconfig b/drivers/media/cec/i2c/Kconfig index b9d21643eef1..c31abc26f602 100644 --- a/drivers/media/cec/i2c/Kconfig +++ b/drivers/media/cec/i2c/Kconfig @@ -16,6 +16,7 @@ config CEC_CH7322 config CEC_NXP_TDA9950 tristate "NXP Semiconductors TDA9950/TDA998X HDMI CEC" + depends on I2C select CEC_NOTIFIER select CEC_CORE default DRM_I2C_NXP_TDA998X diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index e576b213084d..e45ba127069f 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -1149,8 +1149,11 @@ config VIDEO_ISL7998X config VIDEO_LT6911UXE tristate "Lontium LT6911UXE decoder" - depends on ACPI && VIDEO_DEV + depends on ACPI && VIDEO_DEV && I2C select V4L2_FWNODE + select V4L2_CCI_I2C + select MEDIA_CONTROLLER + select VIDEO_V4L2_SUBDEV_API help This is a Video4Linux2 sensor-level driver for the Lontium LT6911UXE HDMI to MIPI CSI-2 bridge. 
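The dm-table.c realloc_argv() hunk above is easy to misread in this flattened form: the fix updates the caller's recorded size whenever the new allocation succeeds, instead of only when an old array existed to copy from, so the very first allocation no longer leaves *size stuck at its initial value. The following is a minimal user-space sketch of the same pattern, with a hypothetical grow_argv() helper standing in for the kernel function and plain malloc()/memcpy() instead of kmalloc_array(); it is an illustration of the idiom, not the kernel code itself.

#include <stdlib.h>
#include <string.h>

/* Grow a pointer array; *size tracks its current capacity. */
char **grow_argv(unsigned int *size, char **old_argv)
{
	unsigned int new_size = *size ? *size * 2 : 8;
	char **argv = malloc(new_size * sizeof(*argv));

	if (argv) {
		/* Copy old entries only if there are any ... */
		if (old_argv)
			memcpy(argv, old_argv, *size * sizeof(*argv));
		/* ... but record the new capacity on every successful allocation. */
		*size = new_size;
	}

	/* On failure, old_argv and *size are left untouched for the caller. */
	return argv;
}
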
diff --git a/drivers/media/platform/synopsys/hdmirx/Kconfig b/drivers/media/platform/synopsys/hdmirx/Kconfig index 27e6706f84a3..4321f985f632 100644 --- a/drivers/media/platform/synopsys/hdmirx/Kconfig +++ b/drivers/media/platform/synopsys/hdmirx/Kconfig @@ -2,6 +2,7 @@ config VIDEO_SYNOPSYS_HDMIRX tristate "Synopsys DesignWare HDMI Receiver driver" + depends on ARCH_ROCKCHIP || COMPILE_TEST depends on VIDEO_DEV select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/test-drivers/vivid/Kconfig b/drivers/media/test-drivers/vivid/Kconfig index e95edc0f22bf..cc470070a7a5 100644 --- a/drivers/media/test-drivers/vivid/Kconfig +++ b/drivers/media/test-drivers/vivid/Kconfig @@ -32,7 +32,8 @@ config VIDEO_VIVID_CEC config VIDEO_VIVID_OSD bool "Enable Framebuffer for testing Output Overlay" - depends on VIDEO_VIVID && FB + depends on VIDEO_VIVID && FB_CORE + depends on VIDEO_VIVID=m || FB_CORE=y default y select FB_IOMEM_HELPERS help diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 6824131b69b1..264e11fa58ea 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -691,8 +691,8 @@ config MMC_TMIO_CORE config MMC_SDHI tristate "Renesas SDHI SD/SDIO controller support" depends on SUPERH || ARCH_RENESAS || COMPILE_TEST + depends on (RESET_CONTROLLER && REGULATOR) || !OF select MMC_TMIO_CORE - select RESET_CONTROLLER if ARCH_RENESAS help This provides support for the SDHI SD/SDIO controller found in Renesas SuperH, ARM and ARM64 based SoCs diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fa6526be3638..8c83e203c516 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -1179,7 +1179,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (IS_ERR(rdev)) { dev_err(dev, "regulator register failed err=%ld", PTR_ERR(rdev)); ret = PTR_ERR(rdev); - goto efree; + goto edisclk; } priv->rdev = rdev; } @@ -1243,26 +1243,26 @@ int renesas_sdhi_probe(struct platform_device *pdev, num_irqs = platform_irq_count(pdev); if (num_irqs < 0) { ret = num_irqs; - goto eirq; + goto edisclk; } /* There must be at least one IRQ source */ if (!num_irqs) { ret = -ENXIO; - goto eirq; + goto edisclk; } for (i = 0; i < num_irqs; i++) { irq = platform_get_irq(pdev, i); if (irq < 0) { ret = irq; - goto eirq; + goto edisclk; } ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq; + goto edisclk; } ret = tmio_mmc_host_probe(host); @@ -1274,8 +1274,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, return ret; -eirq: - tmio_mmc_host_remove(host); edisclk: renesas_sdhi_clk_disable(host); efree: diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 884a6352c42b..c2c116ce1087 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -2379,6 +2379,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, SET_NETDEV_DEV(net_dev, dev); m_can_of_parse_mram(class_dev, mram_config_vals); + spin_lock_init(&class_dev->tx_handling_spinlock); out: return class_dev; } @@ -2462,9 +2463,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register); void m_can_class_unregister(struct m_can_classdev *cdev) { + unregister_candev(cdev->net); if (cdev->is_peripheral) can_rx_offload_del(&cdev->offload); - unregister_candev(cdev->net); } EXPORT_SYMBOL_GPL(m_can_class_unregister); diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c index 
7107a37da36c..c3fb3176ce42 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -937,8 +937,8 @@ static void rkcanfd_remove(struct platform_device *pdev) struct rkcanfd_priv *priv = platform_get_drvdata(pdev); struct net_device *ndev = priv->ndev; - can_rx_offload_del(&priv->offload); rkcanfd_unregister(priv); + can_rx_offload_del(&priv->offload); free_candev(ndev); } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 3bc56517fe7a..c30b04f8fc0d 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { .brp_inc = 1, }; +/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of + * [-64,63] for TDCO, indicating a relative TDCO. + * + * Manual tests have shown, that using a relative TDCO configuration + * results in bus off, while an absolute configuration works. + * + * For TDCO use the max value (63) from the data sheet, but 0 as the + * minimum. + */ +static const struct can_tdc_const mcp251xfd_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 63, + .tdco_min = 0, + .tdco_max = 63, + .tdcf_min = 0, + .tdcf_max = 0, +}; + static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) { switch (model) { @@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) { const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; - u32 val = 0; - s8 tdco; + u32 tdcmod, val = 0; int err; /* CAN Control Register @@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) return err; /* Transmitter Delay Compensation */ - tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), - -64, 63); - val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, - MCP251XFD_REG_TDC_TDCMOD_AUTO) | - FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); + if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO; + else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL; + else + tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; + + val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco); return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); } @@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; + priv->can.tdc_const = &mcp251xfd_tdc_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | - CAN_CTRLMODE_CC_LEN8_DLC; + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO | + CAN_CTRLMODE_TDC_MANUAL; set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); priv->ndev = ndev; priv->spi = spi; @@ -2174,8 +2198,8 @@ static void mcp251xfd_remove(struct spi_device *spi) struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct net_device *ndev = priv->ndev; - can_rx_offload_del(&priv->offload); mcp251xfd_unregister(priv); + can_rx_offload_del(&priv->offload); spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); } diff --git 
a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index e5ba71897906..9eb39cfa5fb2 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); } + vc1 &= ~VC1_RX_MCST_FWD_EN; + if (enable) { vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; - vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; + vc1 |= VC1_RX_MCST_UNTAG_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; if (enable_filtering) { vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; vc5 |= VC5_DROP_VTABLE_MISS; } else { - vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; vc5 &= ~VC5_DROP_VTABLE_MISS; } @@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, } else { vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); - vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); + vc1 &= ~VC1_RX_MCST_UNTAG_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; vc5 &= ~VC5_DROP_VTABLE_MISS; @@ -576,6 +578,18 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable) b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg); } +int b53_setup_port(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + + b53_port_set_ucast_flood(dev, port, true); + b53_port_set_mcast_flood(dev, port, true); + b53_port_set_learning(dev, port, false); + + return 0; +} +EXPORT_SYMBOL(b53_setup_port); + int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; @@ -588,10 +602,6 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - b53_port_set_ucast_flood(dev, port, true); - b53_port_set_mcast_flood(dev, port, true); - b53_port_set_learning(dev, port, false); - if (dev->ops->irq_enable) ret = dev->ops->irq_enable(dev, port); if (ret) @@ -722,10 +732,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl); b53_brcm_hdr_setup(dev->ds, port); - - b53_port_set_ucast_flood(dev, port, true); - b53_port_set_mcast_flood(dev, port, true); - b53_port_set_learning(dev, port, false); } static void b53_enable_mib(struct b53_device *dev) @@ -761,6 +767,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port) return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port); } +static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + struct dsa_port *dp; + + if (!dev->vlan_filtering) + return true; + + dp = dsa_to_port(ds, port); + + if (dsa_port_is_cpu(dp)) + return true; + + return dp->bridge == NULL; +} + int b53_configure_vlan(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; @@ -779,7 +801,7 @@ int b53_configure_vlan(struct dsa_switch *ds) b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering); + b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering); /* Create an untagged VLAN entry for the default PVID in case * CONFIG_VLAN_8021Q is disabled and there are no calls to @@ -787,26 +809,39 @@ int b53_configure_vlan(struct dsa_switch *ds) * entry. 
Do this only when the tagging protocol is not * DSA_TAG_PROTO_NONE */ + v = &dev->vlans[def_vid]; b53_for_each_port(dev, i) { - v = &dev->vlans[def_vid]; - v->members |= BIT(i); + if (!b53_vlan_port_may_join_untagged(ds, i)) + continue; + + vl.members |= BIT(i); if (!b53_vlan_port_needs_forced_tagged(ds, i)) - v->untag = v->members; - b53_write16(dev, B53_VLAN_PAGE, - B53_VLAN_PORT_DEF_TAG(i), def_vid); + vl.untag = vl.members; + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i), + def_vid); } + b53_set_vlan_entry(dev, def_vid, &vl); - /* Upon initial call we have not set-up any VLANs, but upon - * system resume, we need to restore all VLAN entries. - */ - for (vid = def_vid; vid < dev->num_vlans; vid++) { - v = &dev->vlans[vid]; + if (dev->vlan_filtering) { + /* Upon initial call we have not set-up any VLANs, but upon + * system resume, we need to restore all VLAN entries. + */ + for (vid = def_vid + 1; vid < dev->num_vlans; vid++) { + v = &dev->vlans[vid]; - if (!v->members) - continue; + if (!v->members) + continue; + + b53_set_vlan_entry(dev, vid, v); + b53_fast_age_vlan(dev, vid); + } - b53_set_vlan_entry(dev, vid, v); - b53_fast_age_vlan(dev, vid); + b53_for_each_port(dev, i) { + if (!dsa_is_cpu_port(ds, i)) + b53_write16(dev, B53_VLAN_PAGE, + B53_VLAN_PORT_DEF_TAG(i), + dev->ports[i].pvid); + } } return 0; @@ -1125,7 +1160,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources); static int b53_setup(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; + struct b53_vlan *vl; unsigned int port; + u16 pvid; int ret; /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set @@ -1133,12 +1170,26 @@ static int b53_setup(struct dsa_switch *ds) */ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE; + /* The switch does not tell us the original VLAN for untagged + * packets, so keep the CPU port always tagged. 
+ */ + ds->untag_vlan_aware_bridge_pvid = true; + ret = b53_reset_switch(dev); if (ret) { dev_err(ds->dev, "failed to reset switch\n"); return ret; } + /* setup default vlan for filtering mode */ + pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; + b53_for_each_port(dev, port) { + vl->members |= BIT(port); + if (!b53_vlan_port_needs_forced_tagged(ds, port)) + vl->untag |= BIT(port); + } + b53_reset_mib(dev); ret = b53_apply_config(dev); @@ -1492,7 +1543,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, { struct b53_device *dev = ds->priv; - b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering); + if (dev->vlan_filtering != vlan_filtering) { + dev->vlan_filtering = vlan_filtering; + b53_apply_config(dev); + } return 0; } @@ -1517,7 +1571,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid >= dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, port, true, ds->vlan_filtering); + b53_enable_vlan(dev, port, true, dev->vlan_filtering); return 0; } @@ -1530,18 +1584,29 @@ int b53_vlan_add(struct dsa_switch *ds, int port, bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; struct b53_vlan *vl; + u16 old_pvid, new_pvid; int err; err = b53_vlan_prepare(ds, port, vlan); if (err) return err; - vl = &dev->vlans[vlan->vid]; + if (vlan->vid == 0) + return 0; - b53_get_vlan_entry(dev, vlan->vid, vl); + old_pvid = dev->ports[port].pvid; + if (pvid) + new_pvid = vlan->vid; + else if (!pvid && vlan->vid == old_pvid) + new_pvid = b53_default_pvid(dev); + else + new_pvid = old_pvid; + dev->ports[port].pvid = new_pvid; + + vl = &dev->vlans[vlan->vid]; - if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev)) - untagged = true; + if (dsa_is_cpu_port(ds, port)) + untagged = false; vl->members |= BIT(port); if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) @@ -1549,13 +1614,16 @@ int b53_vlan_add(struct dsa_switch *ds, int port, else vl->untag &= ~BIT(port); + if (!dev->vlan_filtering) + return 0; + b53_set_vlan_entry(dev, vlan->vid, vl); b53_fast_age_vlan(dev, vlan->vid); - if (pvid && !dsa_is_cpu_port(ds, port)) { + if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), - vlan->vid); - b53_fast_age_vlan(dev, vlan->vid); + new_pvid); + b53_fast_age_vlan(dev, old_pvid); } return 0; @@ -1570,20 +1638,25 @@ int b53_vlan_del(struct dsa_switch *ds, int port, struct b53_vlan *vl; u16 pvid; - b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + if (vlan->vid == 0) + return 0; - vl = &dev->vlans[vlan->vid]; + pvid = dev->ports[port].pvid; - b53_get_vlan_entry(dev, vlan->vid, vl); + vl = &dev->vlans[vlan->vid]; vl->members &= ~BIT(port); if (pvid == vlan->vid) pvid = b53_default_pvid(dev); + dev->ports[port].pvid = pvid; if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port)) vl->untag &= ~(BIT(port)); + if (!dev->vlan_filtering) + return 0; + b53_set_vlan_entry(dev, vlan->vid, vl); b53_fast_age_vlan(dev, vlan->vid); @@ -1916,8 +1989,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, struct netlink_ext_ack *extack) { struct b53_device *dev = ds->priv; + struct b53_vlan *vl; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - u16 pvlan, reg; + u16 pvlan, reg, pvid; unsigned int i; /* On 7278, port 7 which connects to the ASP should only receive @@ -1926,15 +2000,29 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, 
if (dev->chip_id == BCM7278_DEVICE_ID && port == 7) return -EINVAL; - /* Make this port leave the all VLANs join since we will have proper - * VLAN entries from now on - */ - if (is58xx(dev)) { - b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); - reg &= ~BIT(port); - if ((reg & BIT(cpu_port)) == BIT(cpu_port)) - reg &= ~BIT(cpu_port); - b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; + + if (dev->vlan_filtering) { + /* Make this port leave the all VLANs join since we will have + * proper VLAN entries from now on + */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, + ®); + reg &= ~BIT(port); + if ((reg & BIT(cpu_port)) == BIT(cpu_port)) + reg &= ~BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, + reg); + } + + b53_get_vlan_entry(dev, pvid, vl); + vl->members &= ~BIT(port); + if (vl->members == BIT(cpu_port)) + vl->members &= ~BIT(cpu_port); + vl->untag = vl->members; + b53_set_vlan_entry(dev, pvid, vl); } b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); @@ -1967,7 +2055,7 @@ EXPORT_SYMBOL(b53_br_join); void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct b53_device *dev = ds->priv; - struct b53_vlan *vl = &dev->vlans[0]; + struct b53_vlan *vl; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; unsigned int i; u16 pvlan, reg, pvid; @@ -1993,15 +2081,18 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) dev->ports[port].vlan_ctl_mask = pvlan; pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; + + if (dev->vlan_filtering) { + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { + b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); + reg |= BIT(port); + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); + } - /* Make this port join all VLANs without VLAN entries */ - if (is58xx(dev)) { - b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, ®); - reg |= BIT(port); - if (!(reg & BIT(cpu_port))) - reg |= BIT(cpu_port); - b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); - } else { b53_get_vlan_entry(dev, pvid, vl); vl->members |= BIT(port) | BIT(cpu_port); vl->untag |= BIT(port) | BIT(cpu_port); @@ -2300,6 +2391,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, .phylink_get_caps = b53_phylink_get_caps, + .port_setup = b53_setup_port, .port_enable = b53_enable_port, .port_disable = b53_disable_port, .support_eee = b53_support_eee, @@ -2757,6 +2849,7 @@ struct b53_device *b53_switch_alloc(struct device *base, ds->ops = &b53_switch_ops; ds->phylink_mac_ops = &b53_phylink_mac_ops; dev->vlan_enabled = true; + dev->vlan_filtering = false; /* Let DSA handle the case were multiple bridges span the same switch * device and different VLAN awareness settings are requested, which * would be breaking filtering semantics for any of the other bridge diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 0166c37a13a7..2cf3e6a81e37 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -96,6 +96,7 @@ struct b53_pcs { struct b53_port { u16 vlan_ctl_mask; + u16 pvid; struct ethtool_keee eee; }; @@ -147,6 +148,7 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; bool vlan_enabled; + bool vlan_filtering; unsigned int num_ports; struct b53_port *ports; @@ -382,6 
+384,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mprot); void b53_mirror_del(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); +int b53_setup_port(struct dsa_switch *ds, int port); int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); void b53_disable_port(struct dsa_switch *ds, int port); void b53_brcm_hdr_setup(struct dsa_switch *ds, int port); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index fa2bf3fa9019..454a8c7fd7ee 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1230,6 +1230,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .resume = bcm_sf2_sw_resume, .get_wol = bcm_sf2_sw_get_wol, .set_wol = bcm_sf2_sw_set_wol, + .port_setup = b53_setup_port, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, .support_eee = b53_support_eee, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 940f1b71226d..7b35d24c38d7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) struct tc_taprio_qopt_offload *taprio; struct ocelot_port *ocelot_port; struct timespec64 base_ts; - int port; + int i, port; u32 val; mutex_lock(&ocelot->fwd_domain_lock); @@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, QSYS_PARAM_CFG_REG_3); + for (i = 0; i < taprio->num_entries; i++) + vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]); + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, QSYS_TAS_PARAM_CFG_CTRL); diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c index 7a5710f9ccf6..ead0625e781f 100644 --- a/drivers/net/ethernet/airoha/airoha_npu.c +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -104,12 +104,14 @@ struct ppe_mbox_data { u8 xpon_hal_api; u8 wan_xsi; u8 ct_joyme4; - int ppe_type; - int wan_mode; - int wan_sel; + u8 max_packet; + u8 rsv[3]; + u32 ppe_type; + u32 wan_mode; + u32 wan_sel; } init_info; struct { - int func_id; + u32 func_id; u32 size; u32 data; } set_info; diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c index c9aac27883a3..92f359f2b449 100644 --- a/drivers/net/ethernet/amd/pds_core/auxbus.c +++ b/drivers/net/ethernet/amd/pds_core/auxbus.c @@ -186,7 +186,6 @@ void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf, pds_client_unregister(pf, padev->client_id); auxiliary_device_delete(&padev->aux_dev); auxiliary_device_uninit(&padev->aux_dev); - padev->client_id = 0; *pd_ptr = NULL; mutex_unlock(&pf->config_lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 230726d7b74f..d41b58fad37b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, } /* Set up the header page info */ - xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, - XGBE_SKB_ALLOC_SIZE); + if (pdata->netdev->features & NETIF_F_RXCSUM) { + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + XGBE_SKB_ALLOC_SIZE); + } else { + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + pdata->rx_buf_size); + } /* Set up the buffer page info */ xgbe_set_buffer_data(&rdata->rx.buf, 
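The b53 changes above move the unicast/multicast flood and learning defaults out of b53_enable_port() into a new b53_setup_port(), wired up as the DSA .port_setup operation in both b53_switch_ops and bcm_sf2_ops, so the defaults also reach ports that are never brought up. A rough sketch of that split, with my_-prefixed hypothetical helpers standing in for the b53 register accessors:

#include <net/dsa.h>

struct my_switch {
	/* device state */
};

/* hypothetical register accessors */
static void my_set_flood(struct my_switch *priv, int port, bool uc, bool mc) {}
static void my_set_learning(struct my_switch *priv, int port, bool on) {}

static int my_port_setup(struct dsa_switch *ds, int port)
{
	struct my_switch *priv = ds->priv;

	/* one-time defaults, applied to every port whether or not it is used */
	my_set_flood(priv, port, true, true);
	my_set_learning(priv, port, false);

	return 0;
}

static int my_port_enable(struct dsa_switch *ds, int port,
			  struct phy_device *phy)
{
	/* only runtime bring-up (IRQs, EEE, ...) remains here */
	return 0;
}

static const struct dsa_switch_ops my_switch_ops = {
	.port_setup  = my_port_setup,
	.port_enable = my_port_enable,
};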
&ring->rx_buf_pa, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index f393228d41c7..f1b0fb02b3cd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); } +static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); + } +} + static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, unsigned int index, unsigned int val) { @@ -3545,8 +3557,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata) xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); - xgbe_config_sph_mode(pdata); - xgbe_config_rss(pdata); + + if (pdata->netdev->features & NETIF_F_RXCSUM) { + xgbe_config_sph_mode(pdata); + xgbe_config_rss(pdata); + } + desc_if->wrapper_tx_desc_init(pdata); desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); @@ -3702,5 +3718,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->disable_vxlan = xgbe_disable_vxlan; hw_if->set_vxlan_id = xgbe_set_vxlan_id; + /* For Split Header*/ + hw_if->enable_sph = xgbe_config_sph_mode; + hw_if->disable_sph = xgbe_disable_sph_mode; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index d84a310dfcd4..8e09ad8fa022 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev, if (ret) return ret; - if ((features & NETIF_F_RXCSUM) && !rxcsum) + if ((features & NETIF_F_RXCSUM) && !rxcsum) { + hw_if->enable_sph(pdata); + hw_if->enable_vxlan(pdata); hw_if->enable_rx_csum(pdata); - else if (!(features & NETIF_F_RXCSUM) && rxcsum) + schedule_work(&pdata->restart_work); + } else if (!(features & NETIF_F_RXCSUM) && rxcsum) { + hw_if->disable_sph(pdata); + hw_if->disable_vxlan(pdata); hw_if->disable_rx_csum(pdata); + schedule_work(&pdata->restart_work); + } if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) hw_if->enable_rx_vlan_stripping(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index d85386cac8d1..ed5d43c16d0e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -865,6 +865,10 @@ struct xgbe_hw_if { void (*enable_vxlan)(struct xgbe_prv_data *); void (*disable_vxlan)(struct xgbe_prv_data *); void (*set_vxlan_id)(struct xgbe_prv_data *); + + /* For Split Header */ + void (*enable_sph)(struct xgbe_prv_data *pdata); + void (*disable_sph)(struct xgbe_prv_data *pdata); }; /* This structure represents implementation specific routines for an diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c8e3468eee61..86a5de44b6f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2015,6 +2015,7 @@ static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, } return skb; vlan_err: + skb_mark_for_recycle(skb); dev_kfree_skb(skb); return NULL; } @@ -3414,6 +3415,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) bnxt_free_one_tx_ring_skbs(bp, txr, i); } + 
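The xgbe hunks above tie split-header (SPH) mode to NETIF_F_RXCSUM: new enable_sph/disable_sph hooks are added to xgbe_hw_if, the RX header buffer is sized differently depending on the feature, and xgbe_set_features() now toggles SPH together with RX checksum offload and schedules restart_work so the rings are rebuilt. A hedged sketch of that ndo_set_features shape, with my_* placeholders for the hardware hooks:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_pdata {
	struct net_device *netdev;
	struct work_struct restart_work;
};

/* hypothetical hardware hooks */
static void my_set_sph(struct my_pdata *pdata, bool on) {}
static void my_set_rx_csum(struct my_pdata *pdata, bool on) {}

static int my_set_features(struct net_device *netdev,
			   netdev_features_t features)
{
	struct my_pdata *pdata = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		bool on = !!(features & NETIF_F_RXCSUM);

		/* split header and RX checksum are switched together;
		 * the rings must be rebuilt because the buffer layout changes
		 */
		my_set_sph(pdata, on);
		my_set_rx_csum(pdata, on);
		schedule_work(&pdata->restart_work);
	}

	return 0;
}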
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) + bnxt_ptp_free_txts_skbs(bp->ptp_cfg); } static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) @@ -11599,6 +11603,9 @@ static void bnxt_init_napi(struct bnxt *bp) poll_fn = bnxt_poll_p5; else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) cp_nr_rings--; + + set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); + for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, @@ -12318,12 +12325,15 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { struct hwrm_func_drv_if_change_output *resp; struct hwrm_func_drv_if_change_input *req; - bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; bool caps_change = false; int rc, retry = 0; + bool fw_reset; u32 flags = 0; + fw_reset = (bp->fw_reset_state == BNXT_FW_RESET_STATE_ABORT); + bp->fw_reset_state = 0; + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; @@ -12392,13 +12402,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return rc; } + /* IRQ will be initialized later in bnxt_request_irq()*/ bnxt_clear_int_mode(bp); - rc = bnxt_init_int_mode(bp); - if (rc) { - clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); - netdev_err(bp->dev, "init int mode failed\n"); - return rc; - } } rc = bnxt_cancel_reservations(bp, fw_reset); } @@ -12797,8 +12802,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) bnxt_vf_reps_open(bp); - if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) - WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) @@ -14833,7 +14836,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) bnxt_dl_health_fw_status_update(bp, false); - bp->fw_reset_state = 0; + bp->fw_reset_state = BNXT_FW_RESET_STATE_ABORT; netif_close(bp->dev); } @@ -16003,8 +16006,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_rdma_aux_device_del(bp); - bnxt_ptp_clear(bp); unregister_netdev(dev); + bnxt_ptp_clear(bp); bnxt_rdma_aux_device_uninit(bp); @@ -16931,10 +16934,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) if (!err) result = PCI_ERS_RESULT_RECOVERED; + /* IRQ will be initialized later in bnxt_io_resume */ bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); - err = bnxt_init_int_mode(bp); - bnxt_ulp_irq_restart(bp, err); } reset_exit: @@ -16963,10 +16965,13 @@ static void bnxt_io_resume(struct pci_dev *pdev) err = bnxt_hwrm_func_qcaps(bp); if (!err) { - if (netif_running(netdev)) + if (netif_running(netdev)) { err = bnxt_open(netdev); - else + } else { err = bnxt_reserve_rings(bp, true); + if (!err) + err = bnxt_init_int_mode(bp); + } } if (!err) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 21726cf56586..bc8b3b7e915d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2614,6 +2614,7 @@ struct bnxt { #define BNXT_FW_RESET_STATE_POLL_FW 4 #define BNXT_FW_RESET_STATE_OPENING 5 #define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6 +#define BNXT_FW_RESET_STATE_ABORT 7 u16 fw_reset_min_dsecs; #define BNXT_DFLT_FW_RST_MIN_DSECS 20 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index 5576e7cf8463..a000d3f630bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -110,20 +110,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, } } + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) + info->dest_buf_size += len; + if (info->dest_buf) { if ((info->seg_start + off + len) <= BNXT_COREDUMP_BUF_LEN(info->buf_len)) { - memcpy(info->dest_buf + off, dma_buf, len); + u16 copylen = min_t(u16, len, + info->dest_buf_size - off); + + memcpy(info->dest_buf + off, dma_buf, copylen); + if (copylen < len) + break; } else { rc = -ENOBUFS; + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { + kfree(info->dest_buf); + info->dest_buf = NULL; + } break; } } - if (cmn_req->req_type == - cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) - info->dest_buf_size += len; - if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 48dd5922e4dd..f5d490bf997e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2062,6 +2062,17 @@ static int bnxt_get_regs_len(struct net_device *dev) return reg_len; } +#define BNXT_PCIE_32B_ENTRY(start, end) \ + { offsetof(struct pcie_ctx_hw_stats, start), \ + offsetof(struct pcie_ctx_hw_stats, end) } + +static const struct { + u16 start; + u16 end; +} bnxt_pcie_32b_entries[] = { + BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), +}; + static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { @@ -2094,12 +2105,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); rc = hwrm_req_send(bp, req); if (!rc) { - __le64 *src = (__le64 *)hw_pcie_stats; - u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); - int i; - - for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) - dst[i] = le64_to_cpu(src[i]); + u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); + u8 *src = (u8 *)hw_pcie_stats; + int i, j; + + for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) { + if (i >= bnxt_pcie_32b_entries[j].start && + i <= bnxt_pcie_32b_entries[j].end) { + u32 *dst32 = (u32 *)(dst + i); + + *dst32 = le32_to_cpu(*(__le32 *)(src + i)); + i += 4; + if (i > bnxt_pcie_32b_entries[j].end && + j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) + j++; + } else { + u64 *dst64 = (u64 *)(dst + i); + + *dst64 = le64_to_cpu(*(__le64 *)(src + i)); + i += 8; + } + } } hwrm_req_drop(bp, req); } @@ -4991,6 +5017,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, if (!bp->num_tests || !BNXT_PF(bp)) return; + memset(buf, 0, sizeof(u64) * bp->num_tests); if (etest->flags & ETH_TEST_FL_OFFLINE && bnxt_ulp_registered(bp->edev)) { etest->flags |= ETH_TEST_FL_FAILED; @@ -4998,7 +5025,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, return; } - memset(buf, 0, sizeof(u64) * bp->num_tests); if (!netif_running(dev)) { etest->flags |= ETH_TEST_FL_FAILED; return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 2d4e19b96ee7..0669d43472f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -794,6 +794,27 @@ next_slot: return HZ; } +void 
bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp) +{ + struct bnxt_ptp_tx_req *txts_req; + u16 cons = ptp->txts_cons; + + /* make sure ptp aux worker finished with + * possible BNXT_STATE_OPEN set + */ + ptp_cancel_worker_sync(ptp->ptp_clock); + + ptp->tx_avail = BNXT_MAX_TX_TS; + while (cons != ptp->txts_prod) { + txts_req = &ptp->txts_req[cons]; + if (!IS_ERR_OR_NULL(txts_req->tx_skb)) + dev_kfree_skb_any(txts_req->tx_skb); + cons = NEXT_TXTS(cons); + } + ptp->txts_cons = cons; + ptp_schedule_worker(ptp->ptp_clock, 0); +} + int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod) { spin_lock_bh(&ptp->ptp_tx_lock); @@ -1105,7 +1126,6 @@ out: void bnxt_ptp_clear(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - int i; if (!ptp) return; @@ -1117,12 +1137,5 @@ void bnxt_ptp_clear(struct bnxt *bp) kfree(ptp->ptp_info.pin_config); ptp->ptp_info.pin_config = NULL; - for (i = 0; i < BNXT_MAX_TX_TS; i++) { - if (ptp->txts_req[i].tx_skb) { - dev_kfree_skb_any(ptp->txts_req[i].tx_skb); - ptp->txts_req[i].tx_skb = NULL; - } - } - bnxt_unmap_ptp_regs(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index a95f05e9c579..0481161d26ef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -162,6 +162,7 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp); void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); +void bnxt_ptp_free_txts_skbs(struct bnxt_ptp_cfg *ptp); int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod); void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index d88fbecdab4b..232e839a9d07 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev) eth_hw_addr_set(dev, psrom->mac_addr); if (np->chip_id == CHIP_IP1000A) { - np->led_mode = psrom->led_mode; + np->led_mode = le16_to_cpu(psrom->led_mode); return 0; } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 195dc6cfd895..0e33e2eaae96 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -335,7 +335,7 @@ typedef struct t_SROM { u16 sub_system_id; /* 0x06 */ u16 pci_base_1; /* 0x08 (IP1000A only) */ u16 pci_base_2; /* 0x0a (IP1000A only) */ - u16 led_mode; /* 0x0c (IP1000A only) */ + __le16 led_mode; /* 0x0c (IP1000A only) */ u16 reserved1[9]; /* 0x0e-0x1f */ u8 mac_addr[6]; /* 0x20-0x25 */ u8 reserved2[10]; /* 0x26-0x2f */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a86cfebedaa8..17e9bddb9ddd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -714,7 +714,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c 
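The dl2k fix above declares the EEPROM led_mode word as __le16 and converts it with le16_to_cpu() at the point of use, so the value reads correctly on big-endian hosts and the endianness is visible to sparse. A small illustrative sketch of the same pattern; the srom layout here is made up:

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_srom {
	__le16 config_mode;	/* stored little-endian in the EEPROM */
	u8     mac_addr[6];
};

static u16 my_parse_config(const struct my_srom *srom)
{
	/* le16_to_cpu() is a no-op on little-endian CPUs and a byte swap
	 * on big-endian ones
	 */
	return le16_to_cpu(srom->config_mode);
}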
b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 09749e9f7398..4e5d8bc39a1b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -61,7 +61,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .name = "tm_qset", .cmd = HNAE3_DBG_CMD_TM_QSET, .dentry = HNS3_DBG_DENTRY_TM, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_1MB, .init = hns3_dbg_common_file_init, }, { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9ff797fb36c4..b03b8758c777 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, writel(mask_en, tqp_vector->mask_addr); } -static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector) { napi_enable(&tqp_vector->napi); enable_irq(tqp_vector->vector_irq); - - /* enable vector */ - hns3_mask_vector_irq(tqp_vector, 1); } -static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector) { - /* disable vector */ - hns3_mask_vector_irq(tqp_vector, 0); - disable_irq(tqp_vector->vector_irq); napi_disable(&tqp_vector->napi); cancel_work_sync(&tqp_vector->rx_group.dim.work); @@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev) return 0; } +static void hns3_enable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_enable(&priv->tqp_vector[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 1); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_enable(h->kinfo.tqp[i]); +} + +static void hns3_disable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 0); + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_disable(&priv->tqp_vector[i]); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - int i, j; int ret; ret = hns3_nic_reset_all_ring(h); @@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - /* enable the vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - /* enable rcb */ - for (j = 0; j < h->kinfo.num_tqps; j++) - hns3_tqp_enable(h->kinfo.tqp[j]); + hns3_enable_irqs_and_tqps(netdev); /* start the ae_dev */ ret = h->ae_algo->ops->start ? 
h->ae_algo->ops->start(h) : 0; if (ret) { set_bit(HNS3_NIC_STATE_DOWN, &priv->state); - while (j--) - hns3_tqp_disable(h->kinfo.tqp[j]); - - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); + hns3_disable_irqs_and_tqps(netdev); } return ret; @@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h) static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; - int i; - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - /* disable rcb */ - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(netdev); /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; @@ -5864,8 +5871,6 @@ int hns3_set_channels(struct net_device *netdev, void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int i; if (!if_running) return; @@ -5876,11 +5881,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) netif_carrier_off(ndev); netif_tx_disable(ndev); - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(ndev); /* delay ring buffer clearing to hns3_reset_notify_uninit_enet * during reset process, because driver may not be able @@ -5896,7 +5897,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; - int i; if (!if_running) return; @@ -5912,11 +5912,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_enable(h->kinfo.tqp[i]); + hns3_enable_irqs_and_tqps(ndev); netif_tx_wake_all_queues(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 59cc9221185f..ec581d4b696f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) ptp->info.settime64 = hclge_ptp_settime; ptp->info.n_alarm = 0; + + spin_lock_init(&ptp->lock); + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + hdev->ptp = ptp; + ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); if (IS_ERR(ptp->clock)) { dev_err(&hdev->pdev->dev, @@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) return -ENODEV; } - spin_lock_init(&ptp->lock); - ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; - ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; - ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; - hdev->ptp = ptp; - return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9ba767740a04..dada42e7e0ec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1292,9 +1292,8 
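The hclge_ptp hunk above moves the lock, register-base and default-config initialisation (and the hdev->ptp assignment) in front of ptp_clock_register(), because the clock's callbacks may run as soon as registration succeeds. A minimal sketch of that ordering rule; the my_ names and fields are illustrative:

#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/spinlock.h>

struct my_ptp {
	struct ptp_clock_info info;
	struct ptp_clock *clock;
	spinlock_t lock;
	void __iomem *io_base;
};

static int my_ptp_register(struct my_ptp *ptp, struct device *dev,
			   void __iomem *regs)
{
	/* everything a callback may touch is ready before registering */
	spin_lock_init(&ptp->lock);
	ptp->io_base = regs;

	ptp->clock = ptp_clock_register(&ptp->info, dev);
	if (IS_ERR(ptp->clock))
		return PTR_ERR(ptp->clock);

	return 0;
}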
@@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) rtnl_unlock(); } -static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclge_vf_to_pf_msg send_msg; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, @@ -1303,6 +1302,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } +static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable); + if (ret) + return ret; + + hdev->rxvtag_strip_en = enable; + return 0; +} + static int hclgevf_reset_tqp(struct hnae3_handle *handle) { #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U @@ -2204,12 +2216,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) tc_valid, tc_size); } -static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) +static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev, + bool rxvtag_strip_en) { struct hnae3_handle *nic = &hdev->nic; int ret; - ret = hclgevf_en_hw_strip_rxvtag(nic, true); + ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed to enable rx vlan offload, ret = %d\n", ret); @@ -2879,7 +2892,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); @@ -2994,7 +3007,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, true); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index cccef3228461..0208425ab594 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -253,6 +253,7 @@ struct hclgevf_dev { int *vector_irq; bool gro_en; + bool rxvtag_strip_en; unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 01a08cfd0090..66e070095d1b 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only // SPDX-FileCopyrightText: Copyright Red Hat -#include <linux/bitfield.h> #include <linux/cleanup.h> #include <linux/mutex.h> #include <linux/pci.h> @@ -14,32 +13,16 @@ static DEFINE_XARRAY(ice_adapters); static DEFINE_MUTEX(ice_adapters_mutex); -/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. 
*/ -#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) -#define INDEX_FIELD_DEV GENMASK(31, 16) -#define INDEX_FIELD_BUS GENMASK(12, 5) -#define INDEX_FIELD_SLOT GENMASK(4, 0) - -static unsigned long ice_adapter_index(const struct pci_dev *pdev) +static unsigned long ice_adapter_index(u64 dsn) { - unsigned int domain = pci_domain_nr(pdev->bus); - - WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN)); - - switch (pdev->device) { - case ICE_DEV_ID_E825C_BACKPLANE: - case ICE_DEV_ID_E825C_QSFP: - case ICE_DEV_ID_E825C_SFP: - case ICE_DEV_ID_E825C_SGMII: - return FIELD_PREP(INDEX_FIELD_DEV, pdev->device); - default: - return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) | - FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) | - FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn)); - } +#if BITS_PER_LONG == 64 + return dsn; +#else + return (u32)dsn ^ (u32)(dsn >> 32); +#endif } -static struct ice_adapter *ice_adapter_new(void) +static struct ice_adapter *ice_adapter_new(u64 dsn) { struct ice_adapter *adapter; @@ -47,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void) if (!adapter) return NULL; + adapter->device_serial_number = dsn; spin_lock_init(&adapter->ptp_gltsyn_time_lock); refcount_set(&adapter->refcount, 1); @@ -77,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter) * Return: Pointer to ice_adapter on success. * ERR_PTR() on error. -ENOMEM is the only possible error. */ -struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) { - unsigned long index = ice_adapter_index(pdev); + u64 dsn = pci_get_dsn(pdev); struct ice_adapter *adapter; + unsigned long index; int err; + index = ice_adapter_index(dsn); scoped_guard(mutex, &ice_adapters_mutex) { err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); if (err == -EBUSY) { adapter = xa_load(&ice_adapters, index); refcount_inc(&adapter->refcount); + WARN_ON_ONCE(adapter->device_serial_number != dsn); return adapter; } if (err) return ERR_PTR(err); - adapter = ice_adapter_new(); + adapter = ice_adapter_new(dsn); if (!adapter) return ERR_PTR(-ENOMEM); xa_store(&ice_adapters, index, adapter, GFP_KERNEL); @@ -110,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) * * Context: Process, may sleep. */ -void ice_adapter_put(const struct pci_dev *pdev) +void ice_adapter_put(struct pci_dev *pdev) { - unsigned long index = ice_adapter_index(pdev); + u64 dsn = pci_get_dsn(pdev); struct ice_adapter *adapter; + unsigned long index; + index = ice_adapter_index(dsn); scoped_guard(mutex, &ice_adapters_mutex) { adapter = xa_load(&ice_adapters, index); if (WARN_ON(!adapter)) diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h index e233225848b3..ac15c0d2bc1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.h +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -32,6 +32,7 @@ struct ice_port_list { * @refcount: Reference count. struct ice_pf objects hold the references. 
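The ice_adapter rework above keys the adapter xarray by the PCI Device Serial Number instead of a hand-built domain/bus/slot index; because the xarray index is an unsigned long, the 64-bit DSN is XOR-folded on 32-bit hosts and the full value is cached in struct ice_adapter so a collision would still be noticed. A sketch of that folding helper and its use (my_ names are illustrative):

#include <linux/bits.h>
#include <linux/pci.h>

static unsigned long my_adapter_index(u64 dsn)
{
#if BITS_PER_LONG == 64
	return dsn;				/* the DSN fits the index as-is */
#else
	return (u32)dsn ^ (u32)(dsn >> 32);	/* fold the two halves */
#endif
}

/* usage: look up shared per-adapter state by serial number */
static unsigned long my_index_for(struct pci_dev *pdev)
{
	return my_adapter_index(pci_get_dsn(pdev));
}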
* @ctrl_pf: Control PF of the adapter * @ports: Ports list + * @device_serial_number: DSN cached for collision detection on 32bit systems */ struct ice_adapter { refcount_t refcount; @@ -40,9 +41,10 @@ struct ice_adapter { struct ice_pf *ctrl_pf; struct ice_port_list ports; + u64 device_serial_number; }; -struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev); -void ice_adapter_put(const struct pci_dev *pdev); +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev); +void ice_adapter_put(struct pci_dev *pdev); #endif /* _ICE_ADAPTER_H */ diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 69d5b1a28491..59323c019544 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2345,15 +2345,15 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; - if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; - } - if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + if (hw->mac_type == ICE_MAC_E810 || + hw->mac_type == ICE_MAC_GENERIC) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (status) diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 7752920d7a8e..1cca9b2262e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -2097,6 +2097,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) pf = vf->pf; dev = ice_pf_to_dev(pf); vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err_exit; + } #define ICE_VF_MAX_FDIR_FILTERS 128 if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) || diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 66544faab710..aef0e9775a33 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -629,13 +629,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6) -#define IDPF_CAP_RX_CSUM_L4V4 (\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP) +#define IDPF_CAP_TX_CSUM_L4V4 (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP) -#define IDPF_CAP_RX_CSUM_L4V6 (\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) +#define IDPF_CAP_TX_CSUM_L4V6 (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP) #define IDPF_CAP_RX_CSUM (\ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\ @@ -644,11 +644,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) -#define IDPF_CAP_SCTP_CSUM (\ +#define IDPF_CAP_TX_SCTP_CSUM (\ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\ - VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP) + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP) #define IDPF_CAP_TUNNEL_TX_CSUM (\ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\ diff --git 
a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index aa755dedb41d..82f09b4030bc 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -703,8 +703,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; struct idpf_vport_config *vport_config; + netdev_features_t other_offloads = 0; + netdev_features_t csum_offloads = 0; + netdev_features_t tso_offloads = 0; netdev_features_t dflt_features; - netdev_features_t offloads = 0; struct idpf_netdev_priv *np; struct net_device *netdev; u16 idx = vport->idx; @@ -766,53 +768,32 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) dflt_features |= NETIF_F_RXHASH; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) - dflt_features |= NETIF_F_IP_CSUM; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) - dflt_features |= NETIF_F_IPV6_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) + csum_offloads |= NETIF_F_IP_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) + csum_offloads |= NETIF_F_IPV6_CSUM; if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) - dflt_features |= NETIF_F_RXCSUM; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) - dflt_features |= NETIF_F_SCTP_CRC; + csum_offloads |= NETIF_F_RXCSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM)) + csum_offloads |= NETIF_F_SCTP_CRC; if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) - dflt_features |= NETIF_F_TSO; + tso_offloads |= NETIF_F_TSO; if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) - dflt_features |= NETIF_F_TSO6; + tso_offloads |= NETIF_F_TSO6; if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_UDP | VIRTCHNL2_CAP_SEG_IPV6_UDP)) - dflt_features |= NETIF_F_GSO_UDP_L4; + tso_offloads |= NETIF_F_GSO_UDP_L4; if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) - offloads |= NETIF_F_GRO_HW; - /* advertise to stack only if offloads for encapsulated packets is - * supported - */ - if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, - VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { - offloads |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | - NETIF_F_GSO_PARTIAL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_IPXIP4 | - NETIF_F_GSO_IPXIP6 | - 0; - - if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, - IDPF_CAP_TUNNEL_TX_CSUM)) - netdev->gso_partial_features |= - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - offloads |= NETIF_F_TSO_MANGLEID; - } + other_offloads |= NETIF_F_GRO_HW; if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) - offloads |= NETIF_F_LOOPBACK; + other_offloads |= NETIF_F_LOOPBACK; - netdev->features |= dflt_features; - netdev->hw_features |= dflt_features | offloads; - netdev->hw_enc_features |= dflt_features | offloads; + netdev->features |= dflt_features | csum_offloads | tso_offloads; + netdev->hw_features |= netdev->features | other_offloads; + netdev->vlan_features |= netdev->features | other_offloads; + netdev->hw_enc_features |= dflt_features | other_offloads; idpf_set_ethtool_ops(netdev); netif_set_affinity_auto(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); @@ -1132,11 +1113,9 @@ static struct idpf_vport *idpf_vport_alloc(struct 
idpf_adapter *adapter, num_max_q = max(max_q->max_txq, max_q->max_rxq); vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); - if (!vport->q_vector_idxs) { - kfree(vport); + if (!vport->q_vector_idxs) + goto free_vport; - return NULL; - } idpf_vport_init(vport, max_q); /* This alloc is done separate from the LUT because it's not strictly @@ -1146,11 +1125,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, */ rss_data = &adapter->vport_config[idx]->user_config.rss_data; rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); - if (!rss_data->rss_key) { - kfree(vport); + if (!rss_data->rss_key) + goto free_vector_idxs; - return NULL; - } /* Initialize default rss key */ netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); @@ -1163,6 +1140,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, adapter->next_vport = idpf_get_free_slot(adapter); return vport; + +free_vector_idxs: + kfree(vport->q_vector_idxs); +free_vport: + kfree(vport); + + return NULL; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index bec4a02c5373..b35713036a54 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -89,6 +89,7 @@ static void idpf_shutdown(struct pci_dev *pdev) { struct idpf_adapter *adapter = pci_get_drvdata(pdev); + cancel_delayed_work_sync(&adapter->serv_task); cancel_delayed_work_sync(&adapter->vc_event_task); idpf_vc_core_deinit(adapter); idpf_deinit_dflt_mbx(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 612ed26a29c5..efc7b30e4211 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -1290,6 +1290,8 @@ void igc_ptp_reset(struct igc_adapter *adapter) /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + mutex_lock(&adapter->ptm_lock); + spin_lock_irqsave(&adapter->tmreg_lock, flags); switch (adapter->hw.mac.type) { @@ -1308,7 +1310,6 @@ void igc_ptp_reset(struct igc_adapter *adapter) if (!igc_is_crosststamp_supported(adapter)) break; - mutex_lock(&adapter->ptm_lock); wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); @@ -1332,7 +1333,6 @@ void igc_ptp_reset(struct igc_adapter *adapter) netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); igc_ptm_reset(hw); - mutex_unlock(&adapter->ptm_lock); break; default: /* No work to do. 
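The idpf_vport_alloc() hunk above replaces the duplicated "kfree(vport); return NULL;" failure paths with goto labels that unwind in reverse allocation order, which also fixes the leak of q_vector_idxs when the RSS key allocation fails. A generic sketch of that idiom; types and sizes are illustrative:

#include <linux/slab.h>
#include <linux/types.h>

struct my_vport {
	u16 *q_vector_idxs;
	u8  *rss_key;
};

static struct my_vport *my_vport_alloc(unsigned int num_q, size_t key_size)
{
	struct my_vport *vport;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return NULL;

	vport->q_vector_idxs = kcalloc(num_q, sizeof(u16), GFP_KERNEL);
	if (!vport->q_vector_idxs)
		goto free_vport;

	vport->rss_key = kzalloc(key_size, GFP_KERNEL);
	if (!vport->rss_key)
		goto free_vector_idxs;

	return vport;

free_vector_idxs:
	kfree(vport->q_vector_idxs);
free_vport:
	kfree(vport);
	return NULL;
}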
*/ @@ -1349,5 +1349,7 @@ void igc_ptp_reset(struct igc_adapter *adapter) out: spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptm_lock); + wrfl(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 0a679e95196f..24499bb36c00 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1223,7 +1223,7 @@ static void octep_hb_timeout_task(struct work_struct *work) miss_cnt); rtnl_lock(); if (netif_running(oct->netdev)) - octep_stop(oct->netdev); + dev_close(oct->netdev); rtnl_unlock(); } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 18c922dd5fc6..ccb69bc5c952 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -835,7 +835,9 @@ static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) struct octep_vf_device *oct = netdev_priv(netdev); netdev_hold(netdev, NULL, GFP_ATOMIC); - schedule_work(&oct->tx_timeout_task); + if (!schedule_work(&oct->tx_timeout_task)) + netdev_put(netdev, NULL); + } static int octep_vf_set_mac(struct net_device *netdev, void *p) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 47807b202310..22a532695fb0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -269,12 +269,8 @@ static const char * const mtk_clks_source_name[] = { "ethwarp_wocpu2", "ethwarp_wocpu1", "ethwarp_wocpu0", - "top_usxgmii0_sel", - "top_usxgmii1_sel", "top_sgm0_sel", "top_sgm1_sel", - "top_xfi_phy0_xtal_sel", - "top_xfi_phy1_xtal_sel", "top_eth_gmii_sel", "top_eth_refck_50m_sel", "top_eth_sys_200m_sel", @@ -2252,14 +2248,18 @@ skip_rx: ring->data[idx] = new_data; rxd->rxd1 = (unsigned int)dma_addr; release_desc: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { + if (unlikely(dma_addr == DMA_MAPPING_ERROR)) + addr64 = FIELD_GET(RX_DMA_ADDR64_MASK, + rxd->rxd2); + else + addr64 = RX_DMA_PREP_ADDR64(dma_addr); + } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) rxd->rxd2 = RX_DMA_LSO; else - rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); - - if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) && - likely(dma_addr != DMA_MAPPING_ERROR)) - rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr); + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64; ring->calc_idx = idx; done++; @@ -3186,11 +3186,19 @@ static int mtk_dma_init(struct mtk_eth *eth) static void mtk_dma_free(struct mtk_eth *eth) { const struct mtk_soc_data *soc = eth->soc; - int i; + int i, j, txqs = 1; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + txqs = MTK_QDMA_NUM_QUEUES; + + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + for (j = 0; j < txqs; j++) + netdev_tx_reset_subqueue(eth->netdev[i], j); + } - for (i = 0; i < MTK_MAX_DEVS; i++) - if (eth->netdev[i]) - netdev_reset_queue(eth->netdev[i]); if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) { dma_free_coherent(eth->dma_dev, MTK_QDMA_RING_SIZE * soc->tx.desc_size, @@ -3465,9 +3473,6 @@ static int mtk_open(struct net_device *dev) } mtk_gdm_config(eth, target_mac->id, gdm_config); } - /* Reset and enable PSE */ - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); - mtk_w32(eth, 0, MTK_RST_GL); napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); diff --git 
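In the octep_vf hunk above, the tx timeout handler pins the netdev with netdev_hold() before deferring to a workqueue, and now drops that reference immediately when schedule_work() reports the work was already pending, since the work item only releases one reference when it runs. A short sketch of the pattern, with a hypothetical my_dev holding the work item:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_dev {
	struct net_device *netdev;
	struct work_struct tx_timeout_task;	/* drops the reference when it runs */
};

static void my_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct my_dev *dev = netdev_priv(netdev);

	netdev_hold(netdev, NULL, GFP_ATOMIC);
	if (!schedule_work(&dev->tx_timeout_task))
		netdev_put(netdev, NULL);	/* work already queued, give it back */
}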
a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 76f202d7f055..b175119a6a7d 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) struct net_device *ndev = priv->ndev; unsigned int head = ring->head; unsigned int entry = ring->tail; + unsigned long flags; while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) { ret = mtk_star_tx_complete_one(priv); @@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) netif_wake_queue(ndev); if (napi_complete(napi)) { - spin_lock(&priv->lock); + spin_lock_irqsave(&priv->lock, flags); mtk_star_enable_dma_irq(priv, false, true); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); } return 0; @@ -1341,16 +1342,16 @@ push_new_skb: static int mtk_star_rx_poll(struct napi_struct *napi, int budget) { struct mtk_star_priv *priv; + unsigned long flags; int work_done = 0; priv = container_of(napi, struct mtk_star_priv, rx_napi); work_done = mtk_star_rx(priv, budget); - if (work_done < budget) { - napi_complete_done(napi, work_done); - spin_lock(&priv->lock); + if (work_done < budget && napi_complete_done(napi, work_done)) { + spin_lock_irqsave(&priv->lock, flags); mtk_star_enable_dma_irq(priv, true, false); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); } return work_done; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 532c7fa94d17..dbd9482359e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -176,6 +176,7 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) priv = ptpsq->txqsq.priv; + rtnl_lock(); mutex_lock(&priv->state_lock); chs = &priv->channels; netdev = priv->netdev; @@ -183,22 +184,19 @@ static int mlx5e_tx_reporter_ptpsq_unhealthy_recover(void *ctx) carrier_ok = netif_carrier_ok(netdev); netif_carrier_off(netdev); - rtnl_lock(); mlx5e_deactivate_priv_channels(priv); - rtnl_unlock(); mlx5e_ptp_close(chs->ptp); err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp); - rtnl_lock(); mlx5e_activate_priv_channels(priv); - rtnl_unlock(); /* return carrier back if needed */ if (carrier_ok) netif_carrier_on(netdev); mutex_unlock(&priv->state_lock); + rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index 5c762a71818d..7a18a469961d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -165,9 +165,6 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, struct flow_match_enc_keyid enc_keyid; void *misc_c, *misc_v; - misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) return 0; @@ -182,6 +179,30 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, err = mlx5e_tc_tun_parse_vxlan_gbp_option(priv, spec, f); if (err) return err; + + /* We can't mix custom tunnel headers with symbolic ones and we + * don't have a symbolic field name for GBP, so we use custom + * tunnel headers in this case. 
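The mtk_star poll changes above only re-arm the DMA interrupt when napi_complete_done() actually completes NAPI, and switch to spin_lock_irqsave() because the same lock is taken from hard-IRQ context. A condensed sketch of that poll shape; my_priv and the helpers are hypothetical:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;		/* also taken in the interrupt handler */
	struct napi_struct rx_napi;
};

static int my_rx(struct my_priv *priv, int budget) { return 0; }
static void my_enable_rx_irq(struct my_priv *priv) {}

static int my_rx_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, rx_napi);
	unsigned long flags;
	int work_done;

	work_done = my_rx(priv, budget);

	/* only re-enable the interrupt if NAPI really completed */
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&priv->lock, flags);
		my_enable_rx_irq(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}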
We need hardware support to + * match on custom tunnel headers, but we already know it's + * supported because the previous call successfully checked for + * that. + */ + misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_5); + misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_5); + + /* Shift by 8 to account for the reserved bits in the vxlan + * header after the VNI. + */ + MLX5_SET(fte_match_set_misc5, misc_c, tunnel_header_1, + be32_to_cpu(enc_keyid.mask->keyid) << 8); + MLX5_SET(fte_match_set_misc5, misc_v, tunnel_header_1, + be32_to_cpu(enc_keyid.key->keyid) << 8); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_5; + + return 0; } /* match on VNI is required */ @@ -195,6 +216,11 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, be32_to_cpu(enc_keyid.mask->keyid)); MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9ba99609999f..f1d908f61134 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1750,9 +1750,6 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr !list_is_first(&attr->list, &flow->attrs)) return 0; - if (flow_flag_test(flow, SLOW)) - return 0; - esw_attr = attr->esw_attr; if (!esw_attr->split_count || esw_attr->split_count == esw_attr->out_count - 1) @@ -1766,7 +1763,7 @@ extra_split_attr_dests_needed(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr for (i = esw_attr->split_count; i < esw_attr->out_count; i++) { /* external dest with encap is considered as internal by firmware */ if (esw_attr->dests[i].vport == MLX5_VPORT_UPLINK && - !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) + !(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)) ext_dest = true; else int_dest = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index a6a8eea5980c..0e3a977d5332 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3533,7 +3533,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) int err; mutex_init(&esw->offloads.termtbl_mutex); - mlx5_rdma_enable_roce(esw->dev); + err = mlx5_rdma_enable_roce(esw->dev); + if (err) + goto err_roce; err = mlx5_esw_host_number_init(esw); if (err) @@ -3594,6 +3596,7 @@ err_vport_metadata: esw_offloads_metadata_uninit(esw); err_metadata: mlx5_rdma_disable_roce(esw->dev); +err_roce: mutex_destroy(&esw->offloads.termtbl_mutex); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index a42f6cd99b74..5c552b71e371 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid * static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) { + u8 mac[ETH_ALEN] = {}; union ib_gid gid; - u8 mac[ETH_ALEN]; mlx5_rdma_make_default_gid(dev, &gid); return mlx5_core_roce_gid_set(dev, 0, @@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct 
mlx5_core_dev *dev) mlx5_nic_vport_disable_roce(dev); } -void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) +int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { int err; if (!MLX5_CAP_GEN(dev, roce)) - return; + return 0; err = mlx5_nic_vport_enable_roce(dev); if (err) { mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err); - return; + return err; } err = mlx5_rdma_add_roce_addr(dev); @@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) goto del_roce_addr; } - return; + return err; del_roce_addr: mlx5_rdma_del_roce_addr(dev); disable_roce: mlx5_nic_vport_disable_roce(dev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h index 750cff2a71a4..3d9e76c3d42f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h @@ -8,12 +8,12 @@ #ifdef CONFIG_MLX5_ESWITCH -void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); +int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ -static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {} +static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {} #endif /* CONFIG_MLX5_ESWITCH */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 4ca7b99ef131..de6b1a340f55 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -154,14 +154,14 @@ struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev); void fbnic_devlink_register(struct fbnic_dev *fbd); void fbnic_devlink_unregister(struct fbnic_dev *fbd); -int fbnic_fw_enable_mbx(struct fbnic_dev *fbd); -void fbnic_fw_disable_mbx(struct fbnic_dev *fbd); +int fbnic_fw_request_mbx(struct fbnic_dev *fbd); +void fbnic_fw_free_mbx(struct fbnic_dev *fbd); void fbnic_hwmon_register(struct fbnic_dev *fbd); void fbnic_hwmon_unregister(struct fbnic_dev *fbd); -int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); -void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); +int fbnic_pcs_request_irq(struct fbnic_dev *fbd); +void fbnic_pcs_free_irq(struct fbnic_dev *fbd); void fbnic_napi_name_irqs(struct fbnic_dev *fbd); int fbnic_napi_request_irq(struct fbnic_dev *fbd, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 3b12a0ab5906..51bee8072420 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -796,8 +796,10 @@ enum { /* PUL User Registers */ #define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19) #define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */ +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18) #define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \ 0x3106e /* 0xc41b8 */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index 88db3dacb940..3d9636a6c968 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx, { u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + /* 
Write the upper 32b and then the lower 32b. Doing this the + * FW can then read lower, upper, lower to verify that the state + * of the descriptor wasn't changed mid-transaction. + */ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc)); fw_wrfl(fbd); fw_wr32(fbd, desc_offset, lower_32_bits(desc)); } +static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx, u32 desc) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + + /* For initialization we write the lower 32b of the descriptor first. + * This way we can set the state to mark it invalid before we clear the + * upper 32b. + */ + fw_wr32(fbd, desc_offset, desc); + fw_wrfl(fbd); + fw_wr32(fbd, desc_offset + 1, 0); +} + static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) { u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); @@ -33,29 +51,41 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) return desc; } -static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { int desc_idx; + /* Disable DMA transactions from the device, + * and flush any transactions triggered during cleaning + */ + switch (mbx_idx) { + case FBNIC_IPC_MBX_RX_IDX: + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH); + break; + case FBNIC_IPC_MBX_TX_IDX: + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH); + break; + } + + wrfl(fbd); + /* Initialize first descriptor to all 0s. Doing this gives us a * solid stop for the firmware to hit when it is done looping * through the ring. */ - __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0); - - fw_wrfl(fbd); + __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0); /* We then fill the rest of the ring starting at the end and moving * back toward descriptor 0 with skip descriptors that have no * length nor address, and tell the firmware that they can skip * them and just move past them to the one we initialized to 0. */ - for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) { - __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx, - FBNIC_IPC_MBX_DESC_FW_CMPL | - FBNIC_IPC_MBX_DESC_HOST_CMPL); - fw_wrfl(fbd); - } + for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) + __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx, + FBNIC_IPC_MBX_DESC_FW_CMPL | + FBNIC_IPC_MBX_DESC_HOST_CMPL); } void fbnic_mbx_init(struct fbnic_dev *fbd) @@ -76,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd) wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) - fbnic_mbx_init_desc_ring(fbd, i); + fbnic_mbx_reset_desc_ring(fbd, i); } static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, @@ -141,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { int i; - fbnic_mbx_init_desc_ring(fbd, mbx_idx); + fbnic_mbx_reset_desc_ring(fbd, mbx_idx); for (i = FBNIC_IPC_MBX_DESC_LEN; i--;) fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i); @@ -322,67 +352,41 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type) return err; } -/** - * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message - * @fbd: FBNIC device structure - * - * Return: NULL on failure to allocate, error pointer on error, or pointer - * to new TLV test message. - * - * Sends a single TLV header indicating the host wants the firmware to - * confirm the capabilities and version. 
- **/ -static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd) -{ - int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); - - /* Return 0 if we are not calling this on ASIC */ - return (err == -EOPNOTSUPP) ? 0 : err; -} - -static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) { struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; - /* This is a one time init, so just exit if it is completed */ - if (mbx->ready) - return; - mbx->ready = true; switch (mbx_idx) { case FBNIC_IPC_MBX_RX_IDX: + /* Enable DMA writes from the device */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, + FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); + /* Make sure we have a page for the FW to write to */ fbnic_mbx_alloc_rx_msgs(fbd); break; case FBNIC_IPC_MBX_TX_IDX: - /* Force version to 1 if we successfully requested an update - * from the firmware. This should be overwritten once we get - * the actual version from the firmware in the capabilities - * request message. - */ - if (!fbnic_fw_xmit_cap_msg(fbd) && - !fbd->fw_cap.running.mgmt.version) - fbd->fw_cap.running.mgmt.version = 1; + /* Enable DMA reads from the device */ + wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, + FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); break; } } -static void fbnic_mbx_postinit(struct fbnic_dev *fbd) +static bool fbnic_mbx_event(struct fbnic_dev *fbd) { - int i; - - /* We only need to do this on the first interrupt following init. + /* We only need to do this on the first interrupt following reset. * this primes the mailbox so that we will have cleared all the * skip descriptors. */ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY))) - return; + return false; wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); - for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) - fbnic_mbx_postinit_desc_ring(fbd, i); + return true; } /** @@ -859,7 +863,7 @@ next_page: void fbnic_mbx_poll(struct fbnic_dev *fbd) { - fbnic_mbx_postinit(fbd); + fbnic_mbx_event(fbd); fbnic_mbx_process_tx_msgs(fbd); fbnic_mbx_process_rx_msgs(fbd); @@ -867,60 +871,97 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd) int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd) { - struct fbnic_fw_mbx *tx_mbx; - int attempts = 50; + unsigned long timeout = jiffies + 10 * HZ + 1; + int err, i; - /* Immediate fail if BAR4 isn't there */ - if (!fbnic_fw_present(fbd)) - return -ENODEV; + do { + if (!time_is_after_jiffies(timeout)) + return -ETIMEDOUT; - tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - while (!tx_mbx->ready && --attempts) { /* Force the firmware to trigger an interrupt response to * avoid the mailbox getting stuck closed if the interrupt * is reset. */ - fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); + fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX); - msleep(200); + /* Immediate fail if BAR4 went away */ + if (!fbnic_fw_present(fbd)) + return -ENODEV; - fbnic_mbx_poll(fbd); - } + msleep(20); + } while (!fbnic_mbx_event(fbd)); + + /* FW has shown signs of life. Enable DMA and start Tx/Rx */ + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_init_desc_ring(fbd, i); + + /* Request an update from the firmware. This should overwrite + * mgmt.version once we get the actual version from the firmware + * in the capabilities request message. 
+ */ + err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ); + if (err) + goto clean_mbx; + + /* Use "1" to indicate we entered the state waiting for a response */ + fbd->fw_cap.running.mgmt.version = 1; + + return 0; +clean_mbx: + /* Cleanup Rx buffers and disable mailbox */ + fbnic_mbx_clean(fbd); + return err; +} + +static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data) +{ + cmpl_data->result = -EPIPE; + complete(&cmpl_data->done); +} - return attempts ? 0 : -ETIMEDOUT; +static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd) +{ + if (fbd->cmpl_data) { + __fbnic_fw_evict_cmpl(fbd->cmpl_data); + fbd->cmpl_data = NULL; + } } void fbnic_mbx_flush_tx(struct fbnic_dev *fbd) { + unsigned long timeout = jiffies + 10 * HZ + 1; struct fbnic_fw_mbx *tx_mbx; - int attempts = 50; - u8 count = 0; - - /* Nothing to do if there is no mailbox */ - if (!fbnic_fw_present(fbd)) - return; + u8 tail; /* Record current Rx stats */ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX]; - /* Nothing to do if mailbox never got to ready */ - if (!tx_mbx->ready) - return; + spin_lock_irq(&fbd->fw_tx_lock); + + /* Clear ready to prevent any further attempts to transmit */ + tx_mbx->ready = false; + + /* Read tail to determine the last tail state for the ring */ + tail = tx_mbx->tail; + + /* Flush any completions as we are no longer processing Rx */ + fbnic_mbx_evict_all_cmpl(fbd); + + spin_unlock_irq(&fbd->fw_tx_lock); /* Give firmware time to process packet, - * we will wait up to 10 seconds which is 50 waits of 200ms. + * we will wait up to 10 seconds which is 500 waits of 20ms. */ do { u8 head = tx_mbx->head; - if (head == tx_mbx->tail) + /* Tx ring is empty once head == tail */ + if (head == tail) break; - msleep(200); + msleep(20); fbnic_mbx_process_tx_msgs(fbd); - - count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN; - } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts); + } while (time_is_after_jiffies(timeout)); } void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c index 1bbc0e56f3a0..1c88a2bf3a7a 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -19,69 +19,105 @@ static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data) return IRQ_HANDLED; } +static int __fbnic_fw_enable_mbx(struct fbnic_dev *fbd, int vector) +{ + int err; + + /* Initialize mailbox and attempt to poll it into ready state */ + fbnic_mbx_init(fbd); + err = fbnic_mbx_poll_tx_ready(fbd); + if (err) { + dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + return err; + } + + /* Enable interrupt and unmask the vector */ + enable_irq(vector); + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return 0; +} + /** - * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox + * fbnic_fw_request_mbx - Configure and initialize Firmware Mailbox * @fbd: Pointer to device to initialize * - * This function will initialize the firmware mailbox rings, enable the IRQ - * and initialize the communication between the Firmware and the host. The - * firmware is expected to respond to the initialization by sending an - * interrupt essentially notifying the host that it has seen the - * initialization and is now synced up. + * This function will allocate the IRQ and then reinitialize the mailbox + * starting communication between the host and firmware. * * Return: non-zero on failure. 
**/ -int fbnic_fw_enable_mbx(struct fbnic_dev *fbd) +int fbnic_fw_request_mbx(struct fbnic_dev *fbd) { - u32 vector = fbd->fw_msix_vector; - int err; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int vector, err; + + WARN_ON(fbd->fw_msix_vector); + + vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); + if (vector < 0) + return vector; /* Request the IRQ for FW Mailbox vector. */ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr, - IRQF_ONESHOT, dev_name(fbd->dev), fbd); + IRQF_ONESHOT | IRQF_NO_AUTOEN, + dev_name(fbd->dev), fbd); if (err) return err; /* Initialize mailbox and attempt to poll it into ready state */ - fbnic_mbx_init(fbd); - err = fbnic_mbx_poll_tx_ready(fbd); - if (err) { - dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + err = __fbnic_fw_enable_mbx(fbd, vector); + if (err) free_irq(vector, fbd); - return err; - } - /* Enable interrupts */ - fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + fbd->fw_msix_vector = vector; - return 0; + return err; } /** - * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state - * @fbd: Pointer to device to disable + * fbnic_fw_disable_mbx - Temporarily place mailbox in standby state + * @fbd: Pointer to device * - * This function will disable the mailbox interrupt, free any messages still - * in the mailbox and place it into a standby state. The firmware is - * expected to see the update and assume that the host is in the reset state. + * Shutdown the mailbox by notifying the firmware to stop sending us logs, mask + * and synchronize the IRQ, and then clean up the rings. **/ -void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) +static void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) { - /* Disable interrupt and free vector */ - fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); + /* Disable interrupt and synchronize the IRQ */ + disable_irq(fbd->fw_msix_vector); - /* Free the vector */ - free_irq(fbd->fw_msix_vector, fbd); + /* Mask the vector */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); /* Make sure disabling logs message is sent, must be done here to * avoid risk of completing without a running interrupt. */ fbnic_mbx_flush_tx(fbd); - - /* Reset the mailboxes to the initialized state */ fbnic_mbx_clean(fbd); } +/** + * fbnic_fw_free_mbx - Disable mailbox and place it in standby state + * @fbd: Pointer to device to disable + * + * This function will disable the mailbox interrupt, free any messages still + * in the mailbox and place it into a disabled state. The firmware is + * expected to see the update and assume that the host is in the reset state. + **/ +void fbnic_fw_free_mbx(struct fbnic_dev *fbd) +{ + /* Vector has already been freed */ + if (!fbd->fw_msix_vector) + return; + + fbnic_fw_disable_mbx(fbd); + + /* Free the vector */ + free_irq(fbd->fw_msix_vector, fbd); + fbd->fw_msix_vector = 0; +} + static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) { struct fbnic_dev *fbd = data; @@ -101,7 +137,7 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) } /** - * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link + * fbnic_pcs_request_irq - Configure the PCS to enable it to advertise link * @fbd: Pointer to device to initialize * * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ @@ -109,41 +145,61 @@ static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) * * Return: non-zero on failure. 
**/ -int fbnic_pcs_irq_enable(struct fbnic_dev *fbd) +int fbnic_pcs_request_irq(struct fbnic_dev *fbd) { - u32 vector = fbd->pcs_msix_vector; - int err; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int vector, err; - /* Request the IRQ for MAC link vector. - * Map MAC cause to it, and unmask it + WARN_ON(fbd->pcs_msix_vector); + + vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); + if (vector < 0) + return vector; + + /* Request the IRQ for PCS link vector. + * Map PCS cause to it, and unmask it */ err = request_irq(vector, &fbnic_pcs_msix_intr, 0, fbd->netdev->name, fbd); if (err) return err; + /* Map and enable interrupt, unmask vector after link is configured */ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE); + fbd->pcs_msix_vector = vector; + return 0; } /** - * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping + * fbnic_pcs_free_irq - Teardown the PCS IRQ to prepare for stopping * @fbd: Pointer to device that is stopping * - * This function undoes the work done in fbnic_pcs_irq_enable and prepares + * This function undoes the work done in fbnic_pcs_request_irq and prepares * the device to no longer receive traffic on the host interface. **/ -void fbnic_pcs_irq_disable(struct fbnic_dev *fbd) +void fbnic_pcs_free_irq(struct fbnic_dev *fbd) { + /* Vector has already been freed */ + if (!fbd->pcs_msix_vector) + return; + /* Disable interrupt */ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), FBNIC_PCS_MSIX_ENTRY); + fbnic_wrfl(fbd); + + /* Synchronize IRQ to prevent race that would unmask vector */ + synchronize_irq(fbd->pcs_msix_vector); + + /* Mask the vector */ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY); /* Free the vector */ free_irq(fbd->pcs_msix_vector, fbd); + fbd->pcs_msix_vector = 0; } void fbnic_synchronize_irq(struct fbnic_dev *fbd, int nr) @@ -226,9 +282,6 @@ void fbnic_free_irqs(struct fbnic_dev *fbd) { struct pci_dev *pdev = to_pci_dev(fbd->dev); - fbd->pcs_msix_vector = 0; - fbd->fw_msix_vector = 0; - fbd->num_irqs = 0; pci_free_irq_vectors(pdev); @@ -254,8 +307,5 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd) fbd->num_irqs = num_irqs; - fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); - fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); - return 0; } diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c index 14291401f463..dde4a37116e2 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd) fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq); fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps); fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps); - - /* Enable XALI AR/AW outbound */ - wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG, - FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME); - wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG, - FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME); } static void fbnic_mac_init_qm(struct fbnic_dev *fbd) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 79a01fdd1dd1..2524d9b88d59 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -44,9 +44,10 @@ int __fbnic_open(struct fbnic_net *fbn) if (err) goto time_stop; - err = fbnic_pcs_irq_enable(fbd); + err = fbnic_pcs_request_irq(fbd); if (err) goto time_stop; + /* Pull 
the BMC config and initialize the RPC */ fbnic_bmc_rpc_init(fbd); fbnic_rss_reinit(fbd, fbn); @@ -82,7 +83,7 @@ static int fbnic_stop(struct net_device *netdev) struct fbnic_net *fbn = netdev_priv(netdev); fbnic_down(fbn); - fbnic_pcs_irq_disable(fbn->fbd); + fbnic_pcs_free_irq(fbn->fbd); fbnic_time_stop(fbn); fbnic_fw_xmit_ownership_msg(fbn->fbd, false); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c index 6cbbc2ee3e1f..4e8595239c0f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -283,7 +283,7 @@ static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto free_irqs; } - err = fbnic_fw_enable_mbx(fbd); + err = fbnic_fw_request_mbx(fbd); if (err) { dev_err(&pdev->dev, "Firmware mailbox initialization failure\n"); @@ -363,7 +363,7 @@ static void fbnic_remove(struct pci_dev *pdev) fbnic_hwmon_unregister(fbd); fbnic_dbg_fbd_exit(fbd); fbnic_devlink_unregister(fbd); - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); fbnic_free_irqs(fbd); fbnic_devlink_free(fbd); @@ -387,7 +387,7 @@ static int fbnic_pm_suspend(struct device *dev) rtnl_unlock(); null_uc_addr: - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); /* Free the IRQs so they aren't trying to occupy sleeping CPUs */ fbnic_free_irqs(fbd); @@ -420,7 +420,7 @@ static int __fbnic_pm_resume(struct device *dev) fbd->mac->init_regs(fbd); /* Re-enable mailbox */ - err = fbnic_fw_enable_mbx(fbd); + err = fbnic_fw_request_mbx(fbd); if (err) goto err_free_irqs; @@ -438,15 +438,15 @@ static int __fbnic_pm_resume(struct device *dev) if (netif_running(netdev)) { err = __fbnic_open(fbn); if (err) - goto err_disable_mbx; + goto err_free_mbx; } rtnl_unlock(); return 0; -err_disable_mbx: +err_free_mbx: rtnl_unlock(); - fbnic_fw_disable_mbx(fbd); + fbnic_fw_free_mbx(fbd); err_free_irqs: fbnic_free_irqs(fbd); err_invalidate_uc_addr: diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 23760b613d3e..e2d6bfb5d693 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1815,6 +1815,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, if (nr_frags <= 0) { tx->frame_data0 |= TX_DESC_DATA0_LS_; tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx->frame_last = tx->frame_first; } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); @@ -1884,6 +1885,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, tx->frame_first = 0; tx->frame_data0 = 0; tx->frame_tail = 0; + tx->frame_last = 0; return -ENOMEM; } @@ -1924,16 +1926,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, TX_DESC_DATA0_DTYPE_DATA_) { tx->frame_data0 |= TX_DESC_DATA0_LS_; tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx->frame_last = tx->frame_tail; } - tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; - buffer_info = &tx->buffer_info[tx->frame_tail]; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last]; + buffer_info = &tx->buffer_info[tx->frame_last]; buffer_info->skb = skb; if (time_stamp) buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; if (ignore_sync) buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); tx->last_tail = tx->frame_tail; diff --git 
a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 7f73d66854be..db5fc73e41cc 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -980,6 +980,7 @@ struct lan743x_tx { u32 frame_first; u32 frame_data0; u32 frame_tail; + u32 frame_last; struct lan743x_tx_buffer_info *buffer_info; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index ef93df520887..08bee56aea35 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -830,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; int err; /* Ignore VID 0 added to our RX filter by the 8021q module, since @@ -849,6 +850,11 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, ocelot_bridge_vlan_find(ocelot, vid)); if (err) return err; + } else if (ocelot_port->pvid_vlan && + ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) { + err = ocelot_port_set_pvid(ocelot, port, NULL); + if (err) + return err; } /* Untagged egress vlan clasification */ diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 2aacc1996796..55b8d3666153 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1925,8 +1925,8 @@ static u16 rtase_calc_time_mitigation(u32 time_us) time_us = min_t(int, time_us, RTASE_MITI_MAX_TIME); - msb = fls(time_us); - if (msb >= RTASE_MITI_COUNT_BIT_NUM) { + if (time_us > RTASE_MITI_TIME_COUNT_MASK) { + msb = fls(time_us); time_unit = msb - RTASE_MITI_COUNT_BIT_NUM; time_count = time_us >> (msb - RTASE_MITI_COUNT_BIT_NUM); } else { diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index b4be76e13a2f..d88a0180294e 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -187,7 +187,6 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, xdp_return_frame(xdpf); break; default: - netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type); prueth_xmit_free(tx_chn, desc_tx); ndev->stats.tx_dropped++; continue; @@ -567,6 +566,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, { struct cppi5_host_desc_t *first_desc; struct net_device *ndev = emac->ndev; + struct netdev_queue *netif_txq; struct prueth_tx_chn *tx_chn; dma_addr_t desc_dma, buf_dma; struct prueth_swdata *swdata; @@ -620,12 +620,17 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, swdata->data.xdpf = xdpf; } + /* Report BQL before sending the packet */ + netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); + netdev_tx_sent_queue(netif_txq, xdpf->len); + cppi5_hdesc_set_pktlen(first_desc, xdpf->len); desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); if (ret) { netdev_err(ndev, "xdp tx: push failed: %d\n", ret); + netdev_tx_completed_queue(netif_txq, 1, xdpf->len); goto drop_free_descs; } @@ -650,6 +655,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, struct page *page, u32 *len) { struct net_device *ndev = emac->ndev; + struct netdev_queue *netif_txq; + int cpu = smp_processor_id(); struct bpf_prog *xdp_prog; struct xdp_frame *xdpf; u32 pkt_len = *len; @@ -669,8 
+676,11 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, goto drop; } - q_idx = smp_processor_id() % emac->tx_ch_num; + q_idx = cpu % emac->tx_ch_num; + netif_txq = netdev_get_tx_queue(ndev, q_idx); + __netif_tx_lock(netif_txq, cpu); result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx); + __netif_tx_unlock(netif_txq); if (result == ICSSG_XDP_CONSUMED) { ndev->stats.tx_dropped++; goto drop; @@ -979,6 +989,7 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); if (ret) { netdev_err(ndev, "tx: push failed: %d\n", ret); + netdev_tx_completed_queue(netif_txq, 1, pkt_len); goto drop_free_descs; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 443f90fa6557..86fc1278127c 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1075,17 +1075,21 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame { struct prueth_emac *emac = netdev_priv(dev); struct net_device *ndev = emac->ndev; + struct netdev_queue *netif_txq; + int cpu = smp_processor_id(); struct xdp_frame *xdpf; unsigned int q_idx; int nxmit = 0; u32 err; int i; - q_idx = smp_processor_id() % emac->tx_ch_num; + q_idx = cpu % emac->tx_ch_num; + netif_txq = netdev_get_tx_queue(ndev, q_idx); if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; + __netif_tx_lock(netif_txq, cpu); for (i = 0; i < n; i++) { xdpf = frames[i]; err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx); @@ -1095,6 +1099,7 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame } nxmit++; } + __netif_tx_unlock(netif_txq); return nxmit; } @@ -1109,11 +1114,6 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf) { struct bpf_prog *prog = bpf->prog; - xdp_features_t val; - - val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT; - xdp_set_features_flag(emac->ndev, val); if (!emac->xdpi.prog && !prog) return 0; @@ -1291,6 +1291,10 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; ndev->hw_features |= NETIF_PRUETH_HSR_OFFLOAD_FEATURES; + xdp_set_features_flag(ndev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT); netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC, diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index 89dc4c401a8d..e4d993f31374 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> @@ -33,7 +34,7 @@ #define CMD_CTR (0x2 << CMD_SHIFT) #define CMD_MASK GENMASK(15, CMD_SHIFT) -#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) +#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0) #define DET_CMD_LEN 4 #define DET_SOF_LEN 2 @@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, } static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, - unsigned int frame_len) + unsigned int frame_len, bool drop) { struct mse102x_net_spi 
*mses = to_mse102x_spi(mse); struct spi_transfer *xfer = &mses->spi_xfer; @@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", __func__, ret); mse->stats.xfer_err++; + } else if (drop) { + netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__); + ret = -EINVAL; } else if (*sof != cpu_to_be16(DET_SOF)) { netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", __func__, *sof); @@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) struct sk_buff *skb; unsigned int rxalign; unsigned int rxlen; + bool drop = false; __be16 rx = 0; u16 cmd_resp; u8 *rxpkt; @@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", __func__, cmd_resp); mse->stats.invalid_rts++; - return; + drop = true; + goto drop; } net_dbg_ratelimited("%s: Unexpected response to first CMD\n", @@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) } rxlen = cmd_resp & LEN_MASK; - if (!rxlen) { - net_dbg_ratelimited("%s: No frame length defined\n", __func__); + if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) { + net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__, + rxlen); mse->stats.invalid_len++; - return; + drop = true; } + /* In case of a invalid CMD_RTS, the frame must be consumed anyway. + * So assume the maximum possible frame length. + */ +drop: + if (drop) + rxlen = VLAN_ETH_FRAME_LEN; + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); if (!skb) @@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) * They are copied, but ignored. */ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; - if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { mse->ndev->stats.rx_errors++; dev_kfree_skb(skb); return; @@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse) static int mse102x_net_open(struct net_device *ndev) { struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); int ret; ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, @@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev) netif_carrier_on(ndev); + /* The SPI interrupt can stuck in case of pending packet(s). + * So poll for possible packet(s) to re-arm the interrupt. + */ + mutex_lock(&mses->lock); + mse102x_rx_pkt_spi(mse); + mutex_unlock(&mses->lock); + netif_dbg(mse, ifup, ndev, "network device up\n"); return 0; diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c index 00c66240136b..3dd12a8c8b03 100644 --- a/drivers/net/mdio/mdio-mux-meson-gxl.c +++ b/drivers/net/mdio/mdio-mux-meson-gxl.c @@ -17,6 +17,7 @@ #define REG2_LEDACT GENMASK(23, 22) #define REG2_LEDLINK GENMASK(25, 24) #define REG2_DIV4SEL BIT(27) +#define REG2_REVERSED BIT(28) #define REG2_ADCBYPASS BIT(30) #define REG2_CLKINSEL BIT(31) #define ETH_REG3 0x4 @@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv) * The only constraint is that it must match the one in * drivers/net/phy/meson-gxl.c to properly match the PHY. 
*/ - writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), + writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), priv->regs + ETH_REG2); /* Enable the internal phy */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index bb0bf1415872..7b3739b29c8f 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -630,16 +630,6 @@ static const struct driver_info zte_rndis_info = { .tx_fixup = rndis_tx_fixup, }; -static const struct driver_info wwan_rndis_info = { - .description = "Mobile Broadband RNDIS device", - .flags = FLAG_WWAN | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT, - .bind = rndis_bind, - .unbind = rndis_unbind, - .status = rndis_status, - .rx_fixup = rndis_rx_fixup, - .tx_fixup = rndis_tx_fixup, -}; - /*-------------------------------------------------------------------------*/ static const struct usb_device_id products [] = { @@ -676,11 +666,9 @@ static const struct usb_device_id products [] = { USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), .driver_info = (unsigned long) &rndis_info, }, { - /* Mobile Broadband Modem, seen in Novatel Verizon USB730L and - * Telit FN990A (RNDIS) - */ + /* Novatel Verizon USB730L */ USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), - .driver_info = (unsigned long)&wwan_rndis_info, + .driver_info = (unsigned long) &rndis_info, }, { }, // END }; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 848fab51dfa1..e53ba600605a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3383,12 +3383,15 @@ static void __virtnet_rx_resume(struct virtnet_info *vi, bool refill) { bool running = netif_running(vi->dev); + bool schedule_refill = false; if (refill && !try_fill_recv(vi, rq, GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - + schedule_refill = true; if (running) virtnet_napi_enable(rq); + + if (schedule_refill) + schedule_delayed_work(&vi->refill, 0); } static void virtnet_rx_resume_all(struct virtnet_info *vi) @@ -3728,8 +3731,10 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) succ: vi->curr_queue_pairs = queue_pairs; /* virtnet_open() will refill when device is going to up. 
*/ - if (dev->flags & IFF_UP) + spin_lock_bh(&vi->refill_lock); + if (dev->flags & IFF_UP && vi->refill_enabled) schedule_delayed_work(&vi->refill, 0); + spin_unlock_bh(&vi->refill_lock); return 0; } @@ -5673,6 +5678,10 @@ static void virtnet_get_base_stats(struct net_device *dev, if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) tx->hw_drop_ratelimits = 0; + + netdev_stat_queue_sum(dev, + dev->real_num_rx_queues, vi->max_queue_pairs, rx, + dev->real_num_tx_queues, vi->max_queue_pairs, tx); } static const struct netdev_stat_ops virtnet_stat_ops = { @@ -5885,8 +5894,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev, hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len, DMA_TO_DEVICE, 0); - if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) - return -ENOMEM; + if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) { + err = -ENOMEM; + goto err_free_buffs; + } err = xsk_pool_dma_map(pool, dma_dev, 0); if (err) @@ -5914,6 +5925,8 @@ err_rq: err_xsk_map: virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len, DMA_TO_DEVICE, 0); +err_free_buffs: + kvfree(rq->xsk_buffs); return err; } diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c index 616ecc38d172..5f470499e600 100644 --- a/drivers/net/vmxnet3/vmxnet3_xdp.c +++ b/drivers/net/vmxnet3/vmxnet3_xdp.c @@ -397,7 +397,7 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter, xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq); xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset, - rbi->len, false); + rcd->len, false); xdp_buff_clear_frags_flag(&xdp); xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog); diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index 6e6e9f05509a..06d19e90eadb 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, * default dst remote_ip previously added for this vni */ if (!vxlan_addr_any(&vninode->remote_ip) || - !vxlan_addr_any(&dst->remote_ip)) + !vxlan_addr_any(&dst->remote_ip)) { + u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, + vninode->vni); + + spin_lock_bh(&vxlan->hash_lock[hash_index]); __vxlan_fdb_delete(vxlan, all_zeros_mac, (vxlan_addr_any(&vninode->remote_ip) ? 
dst->remote_ip : vninode->remote_ip), @@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, vninode->vni, vninode->vni, dst->remote_ifindex, true); + spin_unlock_bh(&vxlan->hash_lock[hash_index]); + } if (vxlan->dev->flags & IFF_UP) { if (vxlan_addr_multicast(&vninode->remote_ip) && diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 2821c27f317e..d06c724f63d9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) } /* 1) Prepare USB boot loader for runtime image */ - brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); + err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); + if (err) + goto fail; rdlstate = le32_to_cpu(state.state); rdlbytes = le32_to_cpu(state.bytes); /* 2) Check we are in the Waiting state */ if (rdlstate != DL_WAITING) { - brcmf_err("Failed to DL_START\n"); + brcmf_err("Invalid DL state: %u\n", rdlstate); err = -EINVAL; goto fail; } diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 670031fd60dc..59af36960f9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -142,8 +142,6 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz"; - const struct iwl_cfg iwl_cfg_sc = { .fw_name_mac = "sc", IWL_DEVICE_SC, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index b9bd89bfdd74..acafee538b8a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2025 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -451,8 +451,11 @@ struct iwl_cfg { #define IWL_CFG_RF_ID_HR 0x7 #define IWL_CFG_RF_ID_HR1 0x4 -#define IWL_CFG_BW_NO_LIM (U16_MAX - 1) -#define IWL_CFG_BW_ANY U16_MAX +#define IWL_CFG_NO_160 0x1 +#define IWL_CFG_160 0x0 + +#define IWL_CFG_NO_320 0x1 +#define IWL_CFG_320 0x0 #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 @@ -464,7 +467,7 @@ struct iwl_cfg { #define IWL_CFG_IS_JACKET 0x1 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) -#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9) +#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { @@ -472,10 +475,10 @@ struct iwl_dev_info { u16 subdevice; u16 mac_type; u16 rf_type; - u16 bw_limit; u8 mac_step; u8 rf_step; u8 rf_id; + u8 no_160; u8 cores; u8 cdb; u8 jacket; @@ -489,7 +492,7 @@ extern const unsigned int iwl_dev_info_table_size; const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step); + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); extern const struct pci_device_id iwl_hw_card_ids[]; #endif @@ -550,7 +553,6 @@ extern const char iwl_ax231_name[]; extern const 
char iwl_ax411_name[]; extern const char iwl_fm_name[]; extern const char iwl_wh_name[]; -extern const char iwl_sp_name[]; extern const char iwl_gl_name[]; extern const char iwl_mtp_name[]; extern const char iwl_dr_name[]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index be9e464c9b7b..3ff493e920d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -148,6 +148,7 @@ * during a error FW error. */ #define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101) +#define CSR_FUNC_SCRATCH_POWER_OFF_MASK 0xFFFF /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH (0x0000000F) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index cd1b0048bb6d..c381511e9ec6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -944,8 +944,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); break; case NL80211_BAND_6GHZ: - if (!trans->reduced_cap_sku && - trans->bw_limit >= 320) { + if (!trans->reduced_cap_sku) { iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= @@ -1095,22 +1094,19 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0; } - if (trans->bw_limit < 160) + if (trans->no_160) iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; - if (trans->bw_limit < 320 || trans->reduced_cap_sku) { + if (trans->reduced_cap_sku) { memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0, sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320)); - iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= - ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; - } - - if (trans->reduced_cap_sku) { iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &= ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA; + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= + ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; } } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index c1607b6d0759..e7b2e08645ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -21,6 +21,7 @@ struct iwl_trans_dev_restart_data { struct list_head list; unsigned int restart_count; time64_t last_error; + bool backoff; char name[]; }; @@ -125,13 +126,20 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) if (!data) return at_least; - if (ktime_get_boottime_seconds() - data->last_error >= + if (!data->backoff && + ktime_get_boottime_seconds() - data->last_error >= IWL_TRANS_RESET_OK_TIME) data->restart_count = 0; index = data->restart_count; - if (index >= ARRAY_SIZE(escalation_list)) + if (index >= ARRAY_SIZE(escalation_list)) { index = ARRAY_SIZE(escalation_list) - 1; + if 
(!data->backoff) { + data->backoff = true; + return IWL_RESET_MODE_BACKOFF; + } + data->backoff = false; + } return max(at_least, escalation_list[index]); } @@ -140,7 +148,8 @@ iwl_trans_determine_restart_mode(struct iwl_trans *trans) static void iwl_trans_restart_wk(struct work_struct *wk) { - struct iwl_trans *trans = container_of(wk, typeof(*trans), restart.wk); + struct iwl_trans *trans = container_of(wk, typeof(*trans), + restart.wk.work); struct iwl_trans_reprobe *reprobe; enum iwl_reset_mode mode; @@ -168,6 +177,12 @@ static void iwl_trans_restart_wk(struct work_struct *wk) return; mode = iwl_trans_determine_restart_mode(trans); + if (mode == IWL_RESET_MODE_BACKOFF) { + IWL_ERR(trans, "Too many device errors - delay next reset\n"); + queue_delayed_work(system_unbound_wq, &trans->restart.wk, + IWL_TRANS_RESET_DELAY); + return; + } iwl_trans_inc_restart_count(trans->dev); @@ -227,7 +242,7 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, trans->dev = dev; trans->num_rx_queues = 1; - INIT_WORK(&trans->restart.wk, iwl_trans_restart_wk); + INIT_DELAYED_WORK(&trans->restart.wk, iwl_trans_restart_wk); return trans; } @@ -271,7 +286,7 @@ int iwl_trans_init(struct iwl_trans *trans) void iwl_trans_free(struct iwl_trans *trans) { - cancel_work_sync(&trans->restart.wk); + cancel_delayed_work_sync(&trans->restart.wk); kmem_cache_destroy(trans->dev_cmd_pool); } @@ -403,7 +418,7 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans) iwl_trans_pcie_op_mode_leave(trans); - cancel_work_sync(&trans->restart.wk); + cancel_delayed_work_sync(&trans->restart.wk); trans->op_mode = NULL; @@ -540,7 +555,6 @@ void __releases(nic_access) iwl_trans_release_nic_access(struct iwl_trans *trans) { iwl_trans_pcie_release_nic_access(trans); - __release(nic_access); } IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 25fb4c50e38b..ce4954b0d524 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -876,7 +876,7 @@ struct iwl_txq { * only valid for discrete (not integrated) NICs * @invalid_tx_cmd: invalid TX command buffer * @reduced_cap_sku: reduced capability supported SKU - * @bw_limit: the max bandwidth + * @no_160: device not supporting 160 MHz * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz * @restart: restart worker data * @restart.wk: restart worker @@ -911,8 +911,7 @@ struct iwl_trans { char hw_id_str[52]; u32 sku_id[3]; bool reduced_cap_sku; - u16 bw_limit; - bool step_urm; + u8 no_160:1, step_urm:1; u8 dsbr_urm_fw_dependent:1, dsbr_urm_permanent:1; @@ -962,7 +961,7 @@ struct iwl_trans { struct iwl_dma_ptr invalid_tx_cmd; struct { - struct work_struct wk; + struct delayed_work wk; struct iwl_fw_error_dump_mode mode; bool during_reset; } restart; @@ -1163,7 +1162,7 @@ static inline void iwl_trans_schedule_reset(struct iwl_trans *trans, */ trans->restart.during_reset = test_bit(STATUS_IN_SW_RESET, &trans->status); - queue_work(system_unbound_wq, &trans->restart.wk); + queue_delayed_work(system_unbound_wq, &trans->restart.wk, 0); } static inline void iwl_trans_fw_error(struct iwl_trans *trans, @@ -1262,6 +1261,9 @@ 
enum iwl_reset_mode { IWL_RESET_MODE_RESCAN, IWL_RESET_MODE_FUNC_RESET, IWL_RESET_MODE_PROD_RESET, + + /* keep last - special backoff value */ + IWL_RESET_MODE_BACKOFF, }; void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c index db9e0f04f4b7..687a9450ac98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/agg.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c @@ -124,9 +124,9 @@ void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld, rcu_read_lock(); baid_data = rcu_dereference(mld->fw_id_to_ba[baid]); - if (!IWL_FW_CHECK(mld, !baid_data, - "Got valid BAID %d but not allocated, invalid BAR release!\n", - baid)) + if (IWL_FW_CHECK(mld, !baid_data, + "Got valid BAID %d but not allocated, invalid BAR release!\n", + baid)) goto out_unlock; if (IWL_FW_CHECK(mld, tid != baid_data->tid || diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c index 89d95e9b4f30..93f9f78e4276 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c @@ -949,8 +949,9 @@ void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw, snprintf(name, sizeof(name), "%pd", vif->debugfs_dir); snprintf(target, sizeof(target), "../../../%pd3/iwlmld", vif->debugfs_dir); - mld_vif->dbgfs_slink = - debugfs_create_symlink(name, mld->debugfs_dir, target); + if (!mld_vif->dbgfs_slink) + mld_vif->dbgfs_slink = + debugfs_create_symlink(name, mld->debugfs_dir, target); if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && vif->type == NL80211_IFTYPE_STATION) { diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c index 62da137e1024..4b083d447ee2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c @@ -333,19 +333,22 @@ int iwl_mld_load_fw(struct iwl_mld *mld) ret = iwl_trans_start_hw(mld->trans); if (ret) - return ret; + goto err; ret = iwl_mld_run_fw_init_sequence(mld); if (ret) - return ret; + goto err; ret = iwl_mld_init_mcc(mld); if (ret) - return ret; + goto err; mld->fw_status.running = true; return 0; +err: + iwl_mld_stop_fw(mld); + return ret; } void iwl_mld_stop_fw(struct iwl_mld *mld) @@ -358,6 +361,10 @@ void iwl_mld_stop_fw(struct iwl_mld *mld) iwl_trans_stop_device(mld->trans); + wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk); + + iwl_mld_purge_async_handlers_list(mld); + mld->fw_status.running = false; } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c index 99e13cfd1e5f..68d97d3b8f02 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c @@ -651,6 +651,7 @@ void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw, #ifdef CONFIG_IWLWIFI_DEBUGFS debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink); + iwl_mld_vif_from_mac80211(vif)->dbgfs_slink = NULL; #endif iwl_mld_rm_vif(mld, vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c index d4a99ae64074..73d2166a4c25 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c @@ -75,6 +75,7 @@ void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, /* Setup async RX handling */ spin_lock_init(&mld->async_handlers_lock); + 
INIT_LIST_HEAD(&mld->async_handlers_list); wiphy_work_init(&mld->async_handlers_wk, iwl_mld_async_handlers_wk); @@ -414,9 +415,14 @@ iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, wiphy_unlock(mld->wiphy); rtnl_unlock(); iwl_fw_flush_dumps(&mld->fwrt); - goto free_hw; + goto err; } + /* We are about to stop the FW. Notifications may require an + * operational FW, so handle them all here before we stop. + */ + wiphy_work_flush(mld->wiphy, &mld->async_handlers_wk); + iwl_mld_stop_fw(mld); wiphy_unlock(mld->wiphy); @@ -455,7 +461,8 @@ leds_exit: iwl_mld_leds_exit(mld); free_nvm: kfree(mld->nvm_data); -free_hw: +err: + iwl_trans_op_mode_leave(mld->trans); ieee80211_free_hw(mld->hw); return ERR_PTR(ret); } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h index 5eceaaf7696d..a4a16da6ebf3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/mld.h +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h @@ -298,11 +298,6 @@ iwl_cleanup_mld(struct iwl_mld *mld) #endif iwl_mld_low_latency_restart_cleanup(mld); - - /* Empty the list of async notification handlers so we won't process - * notifications from the dead fw after the reconfig flow. - */ - iwl_mld_purge_async_handlers_list(mld); } enum iwl_power_scheme { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 93446c374008..00056e76ea3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2025 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -552,17 +552,16 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \ + _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ - .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \ + .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \ - _cfg, _name) + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_ANY, _cfg, _name) VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -589,6 +588,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma, 
iwl_ax411_killer_1690s_name), @@ -725,66 +726,66 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ @@ -792,132 +793,132 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, 
iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu with Hr */ @@ -925,189 +926,189 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, 
IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax231_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, 
iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, 
IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), #endif /* CONFIG_IWLMVM */ @@ -1116,13 +1117,13 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1134,119 +1135,104 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_wh_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_wh_name), /* Ga (Gl) */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_gl, iwl_gl_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_gl, iwl_mtp_name), /* Sc */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, 
IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc, iwl_wh_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc, iwl_sp_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2, iwl_wh_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2, iwl_sp_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2f, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2f, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_sc2f, iwl_wh_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - 160, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2f, iwl_sp_name), /* Dr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_dr, iwl_dr_name), /* Br */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_br, iwl_br_name), #endif /* CONFIG_IWLMLD */ }; @@ -1398,7 +1384,7 @@ out: VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step) + u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) { int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; @@ -1441,15 +1427,8 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, dev_info->rf_id != rf_id) continue; - /* - * Check that bw_limit have the same "boolean" value since - * IWL_SUBDEVICE_BW_LIM can only return a boolean value and - * dev_info->bw_limit encodes a non-boolean value. 
- * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to - * !bw_limit to have a match. - */ - if (dev_info->bw_limit != IWL_CFG_BW_ANY && - (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit) + if (dev_info->no_160 != (u8)IWL_CFG_ANY && + dev_info->no_160 != no_160) continue; if (dev_info->cores != (u8)IWL_CFG_ANY && @@ -1587,13 +1566,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), - IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device), + IWL_SUBDEVICE_NO_160(pdev->subsystem_device), IWL_SUBDEVICE_CORES(pdev->subsystem_device), CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; - iwl_trans->bw_limit = dev_info->bw_limit; + iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; } #if IS_ENABLED(CONFIG_IWLMVM) @@ -1759,11 +1738,27 @@ static int _iwl_pci_resume(struct device *device, bool restore) * Scratch value was altered, this means the device was powered off, we * need to reset it completely. * Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan, - * so assume that any bits there mean that the device is usable. + * but not bits [15:8]. So if we have bits set in lower word, assume + * the device is alive. + * For older devices, just try silently to grab the NIC. */ - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ && - !iwl_read32(trans, CSR_FUNC_SCRATCH)) - device_was_powered_off = true; + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) & + CSR_FUNC_SCRATCH_POWER_OFF_MASK)) + device_was_powered_off = true; + } else { + /* + * bh are re-enabled by iwl_trans_pcie_release_nic_access, + * so re-enable them if _iwl_trans_pcie_grab_nic_access fails. 
+ */ + local_bh_disable(); + if (_iwl_trans_pcie_grab_nic_access(trans, true)) { + iwl_trans_pcie_release_nic_access(trans); + } else { + device_was_powered_off = true; + local_bh_enable(); + } + } if (restore || device_was_powered_off) { trans->state = IWL_TRANS_NO_FW; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 45460f93d24a..114a9195ad7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -558,10 +558,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, struct device *dev); -bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); -#define _iwl_trans_pcie_grab_nic_access(trans) \ +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent); +#define _iwl_trans_pcie_grab_nic_access(trans, silent) \ __cond_lock(nic_access_nobh, \ - likely(__iwl_trans_pcie_grab_nic_access(trans))) + likely(__iwl_trans_pcie_grab_nic_access(trans, silent))) void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev); void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev); @@ -1105,7 +1105,8 @@ void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, u32 *val); bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); -void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); +void __releases(nic_access_nobh) +iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); /* transport gen 1 exported functions */ void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index c917ed4c19bc..102a6123bba0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2351,7 +2351,8 @@ void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) struct iwl_trans_pcie_removal *removal; char _msg = 0, *msg = &_msg; - if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY)) + if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY || + mode == IWL_RESET_MODE_BACKOFF)) return; if (test_bit(STATUS_TRANS_DEAD, &trans->status)) @@ -2405,7 +2406,7 @@ EXPORT_SYMBOL(iwl_trans_pcie_reset); * This version doesn't disable BHs but rather assumes they're * already disabled. 
*/ -bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent) { int ret; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2457,6 +2458,11 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) if (unlikely(ret < 0)) { u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); + if (silent) { + spin_unlock(&trans_pcie->reg_lock); + return false; + } + WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", cntrl); @@ -2488,7 +2494,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) bool ret; local_bh_disable(); - ret = __iwl_trans_pcie_grab_nic_access(trans); + ret = __iwl_trans_pcie_grab_nic_access(trans, false); if (ret) { /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ return ret; @@ -2497,7 +2503,8 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) return false; } -void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) +void __releases(nic_access_nobh) +iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2524,6 +2531,7 @@ void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) * scheduled on different CPUs (after we drop reg_lock). */ out: + __release(nic_access_nobh); spin_unlock_bh(&trans_pcie->reg_lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index bb90bcfc6763..9fc4e98693eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1021,7 +1021,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set (see above.) 
*/ - if (!_iwl_trans_pcie_grab_nic_access(trans)) + if (!_iwl_trans_pcie_grab_nic_access(trans, false)) return -EIO; /* diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c index 7ef5e89c6af2..d0bda23c628a 100644 --- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -2,7 +2,7 @@ /* * KUnit tests for the iwlwifi device info table * - * Copyright (C) 2023-2025 Intel Corporation + * Copyright (C) 2023-2024 Intel Corporation */ #include <kunit/test.h> #include <linux/pci.h> @@ -13,9 +13,9 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di) { - printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n", + printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n", pfx, di->device, di->subdevice, di->mac_type, di->mac_step, - di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit, + di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160, di->cores); } @@ -31,13 +31,8 @@ static void devinfo_table_order(struct kunit *test) di->mac_type, di->mac_step, di->rf_type, di->cdb, di->jacket, di->rf_id, - di->bw_limit != IWL_CFG_BW_NO_LIM, - di->cores, di->rf_step); - if (!ret) { - iwl_pci_print_dev_info("No entry found for: ", di); - KUNIT_FAIL(test, - "No entry found for entry at index %d\n", idx); - } else if (ret != di) { + di->no_160, di->cores, di->rf_step); + if (ret != di) { iwl_pci_print_dev_info("searched: ", di); iwl_pci_print_dev_info("found: ", ret); KUNIT_FAIL(test, diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c index eae93efa6150..82d1bf7edba2 100644 --- a/drivers/net/wireless/purelifi/plfxlc/mac.c +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -102,7 +102,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw) void plfxlc_mac_release(struct plfxlc_mac *mac) { plfxlc_chip_release(&mac->chip); - lockdep_assert_held(&mac->lock); } int plfxlc_op_start(struct ieee80211_hw *hw) diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index d47dfa80fb95..4d64b6935bb9 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -102,6 +102,7 @@ config NVME_TCP_TLS depends on NVME_TCP select NET_HANDSHAKE select KEYS + select TLS help Enables TLS encryption for NVMe TCP using the netlink handshake API. 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index eb6ea8acb3cc..ac53629fce68 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4493,7 +4493,8 @@ static void nvme_fw_act_work(struct work_struct *work) msleep(100); } - if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) || + !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) return; nvme_unquiesce_io_queues(ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b178d52eac1b..2e30e9be7408 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3575,7 +3575,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) dev_info(dev->ctrl.device, "restart after slot reset\n"); pci_restore_state(pdev); - if (!nvme_try_sched_reset(&dev->ctrl)) + if (nvme_try_sched_reset(&dev->ctrl)) nvme_unquiesce_io_queues(&dev->ctrl); return PCI_ERS_RESULT_RECOVERED; } @@ -3623,6 +3623,9 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, + { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_BOGUS_NID, }, @@ -3646,6 +3649,9 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ .driver_data = NVME_QUIRK_BROKEN_MSI }, + { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ + .driver_data = NVME_QUIRK_BROKEN_MSI | + NVME_QUIRK_NO_DEEPEST_PS }, { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 72d260201d8c..aba365f97cf6 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1946,7 +1946,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) cancel_work_sync(&queue->io_work); } -static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; @@ -1965,6 +1965,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) mutex_unlock(&queue->queue_lock); } +static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + int timeout = 100; + + while (timeout > 0) { + if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || + !sk_wmem_alloc_get(queue->sock->sk)) + return; + msleep(2); + timeout -= 2; + } + dev_warn(nctrl->device, + "qid %d: timeout draining sock wmem allocation expired\n", + qid); +} + +static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +{ + nvme_tcp_stop_queue_nowait(nctrl, qid); + nvme_tcp_wait_queue(nctrl, qid); +} + + static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) { write_lock_bh(&queue->sock->sk->sk_callback_lock); @@ -2032,7 +2057,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) int i; for (i = 1; i < ctrl->queue_count; i++) - nvme_tcp_stop_queue(ctrl, i); + nvme_tcp_stop_queue_nowait(ctrl, i); + for (i = 1; i < ctrl->queue_count; i++) + nvme_tcp_wait_queue(ctrl, i); } 
static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index fb7446d6d682..4c253b433bf7 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -98,6 +98,7 @@ config NVME_TARGET_TCP_TLS bool "NVMe over Fabrics TCP target TLS encryption support" depends on NVME_TARGET_TCP select NET_HANDSHAKE + select TLS help Enables TLS encryption for the NVMe TCP target using the netlink handshake API. diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index cef8d77f477b..9429b8218408 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -600,13 +600,12 @@ void nvmet_auth_insert_psk(struct nvmet_sq *sq) pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key)); tls_key = NULL; - kfree_sensitive(tls_psk); } if (sq->ctrl->tls_key) key_put(sq->ctrl->tls_key); sq->ctrl->tls_key = tls_key; #endif - + kfree_sensitive(tls_psk); out_free_digest: kfree_sensitive(digest); out_free_psk: diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index f2d0c920269b..12a5cb8641ca 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1560,6 +1560,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; + if (!queue->state_change) + return; + write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 055518ee354d..e9e9aaa91770 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -59,7 +59,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); if (pdev && pci_num_vf(pdev)) { - pci_dev_put(pdev); rc = -EBUSY; goto out; } diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 842a1e6cbfc4..18de31328540 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -37,16 +37,16 @@ static inline const struct group_desc *imx_pinctrl_find_group_by_name( struct pinctrl_dev *pctldev, const char *name) { - const struct group_desc *grp = NULL; + const struct group_desc *grp; int i; for (i = 0; i < pctldev->num_groups; i++) { grp = pinctrl_generic_get_group(pctldev, i); if (grp && !strcmp(grp->grp.name, name)) - break; + return grp; } - return grp; + return NULL; } static void imx_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index ced4ee509b5b..b4eb2beab691 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -449,7 +449,7 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num, return -EOPNOTSUPP; virq = irq_find_mapping(eint->domain, eint_num); - eint_offset = (eint_num % 4) * 8; + eint_offset = (idx % 4) * 8; d = irq_get_irq_data(virq); set_offset = (idx / 4) * 4 + eint->regs->dbnc_set; diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c index 547a798b71c8..5d84a778683d 100644 --- a/drivers/pinctrl/mediatek/pinctrl-airoha.c +++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c @@ -6,6 +6,7 @@ */ #include <dt-bindings/pinctrl/mt65xx.h> +#include <linux/bitfield.h> #include <linux/bits.h> 
#include <linux/cleanup.h> #include <linux/gpio/driver.h> @@ -112,39 +113,19 @@ #define REG_LAN_LED1_MAPPING 0x0280 #define LAN4_LED_MAPPING_MASK GENMASK(18, 16) -#define LAN4_PHY4_LED_MAP BIT(18) -#define LAN4_PHY2_LED_MAP BIT(17) -#define LAN4_PHY1_LED_MAP BIT(16) -#define LAN4_PHY0_LED_MAP 0 -#define LAN4_PHY3_LED_MAP GENMASK(17, 16) +#define LAN4_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN4_LED_MAPPING_MASK, (_n)) #define LAN3_LED_MAPPING_MASK GENMASK(14, 12) -#define LAN3_PHY4_LED_MAP BIT(14) -#define LAN3_PHY2_LED_MAP BIT(13) -#define LAN3_PHY1_LED_MAP BIT(12) -#define LAN3_PHY0_LED_MAP 0 -#define LAN3_PHY3_LED_MAP GENMASK(13, 12) +#define LAN3_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN3_LED_MAPPING_MASK, (_n)) #define LAN2_LED_MAPPING_MASK GENMASK(10, 8) -#define LAN2_PHY4_LED_MAP BIT(12) -#define LAN2_PHY2_LED_MAP BIT(11) -#define LAN2_PHY1_LED_MAP BIT(10) -#define LAN2_PHY0_LED_MAP 0 -#define LAN2_PHY3_LED_MAP GENMASK(11, 10) +#define LAN2_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN2_LED_MAPPING_MASK, (_n)) #define LAN1_LED_MAPPING_MASK GENMASK(6, 4) -#define LAN1_PHY4_LED_MAP BIT(6) -#define LAN1_PHY2_LED_MAP BIT(5) -#define LAN1_PHY1_LED_MAP BIT(4) -#define LAN1_PHY0_LED_MAP 0 -#define LAN1_PHY3_LED_MAP GENMASK(5, 4) +#define LAN1_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN1_LED_MAPPING_MASK, (_n)) #define LAN0_LED_MAPPING_MASK GENMASK(2, 0) -#define LAN0_PHY4_LED_MAP BIT(3) -#define LAN0_PHY2_LED_MAP BIT(2) -#define LAN0_PHY1_LED_MAP BIT(1) -#define LAN0_PHY0_LED_MAP 0 -#define LAN0_PHY3_LED_MAP GENMASK(2, 1) +#define LAN0_PHY_LED_MAP(_n) FIELD_PREP_CONST(LAN0_LED_MAPPING_MASK, (_n)) /* CONF */ #define REG_I2C_SDA_E2 0x001c @@ -1476,8 +1457,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY1_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1491,8 +1472,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY1_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1506,8 +1487,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY1_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1521,8 +1502,8 @@ static const struct airoha_pinctrl_func_group phy1_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY1_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(0) }, .regmap_size = 2, }, @@ -1540,8 +1521,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY2_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1555,8 +1536,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY2_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1570,8 +1551,8 @@ static const struct airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY2_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1585,8 +1566,8 @@ static const struct 
airoha_pinctrl_func_group phy2_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY2_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(1) }, .regmap_size = 2, }, @@ -1604,8 +1585,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY3_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1619,8 +1600,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY3_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1634,8 +1615,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY3_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1649,8 +1630,8 @@ static const struct airoha_pinctrl_func_group phy3_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY3_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(2) }, .regmap_size = 2, }, @@ -1668,8 +1649,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY4_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1683,8 +1664,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY4_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1698,8 +1679,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY4_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1713,8 +1694,8 @@ static const struct airoha_pinctrl_func_group phy4_led0_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED0_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY4_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(3) }, .regmap_size = 2, }, @@ -1732,8 +1713,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY1_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1747,8 +1728,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY1_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1762,8 +1743,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY1_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(0) }, .regmap_size = 2, }, { @@ -1777,8 +1758,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY1_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(0) }, .regmap_size = 2, }, @@ -1796,8 +1777,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, 
REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY2_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1811,8 +1792,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY2_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1826,8 +1807,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY2_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(1) }, .regmap_size = 2, }, { @@ -1841,8 +1822,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY2_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(1) }, .regmap_size = 2, }, @@ -1860,8 +1841,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY3_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1875,8 +1856,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY3_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1890,8 +1871,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY3_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(2) }, .regmap_size = 2, }, { @@ -1905,8 +1886,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY3_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(2) }, .regmap_size = 2, }, @@ -1924,8 +1905,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN1_LED_MAPPING_MASK, - LAN1_PHY4_LED_MAP + LAN0_LED_MAPPING_MASK, + LAN0_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1939,8 +1920,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN2_LED_MAPPING_MASK, - LAN2_PHY4_LED_MAP + LAN1_LED_MAPPING_MASK, + LAN1_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1954,8 +1935,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN3_LED_MAPPING_MASK, - LAN3_PHY4_LED_MAP + LAN2_LED_MAPPING_MASK, + LAN2_PHY_LED_MAP(3) }, .regmap_size = 2, }, { @@ -1969,8 +1950,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = { .regmap[1] = { AIROHA_FUNC_MUX, REG_LAN_LED1_MAPPING, - LAN4_LED_MAPPING_MASK, - LAN4_PHY4_LED_MAP + LAN3_LED_MAPPING_MASK, + LAN3_PHY_LED_MAP(3) }, .regmap_size = 2, }, diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 91edb539925a..8596f3541265 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1015,9 +1015,15 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev) if (!pctl->eint) return -ENOMEM; - pctl->eint->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(pctl->eint->base)) - 
return PTR_ERR(pctl->eint->base); + pctl->eint->nbase = 1; + /* mtk-eint expects an array */ + pctl->eint->base = devm_kzalloc(pctl->dev, sizeof(pctl->eint->base), GFP_KERNEL); + if (!pctl->eint->base) + return -ENOMEM; + + pctl->eint->base[0] = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pctl->eint->base[0])) + return PTR_ERR(pctl->eint->base[0]); pctl->eint->irq = irq_of_parse_and_map(np, 0); if (!pctl->eint->irq) diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 253a0cc57e39..e5a32a0532ee 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_UP: if (meson_pinconf_get_pull(pc, pin) == param) - arg = 1; + arg = 60000; else return -EINVAL; break; diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c index 1af11cd95fb0..b94fb4ee0ec3 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8750.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c @@ -46,7 +46,9 @@ .out_bit = 1, \ .intr_enable_bit = 0, \ .intr_status_bit = 0, \ - .intr_target_bit = 5, \ + .intr_wakeup_present_bit = 6, \ + .intr_wakeup_enable_bit = 7, \ + .intr_target_bit = 8, \ .intr_target_kpss_val = 3, \ .intr_raw_status_bit = 4, \ .intr_polarity_bit = 1, \ diff --git a/drivers/platform/x86/amd/pmc/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c index d789d6cab794..0329fafe14eb 100644 --- a/drivers/platform/x86/amd/pmc/pmc.c +++ b/drivers/platform/x86/amd/pmc/pmc.c @@ -644,10 +644,9 @@ static void amd_pmc_s2idle_check(void) struct smu_metrics table; int rc; - /* CZN: Ensure that future s0i3 entry attempts at least 10ms passed */ - if (pdev->cpu_id == AMD_CPU_ID_CZN && !get_metrics_table(pdev, &table) && - table.s0i3_last_entry_status) - usleep_range(10000, 20000); + /* Avoid triggering OVP */ + if (!get_metrics_table(pdev, &table) && table.s0i3_last_entry_status) + msleep(2500); /* Dump the IdleMask before we add to the STB */ amd_pmc_idlemask_read(pdev, pdev->dev, NULL); diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 38ef778e8c19..0c697b46f436 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -304,6 +304,7 @@ struct asus_wmi { u32 kbd_rgb_dev; bool kbd_rgb_state_available; + bool oobe_state_available; u8 throttle_thermal_policy_mode; u32 throttle_thermal_policy_dev; @@ -1826,7 +1827,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus) goto error; } - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE)) { + if (asus->oobe_state_available) { /* * Disable OOBE state, so that e.g. the keyboard backlight * works. @@ -4723,6 +4724,7 @@ static int asus_wmi_add(struct platform_device *pdev) asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); + asus->oobe_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_OOBE); asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) && dmi_check_system(asus_ally_mcu_quirk); @@ -4970,6 +4972,13 @@ static int asus_hotk_restore(struct device *device) } if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) kbd_led_update(asus); + if (asus->oobe_state_available) { + /* + * Disable OOBE state, so that e.g. the keyboard backlight + * works. 
+ */ + asus_wmi_set_devstate(ASUS_WMI_DEVID_OOBE, 1, NULL); + } if (asus_wmi_has_fnlock_key(asus)) asus_wmi_fnlock_update(asus); diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c index 0c3be03385f8..08b82c151e07 100644 --- a/drivers/platform/x86/dell/alienware-wmi-wmax.c +++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c @@ -70,6 +70,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = { .driver_data = &generic_quirks, }, { + .ident = "Alienware m15 R7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R7"), + }, + .driver_data = &generic_quirks, + }, + { .ident = "Alienware m16 R1", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), @@ -655,12 +663,10 @@ static int thermal_profile_probe(void *drvdata, unsigned long *choices) for (u32 i = 0; i < sys_desc[3]; i++) { ret = wmax_thermal_information(priv->wdev, WMAX_OPERATION_LIST_IDS, i + first_mode, &out_data); - - if (ret == -EIO) - return ret; - if (ret == -EBADRQC) break; + if (ret) + return ret; if (!is_wmax_thermal_code(out_data)) continue; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 17a09b7784ed..ede483573fe0 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1294,6 +1294,16 @@ static const struct key_entry ideapad_keymap[] = { /* Specific to some newer models */ { KE_KEY, 0x3e | IDEAPAD_WMI_KEY, { KEY_MICMUTE } }, { KE_KEY, 0x3f | IDEAPAD_WMI_KEY, { KEY_RFKILL } }, + /* Star- (User Assignable Key) */ + { KE_KEY, 0x44 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, + /* Eye */ + { KE_KEY, 0x45 | IDEAPAD_WMI_KEY, { KEY_PROG3 } }, + /* Performance toggle also Fn+Q, handled inside ideapad_wmi_notify() */ + { KE_KEY, 0x3d | IDEAPAD_WMI_KEY, { KEY_PROG4 } }, + /* shift + prtsc */ + { KE_KEY, 0x2d | IDEAPAD_WMI_KEY, { KEY_CUT } }, + { KE_KEY, 0x29 | IDEAPAD_WMI_KEY, { KEY_TOUCHPAD_TOGGLE } }, + { KE_KEY, 0x2a | IDEAPAD_WMI_KEY, { KEY_ROOT_MENU } }, { KE_END }, }; @@ -2080,6 +2090,12 @@ static void ideapad_wmi_notify(struct wmi_device *wdev, union acpi_object *data) dev_dbg(&wdev->dev, "WMI fn-key event: 0x%llx\n", data->integer.value); + /* performance button triggered by 0x3d */ + if (data->integer.value == 0x3d && priv->dytc) { + platform_profile_cycle(); + break; + } + /* 0x02 FnLock, 0x03 Esc */ if (data->integer.value == 0x02 || data->integer.value == 0x03) ideapad_fn_lock_led_notify(priv, data->integer.value == 0x02); diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 88a1a9ff2f34..0b5e43444ed6 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -44,16 +44,17 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung"); static const struct acpi_device_id intel_hid_ids[] = { - {"INT33D5", 0}, - {"INTC1051", 0}, - {"INTC1054", 0}, - {"INTC1070", 0}, - {"INTC1076", 0}, - {"INTC1077", 0}, - {"INTC1078", 0}, - {"INTC107B", 0}, - {"INTC10CB", 0}, - {"", 0}, + { "INT33D5" }, + { "INTC1051" }, + { "INTC1054" }, + { "INTC1070" }, + { "INTC1076" }, + { "INTC1077" }, + { "INTC1078" }, + { "INTC107B" }, + { "INTC10CB" }, + { "INTC10CC" }, + { } }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index 40bbf8e45fa4..bdee5d00f30b 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ 
b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -146,15 +146,13 @@ static int uncore_event_cpu_online(unsigned int cpu) { struct uncore_data *data; int target; + int ret; /* Check if there is an online cpu in the package for uncore MSR */ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); if (target < nr_cpu_ids) return 0; - /* Use this CPU on this die as a control CPU */ - cpumask_set_cpu(cpu, &uncore_cpu_mask); - data = uncore_get_instance(cpu); if (!data) return 0; @@ -163,7 +161,14 @@ static int uncore_event_cpu_online(unsigned int cpu) data->die_id = topology_die_id(cpu); data->domain_id = UNCORE_DOMAIN_ID_INVALID; - return uncore_freq_add_entry(data, cpu); + ret = uncore_freq_add_entry(data, cpu); + if (ret) + return ret; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + return 0; } static int uncore_event_cpu_offline(unsigned int cpu) diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index faf6e027f89a..2ccdca4f6960 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -2578,12 +2578,60 @@ static const struct ocp_sma_op ocp_fb_sma_op = { .set_output = ptp_ocp_sma_fb_set_output, }; +static int +ptp_ocp_sma_adva_set_output(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + u32 reg, mask, shift; + unsigned long flags; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map1->gpio2 : &bp->sma_map2->gpio2; + shift = sma_nr & 1 ? 0 : 16; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); + + iowrite32(reg, gpio); + + spin_unlock_irqrestore(&bp->lock, flags); + + return 0; +} + +static int +ptp_ocp_sma_adva_set_inputs(struct ptp_ocp *bp, int sma_nr, u32 val) +{ + u32 reg, mask, shift; + unsigned long flags; + u32 __iomem *gpio; + + gpio = sma_nr > 2 ? &bp->sma_map2->gpio1 : &bp->sma_map1->gpio1; + shift = sma_nr & 1 ? 
0 : 16; + + mask = 0xffff << (16 - shift); + + spin_lock_irqsave(&bp->lock, flags); + + reg = ioread32(gpio); + reg = (reg & mask) | (val << shift); + + iowrite32(reg, gpio); + + spin_unlock_irqrestore(&bp->lock, flags); + + return 0; +} + static const struct ocp_sma_op ocp_adva_sma_op = { .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out }, .init = ptp_ocp_sma_fb_init, .get = ptp_ocp_sma_fb_get, - .set_inputs = ptp_ocp_sma_fb_set_inputs, - .set_output = ptp_ocp_sma_fb_set_output, + .set_inputs = ptp_ocp_sma_adva_set_inputs, + .set_output = ptp_ocp_sma_adva_set_output, }; static int diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 4bfe469c04aa..8c1c908d2c6e 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -5,7 +5,7 @@ comment "S/390 block device drivers" config DCSSBLK def_tristate m prompt "DCSSBLK support" - depends on S390 && BLOCK + depends on S390 && BLOCK && (DAX || DAX=n) help Support for dcss block device @@ -14,7 +14,6 @@ config DCSSBLK_DAX depends on DCSSBLK # requires S390 ZONE_DEVICE support depends on BROKEN - select DAX prompt "DCSSBLK DAX support" help Enable DAX operation for the dcss block device diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index dc4bd422b601..486db5b2f05d 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -891,7 +891,7 @@ static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn) status = mmio_init_fn(pdev, base, &mbox); if (status != MYRB_STATUS_SUCCESS) { dev_err(&pdev->dev, - "Failed to enable mailbox, statux %02X\n", + "Failed to enable mailbox, status %02X\n", status); return false; } diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c index 5ea6399e6c9b..10a602d4843a 100644 --- a/drivers/soundwire/intel_auxdevice.c +++ b/drivers/soundwire/intel_auxdevice.c @@ -353,9 +353,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev, /* use generic bandwidth allocation algorithm */ sdw->cdns.bus.compute_params = sdw_compute_params; - /* avoid resuming from pm_runtime suspend if it's not required */ - dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); - ret = sdw_bus_master_add(bus, dev, dev->fwnode); if (ret) { dev_err(dev, "sdw_bus_master_add fail: %d\n", ret); @@ -640,7 +637,10 @@ static int __maybe_unused intel_suspend(struct device *dev) return 0; } - if (pm_runtime_suspended(dev)) { + /* Prevent runtime PM from racing with the code below. 
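The intel_suspend() change here, together with the intel_resume() change further down, follows a common pairing: disable runtime PM for the duration of system sleep, test the runtime status with pm_runtime_status_suspended() rather than pm_runtime_suspended(), and re-arm runtime PM on resume. A minimal sketch of that pairing, with the SoundWire-specific power sequencing elided (illustrative only, not the driver's full code):

	#include <linux/pm_runtime.h>

	static int example_system_suspend(struct device *dev)
	{
		/* Block runtime PM so its callbacks cannot race with us */
		pm_runtime_disable(dev);

		if (pm_runtime_status_suspended(dev))
			return 0;	/* link already powered down */

		/* ... power the link down here ... */
		return 0;
	}

	static int example_system_resume(struct device *dev)
	{
		/* ... power the link up here ... */

		/* Runtime PM is still disabled: fix up the status, then re-enable */
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);

		return 0;
	}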
*/ + pm_runtime_disable(dev); + + if (pm_runtime_status_suspended(dev)) { dev_dbg(dev, "pm_runtime status: suspended\n"); clock_stop_quirks = sdw->link_res->clock_stop_quirks; @@ -648,7 +648,7 @@ static int __maybe_unused intel_suspend(struct device *dev) if ((clock_stop_quirks & SDW_INTEL_CLK_STOP_BUS_RESET) || !clock_stop_quirks) { - if (pm_runtime_suspended(dev->parent)) { + if (pm_runtime_status_suspended(dev->parent)) { /* * paranoia check: this should not happen with the .prepare * resume to full power @@ -715,7 +715,6 @@ static int __maybe_unused intel_resume(struct device *dev) struct sdw_cdns *cdns = dev_get_drvdata(dev); struct sdw_intel *sdw = cdns_to_intel(cdns); struct sdw_bus *bus = &cdns->bus; - int link_flags; int ret; if (bus->prop.hw_disabled || !sdw->startup_done) { @@ -724,23 +723,6 @@ static int __maybe_unused intel_resume(struct device *dev) return 0; } - if (pm_runtime_suspended(dev)) { - dev_dbg(dev, "pm_runtime status was suspended, forcing active\n"); - - /* follow required sequence from runtime_pm.rst */ - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_mark_last_busy(dev); - pm_runtime_enable(dev); - - pm_runtime_resume(bus->dev); - - link_flags = md_flags >> (bus->link_id * 8); - - if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) - pm_runtime_idle(dev); - } - ret = sdw_intel_link_power_up(sdw); if (ret) { dev_err(dev, "%s failed: %d\n", __func__, ret); @@ -761,6 +743,14 @@ static int __maybe_unused intel_resume(struct device *dev) } /* + * Runtime PM has been disabled in intel_suspend(), so set the status + * to active because the device has just been resumed and re-enable + * runtime PM. + */ + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + /* * after system resume, the pm_runtime suspend() may kick in * during the enumeration, before any children device force the * master device to remain active. Using pm_runtime_get() diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index a31a1db07aa4..5db0639d3b01 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -596,7 +596,11 @@ u64 spi_mem_calc_op_duration(struct spi_mem_op *op) ns_per_cycles = 1000000000 / op->max_freq; ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1); ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1); - ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + + /* Dummy bytes are optional for some SPI flash memory operations */ + if (op->dummy.nbytes) + ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 
2 : 1); return ncycles * ns_per_cycles; diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index 17eb67e19132..94948c8781e8 100644 --- a/drivers/spi/spi-qpic-snand.c +++ b/drivers/spi/spi-qpic-snand.c @@ -142,7 +142,7 @@ static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, else if (reg == NAND_READ_LOCATION_1) snandc->regs->read_location1 = locreg_val; else if (reg == NAND_READ_LOCATION_2) - snandc->regs->read_location1 = locreg_val; + snandc->regs->read_location2 = locreg_val; else if (reg == NAND_READ_LOCATION_3) snandc->regs->read_location3 = locreg_val; } @@ -1307,8 +1307,7 @@ static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); snandc->qspi->cmd = cpu_to_le32(cmd); - qcom_spi_block_erase(snandc); - return 0; + return qcom_spi_block_erase(snandc); default: break; } diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c index 668022098b1e..9ec9823409cc 100644 --- a/drivers/spi/spi-stm32-ospi.c +++ b/drivers/spi/spi-stm32-ospi.c @@ -960,6 +960,10 @@ err_pm_resume: err_pm_enable: pm_runtime_force_suspend(ospi->dev); mutex_destroy(&ospi->lock); + if (ospi->dma_chtx) + dma_release_channel(ospi->dma_chtx); + if (ospi->dma_chrx) + dma_release_channel(ospi->dma_chrx); return ret; } diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 3822d7c8d8ed..2a8bb798e95b 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) u32 inactive_cycles; u8 cs_state; - if (setup->unit != SPI_DELAY_UNIT_SCK || - hold->unit != SPI_DELAY_UNIT_SCK || - inactive->unit != SPI_DELAY_UNIT_SCK) { + if ((setup->unit && setup->unit != SPI_DELAY_UNIT_SCK) || + (hold->unit && hold->unit != SPI_DELAY_UNIT_SCK) || + (inactive->unit && inactive->unit != SPI_DELAY_UNIT_SCK)) { dev_err(&spi->dev, "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", SPI_DELAY_UNIT_SCK); diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c index 7540c20090c7..351f983ef914 100644 --- a/drivers/staging/axis-fifo/axis-fifo.c +++ b/drivers/staging/axis-fifo/axis-fifo.c @@ -393,16 +393,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET); if (!bytes_available) { - dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n"); - reset_ip_core(fifo); + dev_err(fifo->dt_device, "received a packet of length 0\n"); ret = -EIO; goto end_unlock; } if (bytes_available > len) { - dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n", + dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n", bytes_available, len); - reset_ip_core(fifo); ret = -EINVAL; goto end_unlock; } @@ -411,8 +409,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, /* this probably can't happen unless IP * registers were previously mishandled */ - dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n"); - reset_ip_core(fifo); + dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n"); ret = -EIO; goto end_unlock; } @@ -433,7 +430,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, if (copy_to_user(buf + copied * sizeof(u32), tmp_buf, copy * 
sizeof(u32))) { - reset_ip_core(fifo); ret = -EFAULT; goto end_unlock; } @@ -542,7 +538,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf, if (copy_from_user(tmp_buf, buf + copied * sizeof(u32), copy * sizeof(u32))) { - reset_ip_core(fifo); ret = -EFAULT; goto end_unlock; } @@ -775,9 +770,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo) goto end; } - /* IP sets TDFV to fifo depth - 4 so we will do the same */ - fifo->tx_fifo_depth -= 4; - ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo); if (ret) { dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n"); diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c index 6c14d7bcdd67..081b17f49863 100644 --- a/drivers/staging/iio/adc/ad7816.c +++ b/drivers/staging/iio/adc/ad7816.c @@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7816_chip_info *chip = iio_priv(indio_dev); - if (strcmp(buf, "full")) { + if (strcmp(buf, "full") == 0) { gpiod_set_value(chip->rdwr_pin, 1); chip->mode = AD7816_FULL; } else { diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c index b839b50ac26a..fa7ea4ca4c36 100644 --- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c +++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c @@ -1900,6 +1900,7 @@ static int bcm2835_mmal_probe(struct vchiq_device *device) __func__, ret); goto free_dev; } + dev->v4l2_dev.dev = &device->dev; /* setup v4l controls */ ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler); diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index cd15e84c47f4..1db2e951b53f 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -151,6 +151,11 @@ static void tb_cfg_request_dequeue(struct tb_cfg_request *req) struct tb_ctl *ctl = req->ctl; mutex_lock(&ctl->request_queue_lock); + if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) { + mutex_unlock(&ctl->request_queue_lock); + return; + } + list_del(&req->list); clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags)) diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 144d0232a70c..a3a7c8059eee 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -217,7 +217,7 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr, ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); if (!ret) { /* Notify userspace about the change */ - kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); + tb_domain_event(tb, NULL); } mutex_unlock(&tb->lock); diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 7859bccc592d..f213d9174dc5 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -22,6 +22,7 @@ #include "ctl.h" #include "nhi_regs.h" #include "tb.h" +#include "tunnel.h" #define PCIE2CIO_CMD 0x30 #define PCIE2CIO_CMD_TIMEOUT BIT(31) @@ -379,6 +380,27 @@ static bool icm_firmware_running(const struct tb_nhi *nhi) return !!(val & REG_FW_STS_ICM_EN); } +static void icm_xdomain_activated(struct tb_xdomain *xd, bool activated) +{ + struct tb_port *nhi_port, *dst_port; + struct tb *tb = xd->tb; + + nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); + dst_port = tb_xdomain_downstream_port(xd); + + if (activated) + tb_tunnel_event(tb, TB_TUNNEL_ACTIVATED, TB_TUNNEL_DMA, + nhi_port, dst_port); + else + 
tb_tunnel_event(tb, TB_TUNNEL_DEACTIVATED, TB_TUNNEL_DMA, + nhi_port, dst_port); +} + +static void icm_dp_event(struct tb *tb) +{ + tb_tunnel_event(tb, TB_TUNNEL_CHANGED, TB_TUNNEL_DP, NULL, NULL); +} + static bool icm_fr_is_supported(struct tb *tb) { return !x86_apple_machine; @@ -584,6 +606,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, if (reply.hdr.flags & ICM_FLAGS_ERROR) return -EIO; + icm_xdomain_activated(xd, true); return 0; } @@ -603,6 +626,8 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, nhi_mailbox_cmd(tb->nhi, cmd, 1); usleep_range(10, 50); nhi_mailbox_cmd(tb->nhi, cmd, 2); + + icm_xdomain_activated(xd, false); return 0; } @@ -1151,6 +1176,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, if (reply.hdr.flags & ICM_FLAGS_ERROR) return -EIO; + icm_xdomain_activated(xd, true); return 0; } @@ -1191,7 +1217,12 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, return ret; usleep_range(10, 50); - return icm_tr_xdomain_tear_down(tb, xd, 2); + ret = icm_tr_xdomain_tear_down(tb, xd, 2); + if (ret) + return ret; + + icm_xdomain_activated(xd, false); + return 0; } static void @@ -1718,6 +1749,9 @@ static void icm_handle_notification(struct work_struct *work) if (tb_is_xdomain_enabled()) icm->xdomain_disconnected(tb, n->pkg); break; + case ICM_EVENT_DP_CONFIG_CHANGED: + icm_dp_event(tb); + break; case ICM_EVENT_RTD3_VETO: icm->rtd3_veto(tb, n->pkg); break; diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 6a2116cbb06f..28febb95f8fa 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -3599,6 +3599,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime) flags |= TB_WAKE_ON_USB4; flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; } else if (device_may_wakeup(&sw->dev)) { + flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; } diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 8c527af98927..c14ab1fbeeaf 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -952,6 +952,15 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", available_up, available_down); + /* + * If the available bandwidth is less than 1.5 Gb/s notify + * userspace that the connected isochronous device may not work + * properly. 
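For reference, the tb_tunnel_event() call added just below, and the tb_domain_event() helper introduced later in tb.h, boil down to a KOBJ_CHANGE uevent carrying extra environment variables. A rough sketch of that underlying mechanism, with example values for the two variables userspace would receive:

	#include <linux/kobject.h>

	static void example_notify_low_bandwidth(struct kobject *domain_kobj)
	{
		char *envp[] = {
			"TUNNEL_EVENT=low bandwidth",
			"TUNNEL_DETAILS=(USB3)",
			NULL,
		};

		kobject_uevent_env(domain_kobj, KOBJ_CHANGE, envp);
	}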
+ */ + if (available_up < 1500 || available_down < 1500) + tb_tunnel_event(tb, TB_TUNNEL_LOW_BANDWIDTH, TB_TUNNEL_USB3, + down, up); + tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, available_down); if (!tunnel) { @@ -2000,8 +2009,10 @@ static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in, ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down, true); - if (ret) + if (ret) { + tb_tunnel_event(tb, TB_TUNNEL_NO_BANDWIDTH, TB_TUNNEL_DP, in, out); goto err_reclaim_usb; + } tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", available_up, available_down); @@ -2622,8 +2633,12 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, } } - return tb_tunnel_alloc_bandwidth(tunnel, requested_up, - requested_down); + ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, + requested_down); + if (ret) + goto fail; + + return 0; } /* @@ -2699,6 +2714,7 @@ fail: "failing the request by rewriting allocated %d/%d Mb/s\n", allocated_up, allocated_down); tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down); + tb_tunnel_event(tb, TB_TUNNEL_NO_BANDWIDTH, TB_TUNNEL_DP, in, out); } return ret; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index b54147a1ba87..87afd5a7c504 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -804,6 +804,19 @@ static inline void tb_domain_put(struct tb *tb) put_device(&tb->dev); } +/** + * tb_domain_event() - Notify userspace about an event in domain + * @tb: Domain where event occurred + * @envp: Array of uevent environment strings (can be %NULL) + * + * This function provides a way to notify userspace about any events + * that take place in the domain. + */ +static inline void tb_domain_event(struct tb *tb, char *envp[]) +{ + kobject_uevent_env(&tb->dev.kobj, KOBJ_CHANGE, envp); +} + struct tb_nvm *tb_nvm_alloc(struct device *dev); int tb_nvm_read_version(struct tb_nvm *nvm); int tb_nvm_validate(struct tb_nvm *nvm); @@ -1468,6 +1481,7 @@ static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev) struct usb4_port *usb4_port_device_add(struct tb_port *port); void usb4_port_device_remove(struct usb4_port *usb4); int usb4_port_device_resume(struct usb4_port *usb4); +int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port); static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4) { diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index a1670a96cbdc..144f7332d5d2 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -118,6 +118,7 @@ enum icm_event_code { ICM_EVENT_DEVICE_DISCONNECTED = 0x4, ICM_EVENT_XDOMAIN_CONNECTED = 0x6, ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7, + ICM_EVENT_DP_CONFIG_CHANGED = 0x8, ICM_EVENT_RTD3_VETO = 0xa, }; diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 76254ed3f47f..d52efe3f658c 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -100,6 +100,14 @@ MODULE_PARM_DESC(bw_alloc_mode, static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; +static const char * const tb_event_names[] = { + [TB_TUNNEL_ACTIVATED] = "activated", + [TB_TUNNEL_CHANGED] = "changed", + [TB_TUNNEL_DEACTIVATED] = "deactivated", + [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth", + [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth", +}; + /* Synchronizes kref_get()/put() of struct tb_tunnel */ static DEFINE_MUTEX(tb_tunnel_lock); @@ -220,6 +228,72 @@ void tb_tunnel_put(struct tb_tunnel 
*tunnel) mutex_unlock(&tb_tunnel_lock); } +/** + * tb_tunnel_event() - Notify userspace about tunneling event + * @tb: Domain where the event occurred + * @event: Event that happened + * @type: Type of the tunnel in question + * @src_port: Tunnel source port (can be %NULL) + * @dst_port: Tunnel destination port (can be %NULL) + * + * Notifies userspace about tunneling @event in the domain. The tunnel + * does not need to exist (e.g the tunnel was not activated because + * there is not enough bandwidth). If the @src_port and @dst_port are + * given fill in full %TUNNEL_DETAILS environment variable. Otherwise + * uses the shorter one (just the tunnel type). + */ +void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event, + enum tb_tunnel_type type, + const struct tb_port *src_port, + const struct tb_port *dst_port) +{ + char *envp[3] = { NULL }; + + if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names))) + return; + if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names))) + return; + + envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]); + if (!envp[0]) + return; + + if (src_port != NULL && dst_port != NULL) { + envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)", + tb_route(src_port->sw), src_port->port, + tb_route(dst_port->sw), dst_port->port, + tb_tunnel_names[type]); + } else { + envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)", + tb_tunnel_names[type]); + } + + if (envp[1]) + tb_domain_event(tb, envp); + + kfree(envp[1]); + kfree(envp[0]); +} + +static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active) +{ + if (active) { + tunnel->state = TB_TUNNEL_ACTIVE; + tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type, + tunnel->src_port, tunnel->dst_port); + } else { + tunnel->state = TB_TUNNEL_INACTIVE; + tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type, + tunnel->src_port, tunnel->dst_port); + } +} + +static inline void tb_tunnel_changed(struct tb_tunnel *tunnel) +{ + tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type, + tunnel->src_port, tunnel->dst_port); +} + static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) { struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); @@ -992,7 +1066,7 @@ static void tb_dp_dprx_work(struct work_struct *work) return; } } else { - tunnel->state = TB_TUNNEL_ACTIVE; + tb_tunnel_set_active(tunnel, true); } mutex_unlock(&tb->lock); } @@ -2326,7 +2400,7 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel) } } - tunnel->state = TB_TUNNEL_ACTIVE; + tb_tunnel_set_active(tunnel, true); return 0; err: @@ -2356,7 +2430,7 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel) if (tunnel->post_deactivate) tunnel->post_deactivate(tunnel); - tunnel->state = TB_TUNNEL_INACTIVE; + tb_tunnel_set_active(tunnel, false); } /** @@ -2449,8 +2523,16 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, if (!tb_tunnel_is_active(tunnel)) return -ENOTCONN; - if (tunnel->alloc_bandwidth) - return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); + if (tunnel->alloc_bandwidth) { + int ret; + + ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); + if (ret) + return ret; + + tb_tunnel_changed(tunnel); + return 0; + } return -EOPNOTSUPP; } diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index 8a0a0cb21a89..5e9fb73d5220 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -194,6 +194,29 @@ static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel 
tunnel->dst_port); } +/** + * enum tb_tunnel_event - Tunnel related events + * @TB_TUNNEL_ACTIVATED: A tunnel was activated + * @TB_TUNNEL_CHANGED: There is a tunneling change in the domain. Includes + * full %TUNNEL_DETAILS if the tunnel in question is known + * (ICM does not provide that information). + * @TB_TUNNEL_DEACTIVATED: A tunnel was torn down + * @TB_TUNNEL_LOW_BANDWIDTH: Tunnel bandwidth is not optimal + * @TB_TUNNEL_NO_BANDWIDTH: There is not enough bandwidth for a tunnel + */ +enum tb_tunnel_event { + TB_TUNNEL_ACTIVATED, + TB_TUNNEL_CHANGED, + TB_TUNNEL_DEACTIVATED, + TB_TUNNEL_LOW_BANDWIDTH, + TB_TUNNEL_NO_BANDWIDTH, +}; + +void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event, + enum tb_tunnel_type type, + const struct tb_port *src_port, + const struct tb_port *dst_port); + const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel); #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index e51d01671d8e..fce3c0f2354a 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -440,10 +440,10 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) bool configured = val & PORT_CS_19_PC; usb4 = port->usb4; - if (((flags & TB_WAKE_ON_CONNECT) | + if (((flags & TB_WAKE_ON_CONNECT) && device_may_wakeup(&usb4->dev)) && !configured) val |= PORT_CS_19_WOC; - if (((flags & TB_WAKE_ON_DISCONNECT) | + if (((flags & TB_WAKE_ON_DISCONNECT) && device_may_wakeup(&usb4->dev)) && configured) val |= PORT_CS_19_WOD; if ((flags & TB_WAKE_ON_USB4) && configured) @@ -935,7 +935,15 @@ int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) return status ? -EIO : 0; } -static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) +/** + * usb4_port_index() - Finds matching USB4 port index + * @sw: USB4 router + * @port: USB4 protocol or lane adapter + * + * Finds matching USB4 port index (starting from %0) that given @port goes + * through. + */ +int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port) { struct tb_port *p; int usb4_idx = 0; @@ -969,7 +977,7 @@ static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port) { - int usb4_idx = usb4_port_idx(sw, port); + int usb4_idx = usb4_port_index(sw, port); struct tb_port *p; int pcie_idx = 0; @@ -1000,7 +1008,7 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port) { - int usb4_idx = usb4_port_idx(sw, port); + int usb4_idx = usb4_port_index(sw, port); struct tb_port *p; int usb_idx = 0; diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c index 5150879888ca..852a45fcd19d 100644 --- a/drivers/thunderbolt/usb4_port.c +++ b/drivers/thunderbolt/usb4_port.c @@ -105,6 +105,49 @@ static void usb4_port_online(struct usb4_port *usb4) tb_acpi_power_off_retimers(port); } +/** + * usb4_usb3_port_match() - Matches USB4 port device with USB 3.x port device + * @usb4_port_dev: USB4 port device + * @usb3_port_fwnode: USB 3.x port firmware node + * + * Checks if USB 3.x port @usb3_port_fwnode is tunneled through USB4 port @usb4_port_dev. + * Returns true if match is found, false otherwise. + * + * Function is designed to be used with component framework (component_match_add). 
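The body of usb4_usb3_port_match() that follows leans on scope-based cleanup: the reference returned by fwnode_find_reference() is dropped automatically by __free(fwnode_handle) when the variable goes out of scope, so every early return stays leak-free. A condensed, illustrative version of just that pattern (not the full matching logic):

	#include <linux/cleanup.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/property.h>

	static bool example_references_nhi(const struct fwnode_handle *usb3_port_fwnode,
					   struct device *nhi_dev)
	{
		struct fwnode_handle *ref __free(fwnode_handle) =
			fwnode_find_reference(usb3_port_fwnode, "usb4-host-interface", 0);

		if (IS_ERR(ref))
			return false;

		/* Does the USB 3.x port reference the same NHI device? */
		return device_match_fwnode(nhi_dev, ref);
	}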
+ */ +bool usb4_usb3_port_match(struct device *usb4_port_dev, + const struct fwnode_handle *usb3_port_fwnode) +{ + struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = NULL; + struct usb4_port *usb4; + struct tb_switch *sw; + struct tb_nhi *nhi; + u8 usb4_port_num; + struct tb *tb; + + usb4 = tb_to_usb4_port_device(usb4_port_dev); + if (!usb4) + return false; + + sw = usb4->port->sw; + tb = sw->tb; + nhi = tb->nhi; + + nhi_fwnode = fwnode_find_reference(usb3_port_fwnode, "usb4-host-interface", 0); + if (IS_ERR(nhi_fwnode)) + return false; + + /* Check if USB3 fwnode references same NHI where USB4 port resides */ + if (!device_match_fwnode(&nhi->pdev->dev, nhi_fwnode)) + return false; + + if (fwnode_property_read_u8(usb3_port_fwnode, "usb4-port-number", &usb4_port_num)) + return false; + + return usb4_port_index(sw, usb4->port) == usb4_port_num; +} +EXPORT_SYMBOL_GPL(usb4_usb3_port_match); + static ssize_t offline_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -276,12 +319,10 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port) return ERR_PTR(ret); } - if (dev_fwnode(&usb4->dev)) { - ret = component_add(&usb4->dev, &connector_ops); - if (ret) { - dev_err(&usb4->dev, "failed to add component\n"); - device_unregister(&usb4->dev); - } + ret = component_add(&usb4->dev, &connector_ops); + if (ret) { + dev_err(&usb4->dev, "failed to add component\n"); + device_unregister(&usb4->dev); } if (!tb_is_upstream_port(port)) @@ -306,8 +347,7 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port) */ void usb4_port_device_remove(struct usb4_port *usb4) { - if (dev_fwnode(&usb4->dev)) - component_del(&usb4->dev, &connector_ops); + component_del(&usb4->dev, &connector_ops); device_unregister(&usb4->dev); } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 5cb6132b8147..7735421e3991 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -7265,8 +7265,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, err = -EINVAL; } } - ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, - (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); return err; } diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 1b19b5647495..69c1df0f4ca5 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -131,15 +131,12 @@ static void hv_uio_rescind(struct vmbus_channel *channel) vmbus_device_unregister(channel->device_obj); } -/* Sysfs API to allow mmap of the ring buffers +/* Function used for mmap of ring buffer sysfs interface. * The ring buffer is allocated as contiguous memory by vmbus_open */ -static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, - const struct bin_attribute *attr, - struct vm_area_struct *vma) +static int +hv_uio_ring_mmap(struct vmbus_channel *channel, struct vm_area_struct *vma) { - struct vmbus_channel *channel - = container_of(kobj, struct vmbus_channel, kobj); void *ring_buffer = page_address(channel->ringbuffer_page); if (channel->state != CHANNEL_OPENED_STATE) @@ -149,15 +146,6 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, channel->ringbuffer_pagecount << PAGE_SHIFT); } -static const struct bin_attribute ring_buffer_bin_attr = { - .attr = { - .name = "ring", - .mode = 0600, - }, - .size = 2 * SZ_2M, - .mmap = hv_uio_ring_mmap, -}; - /* Callback from VMBUS subsystem when new channel created. 
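The reworked hv_uio_ring_mmap() above now takes the vmbus_channel directly and simply maps the physically contiguous ring buffer into the caller's VMA; the kernel helper for such a mapping is vm_iomap_memory(). A hedged sketch of that operation (illustrative only; see the driver source for the exact call it makes):

	#include <linux/io.h>
	#include <linux/mm.h>

	static int example_ring_mmap(void *ring_buffer, unsigned long nr_pages,
				     struct vm_area_struct *vma)
	{
		/* vm_iomap_memory() validates the VMA against the buffer size */
		return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
				       nr_pages << PAGE_SHIFT);
	}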
*/ static void hv_uio_new_channel(struct vmbus_channel *new_sc) @@ -178,8 +166,7 @@ hv_uio_new_channel(struct vmbus_channel *new_sc) /* Disable interrupts on sub channel */ new_sc->inbound.ring_buffer->interrupt_mask = 1; set_channel_read_mode(new_sc, HV_CALL_ISR); - - ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr); + ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap); if (ret) { dev_err(device, "sysfs create ring bin file failed; %d\n", ret); vmbus_close(new_sc); @@ -350,10 +337,18 @@ hv_uio_probe(struct hv_device *dev, goto fail_close; } - ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr); - if (ret) - dev_notice(&dev->device, - "sysfs create ring bin file failed; %d\n", ret); + /* + * This internally calls sysfs_update_group, which returns a non-zero value if it executes + * before sysfs_create_group. This is expected as the 'ring' will be created later in + * vmbus_device_register() -> vmbus_add_channel_kobj(). Thus, no need to check the return + * value and print warning. + * + * Creating/exposing sysfs in driver probe is not encouraged as it can lead to race + * conditions with userspace. For backward compatibility, "ring" sysfs could not be removed + * or decoupled from uio_hv_generic probe. Userspace programs can make use of inotify + * APIs to make sure that ring is created. + */ + hv_create_ring_sysfs(channel, hv_uio_ring_mmap); hv_set_drvdata(dev, pdata); @@ -375,7 +370,7 @@ hv_uio_remove(struct hv_device *dev) if (!pdata) return; - sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); + hv_remove_ring_sysfs(dev->channel); uio_unregister_device(&pdata->info); hv_uio_cleanup(dev, pdata); diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c index 59ec505e198a..735df88774e4 100644 --- a/drivers/usb/cdns3/cdns3-plat.c +++ b/drivers/usb/cdns3/cdns3-plat.c @@ -179,8 +179,6 @@ err_phy3_init: /** * cdns3_plat_remove() - unbind drd driver and clean up * @pdev: Pointer to Linux platform device - * - * Returns 0 on success otherwise negative errno */ static void cdns3_plat_remove(struct platform_device *pdev) { diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index 87f310841735..55f95f41b3b4 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -29,7 +29,8 @@ unsigned int cdnsp_port_speed(unsigned int port_status) { /*Detect gadget speed based on PORTSC register*/ - if (DEV_SUPERSPEEDPLUS(port_status)) + if (DEV_SUPERSPEEDPLUS(port_status) || + DEV_SSP_GEN1x2(port_status) || DEV_SSP_GEN2x2(port_status)) return USB_SPEED_SUPER_PLUS; else if (DEV_SUPERSPEED(port_status)) return USB_SPEED_SUPER; @@ -139,6 +140,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, (portsc & PORT_CHANGE_BITS), port_regs); } +static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev) +{ + struct cdns *cdns = dev_get_drvdata(pdev->dev); + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + u32 val; + + if (!cdns->override_apb_timeout) + return; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_3_OFFSET; + + val = le32_to_cpu(readl(reg)); + val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout); + writel(cpu_to_le32(val), reg); +} + static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) { __le32 __iomem *reg; @@ -527,6 +548,7 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) dma_addr_t cmd_deq_dma; union 
cdnsp_trb *event; u32 cycle_state; + u32 retry = 10; int ret, val; u64 cmd_dma; u32 flags; @@ -558,8 +580,23 @@ int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev) flags = le32_to_cpu(event->event_cmd.flags); /* Check the owner of the TRB. */ - if ((flags & TRB_CYCLE) != cycle_state) + if ((flags & TRB_CYCLE) != cycle_state) { + /* + * Give some extra time to get chance controller + * to finish command before returning error code. + * Checking CMD_RING_BUSY is not sufficient because + * this bit is cleared to '0' when the Command + * Descriptor has been executed by controller + * and not when command completion event has + * be added to event ring. + */ + if (retry--) { + udelay(20); + continue; + } + return -EINVAL; + } cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb); @@ -1773,6 +1810,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); pdev->rev_cap = reg; + pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision); + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", readl(&pdev->rev_cap->ctrl_revision), readl(&pdev->rev_cap->rtl_revision), @@ -1798,6 +1837,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev) pdev->hci_version = HC_VERSION(pdev->hcc_params); pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + /* + * Override the APB timeout value to give the controller more time for + * enabling UTMI clock and synchronizing APB and UTMI clock domains. + * This fix is platform specific and is required to fixes issue with + * reading incorrect value from PORTSC register after resuming + * from L1 state. + */ + cdnsp_set_apb_timeout_value(pdev); + cdnsp_get_rev_cap(pdev); /* Make sure the Device Controller is halted. */ diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h index 84887dfea763..2afa3e558f85 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.h +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -285,11 +285,15 @@ struct cdnsp_port_regs { #define XDEV_HS (0x3 << 10) #define XDEV_SS (0x4 << 10) #define XDEV_SSP (0x5 << 10) +#define XDEV_SSP1x2 (0x6 << 10) +#define XDEV_SSP2x2 (0x7 << 10) #define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10)) #define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) #define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) #define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS) #define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP) +#define DEV_SSP_GEN1x2(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP1x2) +#define DEV_SSP_GEN2x2(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP2x2) #define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS) #define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f) /* Port Link State Write Strobe - set this when changing link state */ @@ -520,6 +524,9 @@ struct cdnsp_rev_cap { #define REG_CHICKEN_BITS_2_OFFSET 0x48 #define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) +#define REG_CHICKEN_BITS_3_OFFSET 0x4C +#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val)) + /* XBUF Extended Capability ID. */ #define XBUF_CAP_ID 0xCB #define XBUF_RX_TAG_MASK_0_OFFSET 0x1C @@ -1357,6 +1364,7 @@ struct cdnsp_port { * @rev_cap: Controller Capabilities Registers. * @hcs_params1: Cached register copies of read-only HCSPARAMS1 * @hcc_params: Cached register copies of read-only HCCPARAMS1 + * @rtl_revision: Cached controller rtl revision. * @setup: Temporary buffer for setup packet. * @ep0_preq: Internal allocated request used during enumeration. * @ep0_stage: ep0 stage during enumeration process. 
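The retry added to cdnsp_wait_for_cmd_compl() earlier in this hunk gives the controller a bounded grace period before the wait is declared failed. Reduced to a sketch, the shape of that wait is (predicate and numbers are illustrative, taken from the code above):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static int example_wait_for_completion(bool (*trb_owned_by_cpu)(void *ctx),
					       void *ctx)
	{
		unsigned int retry = 10;

		while (!trb_owned_by_cpu(ctx)) {
			if (!retry--)
				return -EINVAL;	/* controller never handed the TRB back */
			udelay(20);		/* give it a little more time */
		}

		return 0;
	}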
@@ -1411,6 +1419,8 @@ struct cdnsp_device { __u32 hcs_params1; __u32 hcs_params3; __u32 hcc_params; + #define RTL_REVISION_NEW_LPM 0x2700 + __u32 rtl_revision; /* Lock used in interrupt thread context. */ spinlock_t lock; struct usb_ctrlrequest setup; diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index a51144504ff3..8c361b8394e9 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -28,6 +28,8 @@ #define PCI_DRIVER_NAME "cdns-pci-usbssp" #define PLAT_DRIVER_NAME "cdns-usbssp" +#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20 + static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) { /* @@ -139,6 +141,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, cdnsp->otg_irq = pdev->irq; } + /* + * Cadence PCI based platform require some longer timeout for APB + * to fixes domain clock synchronization issue after resuming + * controller from L1 state. + */ + cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE; + pci_set_drvdata(pdev, cdnsp); + if (pci_is_enabled(func)) { cdnsp->dev = dev; cdnsp->gadget_init = cdnsp_gadget_init; @@ -148,8 +158,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, goto free_cdnsp; } - pci_set_drvdata(pdev, cdnsp); - device_wakeup_enable(&pdev->dev); if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 46852529499d..fd06cb85c4ea 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, writel(db_value, reg_addr); - cdnsp_force_l0_go(pdev); + if (pdev->rtl_revision < RTL_REVISION_NEW_LPM) + cdnsp_force_l0_go(pdev); /* Doorbell was set. */ return true; diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 921cccf1ca9d..801be9e61340 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -79,6 +79,8 @@ struct cdns3_platform_data { * @pdata: platform data from glue layer * @lock: spinlock structure * @xhci_plat_data: xhci private data structure pointer + * @override_apb_timeout: hold value of APB timeout. For value 0 the default + * value in CHICKEN_BITS_3 will be preserved. 
* @gadget_init: pointer to gadget initialization function */ struct cdns { @@ -117,6 +119,7 @@ struct cdns { struct cdns3_platform_data *pdata; spinlock_t lock; struct xhci_plat_priv *xhci_plat_data; + u32 override_apb_timeout; int (*gadget_init)(struct cdns *cdns); }; diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 4f8bfd242b59..780f4d151345 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -6,6 +6,7 @@ */ #include <linux/module.h> +#include <linux/irq.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> @@ -98,6 +99,7 @@ struct ci_hdrc_imx_data { struct clk *clk; struct clk *clk_wakeup; struct imx_usbmisc_data *usbmisc_data; + int wakeup_irq; bool supports_runtime_pm; bool override_phy_control; bool in_lpm; @@ -336,6 +338,16 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) return ret; } +static irqreturn_t ci_wakeup_irq_handler(int irq, void *data) +{ + struct ci_hdrc_imx_data *imx_data = data; + + disable_irq_nosync(irq); + pm_runtime_resume(&imx_data->ci_pdev->dev); + + return IRQ_HANDLED; +} + static void ci_hdrc_imx_disable_regulator(void *arg) { struct ci_hdrc_imx_data *data = arg; @@ -494,6 +506,16 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM) data->supports_runtime_pm = true; + data->wakeup_irq = platform_get_irq_optional(pdev, 1); + if (data->wakeup_irq > 0) { + ret = devm_request_threaded_irq(dev, data->wakeup_irq, + NULL, ci_wakeup_irq_handler, + IRQF_ONESHOT | IRQF_NO_AUTOEN, + pdata.name, data); + if (ret) + goto err_clk; + } + ret = imx_usbmisc_init(data->usbmisc_data); if (ret) { dev_err(dev, "usbmisc init failed, ret=%d\n", ret); @@ -602,6 +624,10 @@ static int imx_controller_suspend(struct device *dev, } imx_disable_unprepare_clks(dev); + + if (data->wakeup_irq > 0) + enable_irq(data->wakeup_irq); + if (data->plat_data->flags & CI_HDRC_PMQOS) cpu_latency_qos_remove_request(&data->pm_qos_req); @@ -626,6 +652,10 @@ static int imx_controller_resume(struct device *dev, if (data->plat_data->flags & CI_HDRC_PMQOS) cpu_latency_qos_add_request(&data->pm_qos_req, 0); + if (data->wakeup_irq > 0 && + !irqd_irq_disabled(irq_get_irq_data(data->wakeup_irq))) + disable_irq_nosync(data->wakeup_irq); + ret = imx_prepare_enable_clks(dev); if (ret) return ret; @@ -661,6 +691,10 @@ static int ci_hdrc_imx_suspend(struct device *dev) return ret; pinctrl_pm_select_sleep_state(dev); + + if (data->wakeup_irq > 0 && device_may_wakeup(dev)) + enable_irq_wake(data->wakeup_irq); + return ret; } @@ -669,6 +703,9 @@ static int ci_hdrc_imx_resume(struct device *dev) struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); int ret; + if (data->wakeup_irq > 0 && device_may_wakeup(dev)) + disable_irq_wake(data->wakeup_irq); + pinctrl_pm_select_default_state(dev); ret = imx_controller_resume(dev, PMSG_RESUME); if (!ret && data->supports_runtime_pm) { diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 6243d8005f5d..118b9a68496b 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -139,6 +139,22 @@ #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \ MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN) +/* + * HSIO Block Control Register + */ + +#define BLKCTL_USB_WAKEUP_CTRL 0x0 +#define BLKCTL_OTG_WAKE_ENABLE BIT(31) +#define BLKCTL_OTG_VBUS_SESSVALID BIT(4) +#define BLKCTL_OTG_ID_WAKEUP_EN BIT(2) 
+#define BLKCTL_OTG_VBUS_WAKEUP_EN BIT(1) +#define BLKCTL_OTG_DPDM_WAKEUP_EN BIT(0) + +#define BLKCTL_WAKEUP_SOURCE (BLKCTL_OTG_WAKE_ENABLE | \ + BLKCTL_OTG_ID_WAKEUP_EN | \ + BLKCTL_OTG_VBUS_WAKEUP_EN | \ + BLKCTL_OTG_DPDM_WAKEUP_EN) + struct usbmisc_ops { /* It's called once when probe a usb device */ int (*init)(struct imx_usbmisc_data *data); @@ -159,6 +175,7 @@ struct usbmisc_ops { struct imx_usbmisc { void __iomem *base; + void __iomem *blkctl; spinlock_t lock; const struct usbmisc_ops *ops; }; @@ -1016,6 +1033,44 @@ static int usbmisc_imx6sx_power_lost_check(struct imx_usbmisc_data *data) return 0; } +static u32 usbmisc_blkctl_wakeup_setting(struct imx_usbmisc_data *data) +{ + u32 wakeup_setting = BLKCTL_WAKEUP_SOURCE; + + if (data->ext_id || data->available_role != USB_DR_MODE_OTG) + wakeup_setting &= ~BLKCTL_OTG_ID_WAKEUP_EN; + + if (data->ext_vbus || data->available_role == USB_DR_MODE_HOST) + wakeup_setting &= ~BLKCTL_OTG_VBUS_WAKEUP_EN; + + /* Select session valid as VBUS wakeup source */ + wakeup_setting |= BLKCTL_OTG_VBUS_SESSVALID; + + return wakeup_setting; +} + +static int usbmisc_imx95_set_wakeup(struct imx_usbmisc_data *data, bool enabled) +{ + struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); + unsigned long flags; + u32 val; + + if (!usbmisc->blkctl) + return 0; + + spin_lock_irqsave(&usbmisc->lock, flags); + val = readl(usbmisc->blkctl + BLKCTL_USB_WAKEUP_CTRL); + val &= ~BLKCTL_WAKEUP_SOURCE; + + if (enabled) + val |= usbmisc_blkctl_wakeup_setting(data); + + writel(val, usbmisc->blkctl + BLKCTL_USB_WAKEUP_CTRL); + spin_unlock_irqrestore(&usbmisc->lock, flags); + + return 0; +} + static const struct usbmisc_ops imx25_usbmisc_ops = { .init = usbmisc_imx25_init, .post = usbmisc_imx25_post, @@ -1068,6 +1123,14 @@ static const struct usbmisc_ops imx7ulp_usbmisc_ops = { .power_lost_check = usbmisc_imx7d_power_lost_check, }; +static const struct usbmisc_ops imx95_usbmisc_ops = { + .init = usbmisc_imx7d_init, + .set_wakeup = usbmisc_imx95_set_wakeup, + .charger_detection = imx7d_charger_detection, + .power_lost_check = usbmisc_imx7d_power_lost_check, + .vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on, +}; + static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data) { struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); @@ -1289,6 +1352,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = { .compatible = "fsl,imx8ulp-usbmisc", .data = &imx7ulp_usbmisc_ops, }, + { + .compatible = "fsl,imx95-usbmisc", + .data = &imx95_usbmisc_ops, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids); @@ -1296,6 +1363,7 @@ MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids); static int usbmisc_imx_probe(struct platform_device *pdev) { struct imx_usbmisc *data; + struct resource *res; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -1307,6 +1375,15 @@ static int usbmisc_imx_probe(struct platform_device *pdev) if (IS_ERR(data->base)) return PTR_ERR(data->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + data->blkctl = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->blkctl)) + return PTR_ERR(data->blkctl); + } else if (device_is_compatible(&pdev->dev, "fsl,imx95-usbmisc")) { + dev_warn(&pdev->dev, "wakeup setting is missing\n"); + } + data->ops = of_device_get_match_data(&pdev->dev); platform_set_drvdata(pdev, data); diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 16e7fa4d488d..ecd6d1f39e49 100644 --- a/drivers/usb/class/cdc-wdm.c +++ 
b/drivers/usb/class/cdc-wdm.c @@ -92,7 +92,6 @@ struct wdm_device { u16 wMaxCommand; u16 wMaxPacketSize; __le16 inum; - int reslength; int length; int read; int count; @@ -214,6 +213,11 @@ static void wdm_in_callback(struct urb *urb) if (desc->rerr == 0 && status != -EPIPE) desc->rerr = status; + if (length == 0) { + dev_dbg(&desc->intf->dev, "received ZLP\n"); + goto skip_zlp; + } + if (length + desc->length > desc->wMaxCommand) { /* The buffer would overflow */ set_bit(WDM_OVERFLOW, &desc->flags); @@ -222,18 +226,18 @@ static void wdm_in_callback(struct urb *urb) if (!test_bit(WDM_OVERFLOW, &desc->flags)) { memmove(desc->ubuf + desc->length, desc->inbuf, length); desc->length += length; - desc->reslength = length; } } skip_error: if (desc->rerr) { /* - * Since there was an error, userspace may decide to not read - * any data after poll'ing. + * If there was a ZLP or an error, userspace may decide to not + * read any data after poll'ing. * We should respond to further attempts from the device to send * data, so that we can get unstuck. */ +skip_zlp: schedule_work(&desc->service_outs_intr); } else { set_bit(WDM_READ, &desc->flags); @@ -585,15 +589,6 @@ retry: goto retry; } - if (!desc->reslength) { /* zero length read */ - dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n"); - clear_bit(WDM_READ, &desc->flags); - rv = service_outstanding_interrupt(desc); - spin_unlock_irq(&desc->iuspin); - if (rv < 0) - goto err; - goto retry; - } cntr = desc->length; spin_unlock_irq(&desc->iuspin); } @@ -1016,7 +1011,7 @@ static void service_interrupt_work(struct work_struct *work) spin_lock_irq(&desc->iuspin); service_outstanding_interrupt(desc); - if (!desc->resp_count) { + if (!desc->resp_count && (desc->length || desc->rerr)) { set_bit(WDM_READ, &desc->flags); wake_up(&desc->wait); } diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 34e46ef308ab..75de29725a45 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -482,6 +482,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) u8 *buffer; u8 tag; int rv; + long wait_rv; + unsigned long expire; dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", data->iin_ep_present); @@ -511,16 +513,18 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) } if (data->iin_ep_present) { - rv = wait_event_interruptible_timeout( + expire = msecs_to_jiffies(file_data->timeout); + wait_rv = wait_event_interruptible_timeout( data->waitq, atomic_read(&data->iin_data_valid) != 0, - file_data->timeout); - if (rv < 0) { - dev_dbg(dev, "wait interrupted %d\n", rv); + expire); + if (wait_rv < 0) { + dev_dbg(dev, "wait interrupted %ld\n", wait_rv); + rv = wait_rv; goto exit; } - if (rv == 0) { + if (wait_rv == 0) { dev_dbg(dev, "wait timed out\n"); rv = -ETIMEDOUT; goto exit; @@ -539,6 +543,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv); + rv = 0; + exit: /* bump interrupt bTag */ data->iin_bTag += 1; @@ -559,14 +565,15 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data, rv = usbtmc_get_stb(file_data, &stb); - if (rv > 0) { - srq_asserted = atomic_xchg(&file_data->srq_asserted, - srq_asserted); - if (srq_asserted) - stb |= 0x40; /* Set RQS bit */ + if (rv < 0) + return rv; + + srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted); + if (srq_asserted) + stb |= 0x40; /* Set RQS bit */ + + rv = put_user(stb, (__u8 __user *)arg); - rv = 
put_user(stb, (__u8 __user *)arg); - } return rv; } @@ -602,9 +609,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, { struct usbtmc_device_data *data = file_data->data; struct device *dev = &data->intf->dev; - int rv; u32 timeout; unsigned long expire; + long wait_rv; if (!data->iin_ep_present) { dev_dbg(dev, "no interrupt endpoint present\n"); @@ -618,25 +625,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, mutex_unlock(&data->io_mutex); - rv = wait_event_interruptible_timeout( - data->waitq, - atomic_read(&file_data->srq_asserted) != 0 || - atomic_read(&file_data->closing), - expire); + wait_rv = wait_event_interruptible_timeout( + data->waitq, + atomic_read(&file_data->srq_asserted) != 0 || + atomic_read(&file_data->closing), + expire); mutex_lock(&data->io_mutex); /* Note! disconnect or close could be called in the meantime */ if (atomic_read(&file_data->closing) || data->zombie) - rv = -ENODEV; + return -ENODEV; - if (rv < 0) { - /* dev can be invalid now! */ - pr_debug("%s - wait interrupted %d\n", __func__, rv); - return rv; + if (wait_rv < 0) { + dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv); + return wait_rv; } - if (rv == 0) { + if (wait_rv == 0) { dev_dbg(dev, "%s - wait timed out\n", __func__); return -ETIMEDOUT; } @@ -830,6 +836,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, unsigned long expire; int bufcount = 1; int again = 0; + long wait_rv; /* mutex already locked */ @@ -942,19 +949,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, if (!(flags & USBTMC_FLAG_ASYNC)) { dev_dbg(dev, "%s: before wait time %lu\n", __func__, expire); - retval = wait_event_interruptible_timeout( + wait_rv = wait_event_interruptible_timeout( file_data->wait_bulk_in, usbtmc_do_transfer(file_data), expire); - dev_dbg(dev, "%s: wait returned %d\n", - __func__, retval); + dev_dbg(dev, "%s: wait returned %ld\n", + __func__, wait_rv); + + if (wait_rv < 0) { + retval = wait_rv; + goto error; + } - if (retval <= 0) { - if (retval == 0) - retval = -ETIMEDOUT; + if (wait_rv == 0) { + retval = -ETIMEDOUT; goto error; } + } urb = usb_get_from_anchor(&file_data->in_anchor); @@ -1380,7 +1392,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, if (!buffer) return -ENOMEM; - mutex_lock(&data->io_mutex); + retval = mutex_lock_interruptible(&data->io_mutex); + if (retval < 0) + goto exit_nolock; + if (data->zombie) { retval = -ENODEV; goto exit; @@ -1503,6 +1518,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, exit: mutex_unlock(&data->io_mutex); +exit_nolock: kfree(buffer); return retval; } @@ -2186,7 +2202,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case USBTMC_IOCTL_GET_STB: retval = usbtmc_get_stb(file_data, &tmp_byte); - if (retval > 0) + if (!retval) retval = put_user(tmp_byte, (__u8 __user *)arg); break; diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 1e36be2a28fd..421c3af38e06 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -21,6 +21,9 @@ #include <linux/regulator/consumer.h> #include <linux/string_choices.h> #include <linux/usb/role.h> +#include <linux/idr.h> + +static DEFINE_IDA(usb_conn_ida); #define USB_GPIO_DEB_MS 20 /* ms */ #define USB_GPIO_DEB_US ((USB_GPIO_DEB_MS) * 1000) /* us */ @@ -30,6 +33,7 @@ struct usb_conn_info { struct device *dev; + int conn_id; /* store the IDA-allocated ID */ struct 
usb_role_switch *role_sw; enum usb_role last_role; struct regulator *vbus; @@ -161,7 +165,17 @@ static int usb_conn_psy_register(struct usb_conn_info *info) .fwnode = dev_fwnode(dev), }; - desc->name = "usb-charger"; + info->conn_id = ida_alloc(&usb_conn_ida, GFP_KERNEL); + if (info->conn_id < 0) + return info->conn_id; + + desc->name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d", + info->conn_id); + if (!desc->name) { + ida_free(&usb_conn_ida, info->conn_id); + return -ENOMEM; + } + desc->properties = usb_charger_properties; desc->num_properties = ARRAY_SIZE(usb_charger_properties); desc->get_property = usb_charger_get_property; @@ -169,8 +183,10 @@ static int usb_conn_psy_register(struct usb_conn_info *info) cfg.drv_data = info; info->charger = devm_power_supply_register(dev, desc, &cfg); - if (IS_ERR(info->charger)) - dev_err(dev, "Unable to register charger\n"); + if (IS_ERR(info->charger)) { + dev_err(dev, "Unable to register charger %d\n", info->conn_id); + ida_free(&usb_conn_ida, info->conn_id); + } return PTR_ERR_OR_ZERO(info->charger); } @@ -278,6 +294,9 @@ static void usb_conn_remove(struct platform_device *pdev) cancel_delayed_work_sync(&info->dw_det); + if (info->charger) + ida_free(&usb_conn_ida, info->conn_id); + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 13bd4ec4ea5f..fc0cfd94cbab 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -307,7 +307,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, goto skip_to_next_endpoint_or_interface_descriptor; } - i = d->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; + i = usb_endpoint_num(d); if (i == 0) { dev_notice(ddev, "config %d interface %d altsetting %d has an " "invalid descriptor for endpoint zero, skipping\n", diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 0e1dd6ef60a7..416af6d76374 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4160,7 +4160,7 @@ static int usb_set_device_initiated_lpm(struct usb_device *udev, "for unconfigured device.\n", __func__, str_enable_disable(enable), usb3_lpm_names[state]); - return 0; + return -EINVAL; } if (enable) { @@ -4234,9 +4234,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev, } /* - * Don't allow device intiated U1/U2 if the system exit latency + one bus - * interval is greater than the minimum service interval of any active - * periodic endpoint. See USB 3.2 section 9.4.9 + * Don't allow device intiated U1/U2 if device isn't in the configured state, + * or the system exit latency + one bus interval is greater than the minimum + * service interval of any active periodic endpoint. See USB 3.2 section 9.4.9 */ static bool usb_device_may_initiate_lpm(struct usb_device *udev, enum usb3_link_state state) @@ -4244,7 +4244,7 @@ static bool usb_device_may_initiate_lpm(struct usb_device *udev, unsigned int sel; /* us */ int i, j; - if (!udev->lpm_devinit_allow) + if (!udev->lpm_devinit_allow || !udev->actconfig) return false; if (state == USB3_LPM_U1) @@ -4292,7 +4292,7 @@ static bool usb_device_may_initiate_lpm(struct usb_device *udev, * driver know about it. If that call fails, it should be harmless, and just * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency. 
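The usb-conn-gpio change above makes the power-supply name unique by pairing an IDA-allocated ID with devm_kasprintf(), and releases the ID on every failure and removal path. A minimal sketch of that pattern, with a hypothetical IDA and caller:

#include <linux/device.h>
#include <linux/idr.h>

static DEFINE_IDA(example_conn_ida);

static int example_alloc_charger_name(struct device *dev, const char **name,
				      int *id)
{
	*id = ida_alloc(&example_conn_ida, GFP_KERNEL);	/* smallest free ID */
	if (*id < 0)
		return *id;

	*name = devm_kasprintf(dev, GFP_KERNEL, "usb-charger-%d", *id);
	if (!*name) {
		ida_free(&example_conn_ida, *id);	/* undo before bailing out */
		return -ENOMEM;
	}

	return 0;
}

/* Teardown must mirror this with ida_free(&example_conn_ida, id). */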
*/ -static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, +static int usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, enum usb3_link_state state) { int timeout; @@ -4301,7 +4301,7 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, /* Skip if the device BOS descriptor couldn't be read */ if (!udev->bos) - return; + return -EINVAL; u1_mel = udev->bos->ss_cap->bU1devExitLat; u2_mel = udev->bos->ss_cap->bU2DevExitLat; @@ -4312,7 +4312,7 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, */ if ((state == USB3_LPM_U1 && u1_mel == 0) || (state == USB3_LPM_U2 && u2_mel == 0)) - return; + return -EINVAL; /* We allow the host controller to set the U1/U2 timeout internally * first, so that it can change its schedule to account for the @@ -4323,13 +4323,13 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, /* xHCI host controller doesn't want to enable this LPM state. */ if (timeout == 0) - return; + return -EINVAL; if (timeout < 0) { dev_warn(&udev->dev, "Could not enable %s link state, " "xHCI error %i.\n", usb3_lpm_names[state], timeout); - return; + return timeout; } if (usb_set_lpm_timeout(udev, state, timeout)) { @@ -4338,29 +4338,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, * host know that this link state won't be enabled. */ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); - return; - } - - /* Only a configured device will accept the Set Feature - * U1/U2_ENABLE - */ - if (udev->actconfig && - usb_device_may_initiate_lpm(udev, state)) { - if (usb_set_device_initiated_lpm(udev, state, true)) { - /* - * Request to enable device initiated U1/U2 failed, - * better to turn off lpm in this case. - */ - usb_set_lpm_timeout(udev, state, 0); - hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); - return; - } + return -EBUSY; } if (state == USB3_LPM_U1) udev->usb3_lpm_u1_enabled = 1; else if (state == USB3_LPM_U2) udev->usb3_lpm_u2_enabled = 1; + + return 0; } /* * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated @@ -4393,8 +4379,6 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, if (usb_set_lpm_timeout(udev, state, 0)) return -EBUSY; - usb_set_device_initiated_lpm(udev, state, false); - if (hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state)) dev_warn(&udev->dev, "Could not disable xHCI %s timeout, " "bus schedule bandwidth may be impacted.\n", @@ -4424,6 +4408,7 @@ static int usb_disable_link_state(struct usb_hcd *hcd, struct usb_device *udev, int usb_disable_lpm(struct usb_device *udev) { struct usb_hcd *hcd; + int err; if (!udev || !udev->parent || udev->speed < USB_SPEED_SUPER || @@ -4441,14 +4426,19 @@ int usb_disable_lpm(struct usb_device *udev) /* If LPM is enabled, attempt to disable it. 
*/ if (usb_disable_link_state(hcd, udev, USB3_LPM_U1)) - goto enable_lpm; + goto disable_failed; if (usb_disable_link_state(hcd, udev, USB3_LPM_U2)) - goto enable_lpm; + goto disable_failed; + + err = usb_set_device_initiated_lpm(udev, USB3_LPM_U1, false); + if (!err) + usb_set_device_initiated_lpm(udev, USB3_LPM_U2, false); return 0; -enable_lpm: - usb_enable_lpm(udev); +disable_failed: + udev->lpm_disable_count--; + return -EBUSY; } EXPORT_SYMBOL_GPL(usb_disable_lpm); @@ -4509,10 +4499,24 @@ void usb_enable_lpm(struct usb_device *udev) port_dev = hub->ports[udev->portnum - 1]; if (port_dev->usb3_lpm_u1_permit) - usb_enable_link_state(hcd, udev, USB3_LPM_U1); + if (usb_enable_link_state(hcd, udev, USB3_LPM_U1)) + return; if (port_dev->usb3_lpm_u2_permit) - usb_enable_link_state(hcd, udev, USB3_LPM_U2); + if (usb_enable_link_state(hcd, udev, USB3_LPM_U2)) + return; + + /* + * Enable device initiated U1/U2 with a SetFeature(U1/U2_ENABLE) request + * if system exit latency is short enough and device is configured + */ + if (usb_device_may_initiate_lpm(udev, USB3_LPM_U1)) { + if (usb_set_device_initiated_lpm(udev, USB3_LPM_U1, true)) + return; + + if (usb_device_may_initiate_lpm(udev, USB3_LPM_U2)) + usb_set_device_initiated_lpm(udev, USB3_LPM_U2, true); + } } EXPORT_SYMBOL_GPL(usb_enable_lpm); @@ -6133,6 +6137,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) struct usb_hub *parent_hub; struct usb_hcd *hcd = bus_to_hcd(udev->bus); struct usb_device_descriptor descriptor; + struct usb_interface *intf; struct usb_host_bos *bos; int i, j, ret = 0; int port1 = udev->portnum; @@ -6190,6 +6195,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev) if (!udev->actconfig) goto done; + /* + * Some devices can't handle setting default altsetting 0 with a + * Set-Interface request. Disable host-side endpoints of those + * interfaces here. Enable and reset them back after host has set + * its internal endpoint structures during usb_hcd_alloc_bandwith() + */ + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + intf = udev->actconfig->interface[i]; + if (intf->cur_altsetting->desc.bAlternateSetting == 0) + usb_disable_interface(udev, intf, true); + } + mutex_lock(hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL); if (ret < 0) { @@ -6221,12 +6238,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev) */ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { struct usb_host_config *config = udev->actconfig; - struct usb_interface *intf = config->interface[i]; struct usb_interface_descriptor *desc; + intf = config->interface[i]; desc = &intf->cur_altsetting->desc; if (desc->bAlternateSetting == 0) { - usb_disable_interface(udev, intf, true); usb_enable_interface(udev, intf, true); ret = 0; } else { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 36d3df7d040c..53d68d20fb62 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -372,6 +372,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* SanDisk Corp. 
SanDisk 3.2Gen1 */ { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, + /* SanDisk Extreme 55AE */ + { USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM }, + /* Realforce 87U Keyboard */ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 935c0efea0b6..ea1ce8beb0cb 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -165,6 +165,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) return 0; hub = usb_hub_to_struct_hub(udev->parent); + if (!hub) + return 0; port_dev = hub->ports[udev->portnum - 1]; struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 0b4685aad2d5..118fa4c93a79 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -695,15 +695,16 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, device_set_of_node_from_dev(&dev->dev, bus->sysdev); dev_set_name(&dev->dev, "usb%d", bus->busnum); } else { + int n; + /* match any labeling on the hubs; it's one-based */ if (parent->devpath[0] == '0') { - snprintf(dev->devpath, sizeof dev->devpath, - "%d", port1); + n = snprintf(dev->devpath, sizeof(dev->devpath), "%d", port1); /* Root ports are not counted in route string */ dev->route = 0; } else { - snprintf(dev->devpath, sizeof dev->devpath, - "%s.%d", parent->devpath, port1); + n = snprintf(dev->devpath, sizeof(dev->devpath), "%s.%d", + parent->devpath, port1); /* Route string assumes hubs have less than 16 ports */ if (port1 < 15) dev->route = parent->route + @@ -712,6 +713,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, dev->route = parent->route + (15 << ((parent->level - 1)*4)); } + if (n >= sizeof(dev->devpath)) { + usb_put_hcd(bus_to_hcd(bus)); + usb_put_dev(dev); + return NULL; + } dev->dev.parent = &parent->dev; dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 300ea4969f0c..d5b622f78cf3 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -4049,7 +4049,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep, return -EINVAL; } - ep_type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + ep_type = usb_endpoint_type(desc); mps = usb_endpoint_maxp(desc); mc = usb_endpoint_maxp_mult(desc); @@ -4604,6 +4604,12 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) if (!hsotg) return -ENODEV; + /* Exit clock gating when driver is stopped. 
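The config.c and dwc2 hunks above swap open-coded masking of bEndpointAddress and bmAttributes for the <linux/usb/ch9.h> accessors. A short sketch of the equivalences, on a hypothetical descriptor:

#include <linux/usb/ch9.h>

static void example_inspect_ep(const struct usb_endpoint_descriptor *d)
{
	/* same as d->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK */
	int num = usb_endpoint_num(d);
	/* same as d->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK */
	int type = usb_endpoint_type(d);
	/* companion helpers already used by these drivers */
	int mps = usb_endpoint_maxp(d);
	int mult = usb_endpoint_maxp_mult(d);

	(void)num; (void)type; (void)mps; (void)mult;
}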
*/ + if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE && + hsotg->bus_suspended && !hsotg->params.no_clock_gating) { + dwc2_gadget_exit_clock_gating(hsotg, 0); + } + /* all endpoints should be shutdown */ for (ep = 1; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile index 124eda2522d9..830e6c9e5fe0 100644 --- a/drivers/usb/dwc3/Makefile +++ b/drivers/usb/dwc3/Makefile @@ -52,6 +52,7 @@ obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o +obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom-legacy.o obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o obj-$(CONFIG_USB_DWC3_OCTEON) += dwc3-octeon.o diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 66a08b527165..2bc775a747f2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -26,6 +26,7 @@ #include <linux/of_graph.h> #include <linux/acpi.h> #include <linux/pinctrl/consumer.h> +#include <linux/pinctrl/devinfo.h> #include <linux/reset.h> #include <linux/bitfield.h> @@ -36,6 +37,7 @@ #include "core.h" #include "gadget.h" +#include "glue.h" #include "io.h" #include "debug.h" @@ -1699,6 +1701,7 @@ static void dwc3_get_properties(struct dwc3 *dwc) u8 tx_thr_num_pkt_prd = 0; u8 tx_max_burst_prd = 0; u8 tx_fifo_resize_max_num; + u16 num_hc_interrupters; /* default to highest possible threshold */ lpm_nyet_threshold = 0xf; @@ -1719,6 +1722,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) */ tx_fifo_resize_max_num = 6; + /* default to a single XHCI interrupter */ + num_hc_interrupters = 1; + dwc->maximum_speed = usb_get_maximum_speed(dev); dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev); dwc->dr_mode = usb_get_dr_mode(dev); @@ -1765,6 +1771,12 @@ static void dwc3_get_properties(struct dwc3 *dwc) &tx_thr_num_pkt_prd); device_property_read_u8(dev, "snps,tx-max-burst-prd", &tx_max_burst_prd); + device_property_read_u16(dev, "num-hc-interrupters", + &num_hc_interrupters); + /* DWC3 core allowed to have a max of 8 interrupters */ + if (num_hc_interrupters > 8) + num_hc_interrupters = 8; + dwc->do_fifo_resize = device_property_read_bool(dev, "tx-fifo-resize"); if (dwc->do_fifo_resize) @@ -1851,6 +1863,8 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->tx_max_burst_prd = tx_max_burst_prd; dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; + + dwc->num_hc_interrupters = num_hc_interrupters; } /* check whether the core supports IMOD */ @@ -2148,27 +2162,16 @@ static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc) return usb_psy; } -static int dwc3_probe(struct platform_device *pdev) +int dwc3_core_probe(const struct dwc3_probe_data *data) { - struct device *dev = &pdev->dev; - struct resource *res, dwc_res; + struct dwc3 *dwc = data->dwc; + struct device *dev = dwc->dev; + struct resource dwc_res; unsigned int hw_mode; void __iomem *regs; - struct dwc3 *dwc; + struct resource *res = data->res; int ret; - dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL); - if (!dwc) - return -ENOMEM; - - dwc->dev = dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing memory resource\n"); - return -ENODEV; - } - dwc->xhci_resources[0].start = res->start; dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + DWC3_XHCI_REGS_END; @@ -2208,15 +2211,17 @@ static int dwc3_probe(struct platform_device *pdev) if 
(IS_ERR(dwc->usb_psy)) return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n"); - dwc->reset = devm_reset_control_array_get_optional_shared(dev); - if (IS_ERR(dwc->reset)) { - ret = PTR_ERR(dwc->reset); - goto err_put_psy; - } + if (!data->ignore_clocks_and_resets) { + dwc->reset = devm_reset_control_array_get_optional_shared(dev); + if (IS_ERR(dwc->reset)) { + ret = PTR_ERR(dwc->reset); + goto err_put_psy; + } - ret = dwc3_get_clocks(dwc); - if (ret) - goto err_put_psy; + ret = dwc3_get_clocks(dwc); + if (ret) + goto err_put_psy; + } ret = reset_control_deassert(dwc->reset); if (ret) @@ -2232,7 +2237,7 @@ static int dwc3_probe(struct platform_device *pdev) goto err_disable_clks; } - platform_set_drvdata(pdev, dwc); + dev_set_drvdata(dev, dwc); dwc3_cache_hwparams(dwc); if (!dwc->sysdev_is_parent && @@ -2327,12 +2332,35 @@ err_put_psy: return ret; } +EXPORT_SYMBOL_GPL(dwc3_core_probe); -static void dwc3_remove(struct platform_device *pdev) +static int dwc3_probe(struct platform_device *pdev) { - struct dwc3 *dwc = platform_get_drvdata(pdev); + struct dwc3_probe_data probe_data = {}; + struct resource *res; + struct dwc3 *dwc; - pm_runtime_get_sync(&pdev->dev); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "missing memory resource\n"); + return -ENODEV; + } + + dwc = devm_kzalloc(&pdev->dev, sizeof(*dwc), GFP_KERNEL); + if (!dwc) + return -ENOMEM; + + dwc->dev = &pdev->dev; + + probe_data.dwc = dwc; + probe_data.res = res; + + return dwc3_core_probe(&probe_data); +} + +void dwc3_core_remove(struct dwc3 *dwc) +{ + pm_runtime_get_sync(dwc->dev); dwc3_core_exit_mode(dwc); dwc3_debugfs_exit(dwc); @@ -2340,22 +2368,28 @@ static void dwc3_remove(struct platform_device *pdev) dwc3_core_exit(dwc); dwc3_ulpi_exit(dwc); - pm_runtime_allow(&pdev->dev); - pm_runtime_disable(&pdev->dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_allow(dwc->dev); + pm_runtime_disable(dwc->dev); + pm_runtime_dont_use_autosuspend(dwc->dev); + pm_runtime_put_noidle(dwc->dev); /* * HACK: Clear the driver data, which is currently accessed by parent * glue drivers, before allowing the parent to suspend. 
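With dwc3_core_probe() and dwc3_core_remove() exported above, a flattened glue driver can embed struct dwc3 and hand the core a filled-in struct dwc3_probe_data instead of spawning a child platform device. A minimal sketch of such a glue probe, using only the interfaces added in this series (the glue struct itself is hypothetical):

struct example_glue {
	struct dwc3 dwc;	/* embedded core state; drvdata points at this */
};

static int example_glue_probe(struct platform_device *pdev)
{
	struct dwc3_probe_data probe_data = {};
	struct example_glue *glue;
	struct resource *res;

	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	glue->dwc.dev = &pdev->dev;
	probe_data.dwc = &glue->dwc;
	probe_data.res = res;
	/* set only if the glue manages its own clocks and resets */
	probe_data.ignore_clocks_and_resets = true;

	return dwc3_core_probe(&probe_data);
}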
*/ - platform_set_drvdata(pdev, NULL); - pm_runtime_set_suspended(&pdev->dev); + dev_set_drvdata(dwc->dev, NULL); + pm_runtime_set_suspended(dwc->dev); dwc3_free_event_buffers(dwc); if (dwc->usb_psy) power_supply_put(dwc->usb_psy); } +EXPORT_SYMBOL_GPL(dwc3_core_remove); + +static void dwc3_remove(struct platform_device *pdev) +{ + dwc3_core_remove(platform_get_drvdata(pdev)); +} #ifdef CONFIG_PM static int dwc3_core_init_for_resume(struct dwc3 *dwc) @@ -2544,9 +2578,8 @@ static int dwc3_runtime_checks(struct dwc3 *dwc) return 0; } -static int dwc3_runtime_suspend(struct device *dev) +int dwc3_runtime_suspend(struct dwc3 *dwc) { - struct dwc3 *dwc = dev_get_drvdata(dev); int ret; if (dwc3_runtime_checks(dwc)) @@ -2558,10 +2591,11 @@ static int dwc3_runtime_suspend(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(dwc3_runtime_suspend); -static int dwc3_runtime_resume(struct device *dev) +int dwc3_runtime_resume(struct dwc3 *dwc) { - struct dwc3 *dwc = dev_get_drvdata(dev); + struct device *dev = dwc->dev; int ret; ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); @@ -2571,7 +2605,7 @@ static int dwc3_runtime_resume(struct device *dev) switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_DEVICE: if (dwc->pending_events) { - pm_runtime_put(dwc->dev); + pm_runtime_put(dev); dwc->pending_events = false; enable_irq(dwc->irq_gadget); } @@ -2586,10 +2620,11 @@ static int dwc3_runtime_resume(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(dwc3_runtime_resume); -static int dwc3_runtime_idle(struct device *dev) +int dwc3_runtime_idle(struct dwc3 *dwc) { - struct dwc3 *dwc = dev_get_drvdata(dev); + struct device *dev = dwc->dev; switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_DEVICE: @@ -2607,12 +2642,28 @@ static int dwc3_runtime_idle(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(dwc3_runtime_idle); + +static int dwc3_plat_runtime_suspend(struct device *dev) +{ + return dwc3_runtime_suspend(dev_get_drvdata(dev)); +} + +static int dwc3_plat_runtime_resume(struct device *dev) +{ + return dwc3_runtime_resume(dev_get_drvdata(dev)); +} + +static int dwc3_plat_runtime_idle(struct device *dev) +{ + return dwc3_runtime_idle(dev_get_drvdata(dev)); +} #endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP -static int dwc3_suspend(struct device *dev) +int dwc3_pm_suspend(struct dwc3 *dwc) { - struct dwc3 *dwc = dev_get_drvdata(dev); + struct device *dev = dwc->dev; int ret; ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); @@ -2623,10 +2674,11 @@ static int dwc3_suspend(struct device *dev) return 0; } +EXPORT_SYMBOL_GPL(dwc3_pm_suspend); -static int dwc3_resume(struct device *dev) +int dwc3_pm_resume(struct dwc3 *dwc) { - struct dwc3 *dwc = dev_get_drvdata(dev); + struct device *dev = dwc->dev; int ret = 0; pinctrl_pm_select_default_state(dev); @@ -2645,10 +2697,10 @@ out: return ret; } +EXPORT_SYMBOL_GPL(dwc3_pm_resume); -static void dwc3_complete(struct device *dev) +void dwc3_pm_complete(struct dwc3 *dwc) { - struct dwc3 *dwc = dev_get_drvdata(dev); u32 reg; if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && @@ -2658,21 +2710,60 @@ static void dwc3_complete(struct device *dev) dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); } } +EXPORT_SYMBOL_GPL(dwc3_pm_complete); + +int dwc3_pm_prepare(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + + /* + * Indicate to the PM core that it may safely leave the device in + * runtime suspend if runtime-suspended already in device mode. 
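dwc3_pm_prepare() above returns 1 for a runtime-suspended peripheral; in the driver model a positive return from the ->prepare callback lets the PM core leave such a device in runtime suspend across the system transition rather than running the full suspend/resume path. A tiny sketch of that convention, with a hypothetical callback:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_prepare(struct device *dev)
{
	/*
	 * A positive return value opts in to "direct complete": the PM core
	 * may keep an already runtime-suspended device as-is. Returning 0
	 * requests the ordinary suspend/resume callbacks instead.
	 */
	if (pm_runtime_suspended(dev))
		return 1;

	return 0;
}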
+ */ + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE && + pm_runtime_suspended(dev) && + !dev_pinctrl(dev)) + return 1; + + return 0; +} +EXPORT_SYMBOL_GPL(dwc3_pm_prepare); + +static int dwc3_plat_suspend(struct device *dev) +{ + return dwc3_pm_suspend(dev_get_drvdata(dev)); +} + +static int dwc3_plat_resume(struct device *dev) +{ + return dwc3_pm_resume(dev_get_drvdata(dev)); +} + +static void dwc3_plat_complete(struct device *dev) +{ + dwc3_pm_complete(dev_get_drvdata(dev)); +} + +static int dwc3_plat_prepare(struct device *dev) +{ + return dwc3_pm_prepare(dev_get_drvdata(dev)); +} #else -#define dwc3_complete NULL +#define dwc3_plat_complete NULL +#define dwc3_plat_prepare NULL #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dwc3_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume) - .complete = dwc3_complete, - + SET_SYSTEM_SLEEP_PM_OPS(dwc3_plat_suspend, dwc3_plat_resume) + .complete = dwc3_plat_complete, + .prepare = dwc3_plat_prepare, /* * Runtime suspend halts the controller on disconnection. It relies on * platforms with custom connection notification to start the controller * again. */ - SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume, - dwc3_runtime_idle) + SET_RUNTIME_PM_OPS(dwc3_plat_runtime_suspend, dwc3_plat_runtime_resume, + dwc3_plat_runtime_idle) }; #ifdef CONFIG_OF diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index aaa39e663f60..d5b985fa12f4 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1083,6 +1083,7 @@ struct dwc3_scratchpad_array { * @tx_max_burst_prd: max periodic ESS transmit burst size * @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize * @clear_stall_protocol: endpoint number that requires a delayed status phase + * @num_hc_interrupters: number of host controller interrupters * @hsphy_interface: "utmi" or "ulpi" * @connected: true when we're connected to a host, false otherwise * @softconnect: true when gadget connect is called, false when disconnect runs @@ -1164,6 +1165,9 @@ struct dwc3_scratchpad_array { * @gsbuscfg0_reqinfo: store GSBUSCFG0.DATRDREQINFO, DESRDREQINFO, * DATWRREQINFO, and DESWRREQINFO value passed from * glue driver. + * @wakeup_pending_funcs: Indicates whether any interface has requested for + * function wakeup in bitmap format where bit position + * represents interface_id. 
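The new @wakeup_pending_funcs member is documented as a bitmap keyed by interface id. A minimal illustration of how such a bitmap is typically set and drained; the actual servicing step is not part of this hunk and is left as a comment:

#include <linux/bitops.h>
#include <linux/bits.h>

static void example_mark_wakeup(struct dwc3 *dwc, u8 intf_id)
{
	dwc->wakeup_pending_funcs |= BIT(intf_id);	/* remember the request */
}

static void example_drain_wakeups(struct dwc3 *dwc)
{
	while (dwc->wakeup_pending_funcs) {
		int id = ffs(dwc->wakeup_pending_funcs) - 1;

		dwc->wakeup_pending_funcs &= ~BIT(id);
		/* issue the function-wakeup notification for interface 'id' */
	}
}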
*/ struct dwc3 { struct work_struct drd_work; @@ -1330,6 +1334,7 @@ struct dwc3 { u8 tx_max_burst_prd; u8 tx_fifo_resize_max_num; u8 clear_stall_protocol; + u16 num_hc_interrupters; const char *hsphy_interface; @@ -1394,6 +1399,7 @@ struct dwc3 { int num_ep_resized; struct dentry *debug_root; u32 gsbuscfg0_reqinfo; + u32 wakeup_pending_funcs; }; #define INCRX_BURST_MODE 0 diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index de686b9e6404..e934f94e8fd8 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -145,6 +145,12 @@ static void dwc3_exynos_remove(struct platform_device *pdev) regulator_disable(exynos->vdd10); } +static const struct dwc3_exynos_driverdata exynos2200_drvdata = { + .clk_names = { "link_aclk" }, + .num_clks = 1, + .suspend_clk_idx = -1, +}; + static const struct dwc3_exynos_driverdata exynos5250_drvdata = { .clk_names = { "usbdrd30" }, .num_clks = 1, @@ -181,8 +187,17 @@ static const struct dwc3_exynos_driverdata gs101_drvdata = { .suspend_clk_idx = 1, }; +static const struct dwc3_exynos_driverdata exynosautov920_drvdata = { + .clk_names = { "ref", "susp_clk"}, + .num_clks = 2, + .suspend_clk_idx = 1, +}; + static const struct of_device_id exynos_dwc3_match[] = { { + .compatible = "samsung,exynos2200-dwusb3", + .data = &exynos2200_drvdata, + }, { .compatible = "samsung,exynos5250-dwusb3", .data = &exynos5250_drvdata, }, { @@ -198,6 +213,9 @@ static const struct of_device_id exynos_dwc3_match[] = { .compatible = "samsung,exynos850-dwusb3", .data = &exynos850_drvdata, }, { + .compatible = "samsung,exynosautov920-dwusb3", + .data = &exynosautov920_drvdata, + }, { .compatible = "google,gs101-dwusb3", .data = &gs101_drvdata, }, { diff --git a/drivers/usb/dwc3/dwc3-qcom-legacy.c b/drivers/usb/dwc3/dwc3-qcom-legacy.c new file mode 100644 index 000000000000..d3fad0fcfdac --- /dev/null +++ b/drivers/usb/dwc3/dwc3-qcom-legacy.c @@ -0,0 +1,935 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * Inspired by dwc3-of-simple.c + */ + +#include <linux/cleanup.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/clk.h> +#include <linux/irq.h> +#include <linux/of_clk.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/extcon.h> +#include <linux/interconnect.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> +#include <linux/usb/of.h> +#include <linux/reset.h> +#include <linux/iopoll.h> +#include <linux/usb/hcd.h> +#include <linux/usb.h> +#include "core.h" + +/* USB QSCRATCH Hardware registers */ +#define QSCRATCH_HS_PHY_CTRL 0x10 +#define UTMI_OTG_VBUS_VALID BIT(20) +#define SW_SESSVLD_SEL BIT(28) + +#define QSCRATCH_SS_PHY_CTRL 0x30 +#define LANE0_PWR_PRESENT BIT(24) + +#define QSCRATCH_GENERAL_CFG 0x08 +#define PIPE_UTMI_CLK_SEL BIT(0) +#define PIPE3_PHYSTATUS_SW BIT(3) +#define PIPE_UTMI_CLK_DIS BIT(8) + +#define PWR_EVNT_LPM_IN_L2_MASK BIT(4) +#define PWR_EVNT_LPM_OUT_L2_MASK BIT(5) + +#define SDM845_QSCRATCH_BASE_OFFSET 0xf8800 +#define SDM845_QSCRATCH_SIZE 0x400 +#define SDM845_DWC3_CORE_SIZE 0xcd00 + +/* Interconnect path bandwidths in MBps */ +#define USB_MEMORY_AVG_HS_BW MBps_to_icc(240) +#define USB_MEMORY_PEAK_HS_BW MBps_to_icc(700) +#define USB_MEMORY_AVG_SS_BW MBps_to_icc(1000) +#define USB_MEMORY_PEAK_SS_BW MBps_to_icc(2500) +#define APPS_USB_AVG_BW 0 +#define APPS_USB_PEAK_BW MBps_to_icc(40) + +/* Qualcomm SoCs with multiport support has up to 4 ports */ +#define DWC3_QCOM_MAX_PORTS 4 + +static const u32 pwr_evnt_irq_stat_reg[DWC3_QCOM_MAX_PORTS] = { + 0x58, + 0x1dc, + 0x228, + 0x238, +}; + +struct dwc3_qcom_port { + int qusb2_phy_irq; + int dp_hs_phy_irq; + int dm_hs_phy_irq; + int ss_phy_irq; + enum usb_device_speed usb2_speed; +}; + +struct dwc3_qcom { + struct device *dev; + void __iomem *qscratch_base; + struct platform_device *dwc3; + struct clk **clks; + int num_clocks; + struct reset_control *resets; + struct dwc3_qcom_port ports[DWC3_QCOM_MAX_PORTS]; + u8 num_ports; + + struct extcon_dev *edev; + struct extcon_dev *host_edev; + struct notifier_block vbus_nb; + struct notifier_block host_nb; + + enum usb_dr_mode mode; + bool is_suspended; + bool pm_suspended; + struct icc_path *icc_path_ddr; + struct icc_path *icc_path_apps; +}; + +static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg |= val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val) +{ + u32 reg; + + reg = readl(base + offset); + reg &= ~val; + writel(reg, base + offset); + + /* ensure that above write is through */ + readl(base + offset); +} + +static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable) +{ + if (enable) { + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL, + LANE0_PWR_PRESENT); + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL, + UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL); + } else { + dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL, + LANE0_PWR_PRESENT); + dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_HS_PHY_CTRL, + UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL); + } +} + +static int dwc3_qcom_vbus_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb); + + /* enable vbus override for device mode */ + dwc3_qcom_vbus_override_enable(qcom, event); + qcom->mode = 
event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST; + + return NOTIFY_DONE; +} + +static int dwc3_qcom_host_notifier(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb); + + /* disable vbus override in host mode */ + dwc3_qcom_vbus_override_enable(qcom, !event); + qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL; + + return NOTIFY_DONE; +} + +static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom) +{ + struct device *dev = qcom->dev; + struct extcon_dev *host_edev; + int ret; + + if (!of_property_present(dev->of_node, "extcon")) + return 0; + + qcom->edev = extcon_get_edev_by_phandle(dev, 0); + if (IS_ERR(qcom->edev)) + return dev_err_probe(dev, PTR_ERR(qcom->edev), + "Failed to get extcon\n"); + + qcom->vbus_nb.notifier_call = dwc3_qcom_vbus_notifier; + + qcom->host_edev = extcon_get_edev_by_phandle(dev, 1); + if (IS_ERR(qcom->host_edev)) + qcom->host_edev = NULL; + + ret = devm_extcon_register_notifier(dev, qcom->edev, EXTCON_USB, + &qcom->vbus_nb); + if (ret < 0) { + dev_err(dev, "VBUS notifier register failed\n"); + return ret; + } + + if (qcom->host_edev) + host_edev = qcom->host_edev; + else + host_edev = qcom->edev; + + qcom->host_nb.notifier_call = dwc3_qcom_host_notifier; + ret = devm_extcon_register_notifier(dev, host_edev, EXTCON_USB_HOST, + &qcom->host_nb); + if (ret < 0) { + dev_err(dev, "Host notifier register failed\n"); + return ret; + } + + /* Update initial VBUS override based on extcon state */ + if (extcon_get_state(qcom->edev, EXTCON_USB) || + !extcon_get_state(host_edev, EXTCON_USB_HOST)) + dwc3_qcom_vbus_notifier(&qcom->vbus_nb, true, qcom->edev); + else + dwc3_qcom_vbus_notifier(&qcom->vbus_nb, false, qcom->edev); + + return 0; +} + +static int dwc3_qcom_interconnect_enable(struct dwc3_qcom *qcom) +{ + int ret; + + ret = icc_enable(qcom->icc_path_ddr); + if (ret) + return ret; + + ret = icc_enable(qcom->icc_path_apps); + if (ret) + icc_disable(qcom->icc_path_ddr); + + return ret; +} + +static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom) +{ + int ret; + + ret = icc_disable(qcom->icc_path_ddr); + if (ret) + return ret; + + ret = icc_disable(qcom->icc_path_apps); + if (ret) + icc_enable(qcom->icc_path_ddr); + + return ret; +} + +/** + * dwc3_qcom_interconnect_init() - Get interconnect path handles + * and set bandwidth. + * @qcom: Pointer to the concerned usb core. 
+ * + */ +static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) +{ + enum usb_device_speed max_speed; + struct device *dev = qcom->dev; + int ret; + + qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr"); + if (IS_ERR(qcom->icc_path_ddr)) { + return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr), + "failed to get usb-ddr path\n"); + } + + qcom->icc_path_apps = of_icc_get(dev, "apps-usb"); + if (IS_ERR(qcom->icc_path_apps)) { + ret = dev_err_probe(dev, PTR_ERR(qcom->icc_path_apps), + "failed to get apps-usb path\n"); + goto put_path_ddr; + } + + max_speed = usb_get_maximum_speed(&qcom->dwc3->dev); + if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) { + ret = icc_set_bw(qcom->icc_path_ddr, + USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW); + } else { + ret = icc_set_bw(qcom->icc_path_ddr, + USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW); + } + if (ret) { + dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret); + goto put_path_apps; + } + + ret = icc_set_bw(qcom->icc_path_apps, APPS_USB_AVG_BW, APPS_USB_PEAK_BW); + if (ret) { + dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret); + goto put_path_apps; + } + + return 0; + +put_path_apps: + icc_put(qcom->icc_path_apps); +put_path_ddr: + icc_put(qcom->icc_path_ddr); + return ret; +} + +/** + * dwc3_qcom_interconnect_exit() - Release interconnect path handles + * @qcom: Pointer to the concerned usb core. + * + * This function is used to release interconnect path handle. + */ +static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) +{ + icc_put(qcom->icc_path_ddr); + icc_put(qcom->icc_path_apps); +} + +/* Only usable in contexts where the role can not change. */ +static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) +{ + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. */ + if (!dwc) + return false; + + return dwc->xhci; +} + +static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom, int port_index) +{ + struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct usb_device *udev; + struct usb_hcd __maybe_unused *hcd; + + /* + * FIXME: Fix this layering violation. 
+ */ + hcd = platform_get_drvdata(dwc->xhci); + +#ifdef CONFIG_USB + udev = usb_hub_find_child(hcd->self.root_hub, port_index + 1); +#else + udev = NULL; +#endif + if (!udev) + return USB_SPEED_UNKNOWN; + + return udev->speed; +} + +static void dwc3_qcom_enable_wakeup_irq(int irq, unsigned int polarity) +{ + if (!irq) + return; + + if (polarity) + irq_set_irq_type(irq, polarity); + + enable_irq(irq); + enable_irq_wake(irq); +} + +static void dwc3_qcom_disable_wakeup_irq(int irq) +{ + if (!irq) + return; + + disable_irq_wake(irq); + disable_irq_nosync(irq); +} + +static void dwc3_qcom_disable_port_interrupts(struct dwc3_qcom_port *port) +{ + dwc3_qcom_disable_wakeup_irq(port->qusb2_phy_irq); + + if (port->usb2_speed == USB_SPEED_LOW) { + dwc3_qcom_disable_wakeup_irq(port->dm_hs_phy_irq); + } else if ((port->usb2_speed == USB_SPEED_HIGH) || + (port->usb2_speed == USB_SPEED_FULL)) { + dwc3_qcom_disable_wakeup_irq(port->dp_hs_phy_irq); + } else { + dwc3_qcom_disable_wakeup_irq(port->dp_hs_phy_irq); + dwc3_qcom_disable_wakeup_irq(port->dm_hs_phy_irq); + } + + dwc3_qcom_disable_wakeup_irq(port->ss_phy_irq); +} + +static void dwc3_qcom_enable_port_interrupts(struct dwc3_qcom_port *port) +{ + dwc3_qcom_enable_wakeup_irq(port->qusb2_phy_irq, 0); + + /* + * Configure DP/DM line interrupts based on the USB2 device attached to + * the root hub port. When HS/FS device is connected, configure the DP line + * as falling edge to detect both disconnect and remote wakeup scenarios. When + * LS device is connected, configure DM line as falling edge to detect both + * disconnect and remote wakeup. When no device is connected, configure both + * DP and DM lines as rising edge to detect HS/HS/LS device connect scenario. + */ + + if (port->usb2_speed == USB_SPEED_LOW) { + dwc3_qcom_enable_wakeup_irq(port->dm_hs_phy_irq, + IRQ_TYPE_EDGE_FALLING); + } else if ((port->usb2_speed == USB_SPEED_HIGH) || + (port->usb2_speed == USB_SPEED_FULL)) { + dwc3_qcom_enable_wakeup_irq(port->dp_hs_phy_irq, + IRQ_TYPE_EDGE_FALLING); + } else { + dwc3_qcom_enable_wakeup_irq(port->dp_hs_phy_irq, + IRQ_TYPE_EDGE_RISING); + dwc3_qcom_enable_wakeup_irq(port->dm_hs_phy_irq, + IRQ_TYPE_EDGE_RISING); + } + + dwc3_qcom_enable_wakeup_irq(port->ss_phy_irq, 0); +} + +static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom) +{ + int i; + + for (i = 0; i < qcom->num_ports; i++) + dwc3_qcom_disable_port_interrupts(&qcom->ports[i]); +} + +static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom) +{ + int i; + + for (i = 0; i < qcom->num_ports; i++) + dwc3_qcom_enable_port_interrupts(&qcom->ports[i]); +} + +static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup) +{ + u32 val; + int i, ret; + + if (qcom->is_suspended) + return 0; + + for (i = 0; i < qcom->num_ports; i++) { + val = readl(qcom->qscratch_base + pwr_evnt_irq_stat_reg[i]); + if (!(val & PWR_EVNT_LPM_IN_L2_MASK)) + dev_err(qcom->dev, "port-%d HS-PHY not in L2\n", i + 1); + } + + for (i = qcom->num_clocks - 1; i >= 0; i--) + clk_disable_unprepare(qcom->clks[i]); + + ret = dwc3_qcom_interconnect_disable(qcom); + if (ret) + dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret); + + /* + * The role is stable during suspend as role switching is done from a + * freezable workqueue. 
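The wakeup-IRQ helpers above depend on the lines being requested masked and only armed for the duration of a suspend. A condensed sketch of that request/arm/disarm sequence, with a hypothetical IRQ name and handler:

#include <linux/interrupt.h>
#include <linux/irq.h>

static int example_request_wakeup_irq(struct device *dev, int irq,
				      irq_handler_t thread_fn, void *data)
{
	/* IRQF_NO_AUTOEN keeps the line disabled until suspend arms it */
	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_ONESHOT | IRQF_NO_AUTOEN,
					 "example-wakeup", data);
}

static void example_arm_wakeup_irq(int irq, unsigned int polarity)
{
	if (polarity)
		irq_set_irq_type(irq, polarity);  /* e.g. IRQ_TYPE_EDGE_FALLING */
	enable_irq(irq);
	enable_irq_wake(irq);
}

static void example_disarm_wakeup_irq(int irq)
{
	disable_irq_wake(irq);
	disable_irq_nosync(irq);
}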
+ */ + if (dwc3_qcom_is_host(qcom) && wakeup) { + for (i = 0; i < qcom->num_ports; i++) + qcom->ports[i].usb2_speed = dwc3_qcom_read_usb2_speed(qcom, i); + dwc3_qcom_enable_interrupts(qcom); + } + + qcom->is_suspended = true; + + return 0; +} + +static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup) +{ + int ret; + int i; + + if (!qcom->is_suspended) + return 0; + + if (dwc3_qcom_is_host(qcom) && wakeup) + dwc3_qcom_disable_interrupts(qcom); + + for (i = 0; i < qcom->num_clocks; i++) { + ret = clk_prepare_enable(qcom->clks[i]); + if (ret < 0) { + while (--i >= 0) + clk_disable_unprepare(qcom->clks[i]); + return ret; + } + } + + ret = dwc3_qcom_interconnect_enable(qcom); + if (ret) + dev_warn(qcom->dev, "failed to enable interconnect: %d\n", ret); + + /* Clear existing events from PHY related to L2 in/out */ + for (i = 0; i < qcom->num_ports; i++) { + dwc3_qcom_setbits(qcom->qscratch_base, + pwr_evnt_irq_stat_reg[i], + PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK); + } + + qcom->is_suspended = false; + + return 0; +} + +static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data) +{ + struct dwc3_qcom *qcom = data; + struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + + /* If pm_suspended then let pm_resume take care of resuming h/w */ + if (qcom->pm_suspended) + return IRQ_HANDLED; + + /* + * This is safe as role switching is done from a freezable workqueue + * and the wakeup interrupts are disabled as part of resume. + */ + if (dwc3_qcom_is_host(qcom)) + pm_runtime_resume(&dwc->xhci->dev); + + return IRQ_HANDLED; +} + +static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom) +{ + /* Configure dwc3 to use UTMI clock as PIPE clock not present */ + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG, + PIPE_UTMI_CLK_DIS); + + usleep_range(100, 1000); + + dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG, + PIPE_UTMI_CLK_SEL | PIPE3_PHYSTATUS_SW); + + usleep_range(100, 1000); + + dwc3_qcom_clrbits(qcom->qscratch_base, QSCRATCH_GENERAL_CFG, + PIPE_UTMI_CLK_DIS); +} + +static int dwc3_qcom_request_irq(struct dwc3_qcom *qcom, int irq, + const char *name) +{ + int ret; + + /* Keep wakeup interrupts disabled until suspend */ + ret = devm_request_threaded_irq(qcom->dev, irq, NULL, + qcom_dwc3_resume_irq, + IRQF_ONESHOT | IRQF_NO_AUTOEN, + name, qcom); + if (ret) + dev_err(qcom->dev, "failed to request irq %s: %d\n", name, ret); + + return ret; +} + +static int dwc3_qcom_setup_port_irq(struct platform_device *pdev, int port_index, bool is_multiport) +{ + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + const char *irq_name; + int irq; + int ret; + + if (is_multiport) + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dp_hs_phy_%d", port_index + 1); + else + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dp_hs_phy_irq"); + if (!irq_name) + return -ENOMEM; + + irq = platform_get_irq_byname_optional(pdev, irq_name); + if (irq > 0) { + ret = dwc3_qcom_request_irq(qcom, irq, irq_name); + if (ret) + return ret; + qcom->ports[port_index].dp_hs_phy_irq = irq; + } + + if (is_multiport) + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dm_hs_phy_%d", port_index + 1); + else + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "dm_hs_phy_irq"); + if (!irq_name) + return -ENOMEM; + + irq = platform_get_irq_byname_optional(pdev, irq_name); + if (irq > 0) { + ret = dwc3_qcom_request_irq(qcom, irq, irq_name); + if (ret) + return ret; + qcom->ports[port_index].dm_hs_phy_irq = irq; + } + + if (is_multiport) + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, 
"ss_phy_%d", port_index + 1); + else + irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ss_phy_irq"); + if (!irq_name) + return -ENOMEM; + + irq = platform_get_irq_byname_optional(pdev, irq_name); + if (irq > 0) { + ret = dwc3_qcom_request_irq(qcom, irq, irq_name); + if (ret) + return ret; + qcom->ports[port_index].ss_phy_irq = irq; + } + + if (is_multiport) + return 0; + + irq = platform_get_irq_byname_optional(pdev, "qusb2_phy"); + if (irq > 0) { + ret = dwc3_qcom_request_irq(qcom, irq, "qusb2_phy"); + if (ret) + return ret; + qcom->ports[port_index].qusb2_phy_irq = irq; + } + + return 0; +} + +static int dwc3_qcom_find_num_ports(struct platform_device *pdev) +{ + char irq_name[14]; + int port_num; + int irq; + + irq = platform_get_irq_byname_optional(pdev, "dp_hs_phy_1"); + if (irq <= 0) + return 1; + + for (port_num = 2; port_num <= DWC3_QCOM_MAX_PORTS; port_num++) { + sprintf(irq_name, "dp_hs_phy_%d", port_num); + + irq = platform_get_irq_byname_optional(pdev, irq_name); + if (irq <= 0) + return port_num - 1; + } + + return DWC3_QCOM_MAX_PORTS; +} + +static int dwc3_qcom_setup_irq(struct platform_device *pdev) +{ + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + bool is_multiport; + int ret; + int i; + + qcom->num_ports = dwc3_qcom_find_num_ports(pdev); + is_multiport = (qcom->num_ports > 1); + + for (i = 0; i < qcom->num_ports; i++) { + ret = dwc3_qcom_setup_port_irq(pdev, i, is_multiport); + if (ret) + return ret; + } + + return 0; +} + +static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count) +{ + struct device *dev = qcom->dev; + struct device_node *np = dev->of_node; + int i; + + if (!np || !count) + return 0; + + if (count < 0) + return count; + + qcom->num_clocks = count; + + qcom->clks = devm_kcalloc(dev, qcom->num_clocks, + sizeof(struct clk *), GFP_KERNEL); + if (!qcom->clks) + return -ENOMEM; + + for (i = 0; i < qcom->num_clocks; i++) { + struct clk *clk; + int ret; + + clk = of_clk_get(np, i); + if (IS_ERR(clk)) { + while (--i >= 0) + clk_put(qcom->clks[i]); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret < 0) { + while (--i >= 0) { + clk_disable_unprepare(qcom->clks[i]); + clk_put(qcom->clks[i]); + } + clk_put(clk); + + return ret; + } + + qcom->clks[i] = clk; + } + + return 0; +} + +static int dwc3_qcom_of_register_core(struct platform_device *pdev) +{ + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + int ret; + + struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np, + "snps,dwc3"); + if (!dwc3_np) { + dev_err(dev, "failed to find dwc3 core child\n"); + return -ENODEV; + } + + ret = of_platform_populate(np, NULL, NULL, dev); + if (ret) { + dev_err(dev, "failed to register dwc3 core - %d\n", ret); + return ret; + } + + qcom->dwc3 = of_find_device_by_node(dwc3_np); + if (!qcom->dwc3) { + ret = -ENODEV; + dev_err(dev, "failed to get dwc3 platform device\n"); + of_platform_depopulate(dev); + } + + return ret; +} + +static int dwc3_qcom_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct dwc3_qcom *qcom; + int ret, i; + bool ignore_pipe_clk; + bool wakeup_source; + + qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL); + if (!qcom) + return -ENOMEM; + + platform_set_drvdata(pdev, qcom); + qcom->dev = &pdev->dev; + + qcom->resets = devm_reset_control_array_get_optional_exclusive(dev); + if (IS_ERR(qcom->resets)) { + return 
dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets), + "failed to get resets\n"); + } + + ret = reset_control_assert(qcom->resets); + if (ret) { + dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret); + return ret; + } + + usleep_range(10, 1000); + + ret = reset_control_deassert(qcom->resets); + if (ret) { + dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret); + goto reset_assert; + } + + ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np)); + if (ret) { + dev_err_probe(dev, ret, "failed to get clocks\n"); + goto reset_assert; + } + + qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(qcom->qscratch_base)) { + ret = PTR_ERR(qcom->qscratch_base); + goto clk_disable; + } + + ret = dwc3_qcom_setup_irq(pdev); + if (ret) { + dev_err(dev, "failed to setup IRQs, err=%d\n", ret); + goto clk_disable; + } + + /* + * Disable pipe_clk requirement if specified. Used when dwc3 + * operates without SSPHY and only HS/FS/LS modes are supported. + */ + ignore_pipe_clk = device_property_read_bool(dev, + "qcom,select-utmi-as-pipe-clk"); + if (ignore_pipe_clk) + dwc3_qcom_select_utmi_clk(qcom); + + ret = dwc3_qcom_of_register_core(pdev); + if (ret) { + dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret); + goto clk_disable; + } + + ret = dwc3_qcom_interconnect_init(qcom); + if (ret) + goto depopulate; + + qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev); + + /* enable vbus override for device mode */ + if (qcom->mode != USB_DR_MODE_HOST) + dwc3_qcom_vbus_override_enable(qcom, true); + + /* register extcon to override sw_vbus on Vbus change later */ + ret = dwc3_qcom_register_extcon(qcom); + if (ret) + goto interconnect_exit; + + wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source"); + device_init_wakeup(&pdev->dev, wakeup_source); + device_init_wakeup(&qcom->dwc3->dev, wakeup_source); + + qcom->is_suspended = false; + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_forbid(dev); + + return 0; + +interconnect_exit: + dwc3_qcom_interconnect_exit(qcom); +depopulate: + of_platform_depopulate(&pdev->dev); + platform_device_put(qcom->dwc3); +clk_disable: + for (i = qcom->num_clocks - 1; i >= 0; i--) { + clk_disable_unprepare(qcom->clks[i]); + clk_put(qcom->clks[i]); + } +reset_assert: + reset_control_assert(qcom->resets); + + return ret; +} + +static void dwc3_qcom_remove(struct platform_device *pdev) +{ + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int i; + + of_platform_depopulate(&pdev->dev); + platform_device_put(qcom->dwc3); + + for (i = qcom->num_clocks - 1; i >= 0; i--) { + clk_disable_unprepare(qcom->clks[i]); + clk_put(qcom->clks[i]); + } + qcom->num_clocks = 0; + + dwc3_qcom_interconnect_exit(qcom); + reset_control_assert(qcom->resets); + + pm_runtime_allow(dev); + pm_runtime_disable(dev); +} + +static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) +{ + struct dwc3_qcom *qcom = dev_get_drvdata(dev); + bool wakeup = device_may_wakeup(dev); + int ret; + + ret = dwc3_qcom_suspend(qcom, wakeup); + if (ret) + return ret; + + qcom->pm_suspended = true; + + return 0; +} + +static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev) +{ + struct dwc3_qcom *qcom = dev_get_drvdata(dev); + bool wakeup = device_may_wakeup(dev); + int ret; + + ret = dwc3_qcom_resume(qcom, wakeup); + if (ret) + return ret; + + qcom->pm_suspended = false; + + return 0; +} + +static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev) +{ + struct dwc3_qcom *qcom = 
dev_get_drvdata(dev); + + return dwc3_qcom_suspend(qcom, true); +} + +static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev) +{ + struct dwc3_qcom *qcom = dev_get_drvdata(dev); + + return dwc3_qcom_resume(qcom, true); +} + +static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume) + SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume, + NULL) +}; + +static const struct of_device_id dwc3_qcom_of_match[] = { + { .compatible = "qcom,dwc3" }, + { } +}; +MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match); + +static struct platform_driver dwc3_qcom_driver = { + .probe = dwc3_qcom_probe, + .remove = dwc3_qcom_remove, + .driver = { + .name = "dwc3-qcom-legacy", + .pm = &dwc3_qcom_dev_pm_ops, + .of_match_table = dwc3_qcom_of_match, + }, +}; + +module_platform_driver(dwc3_qcom_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DesignWare DWC3 QCOM legacy glue Driver"); diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 58683bb672e9..7334de85ad10 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -4,7 +4,6 @@ * Inspired by dwc3-of-simple.c */ -#include <linux/cleanup.h> #include <linux/io.h> #include <linux/of.h> #include <linux/clk.h> @@ -14,7 +13,6 @@ #include <linux/kernel.h> #include <linux/extcon.h> #include <linux/interconnect.h> -#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> #include <linux/usb/of.h> @@ -23,6 +21,7 @@ #include <linux/usb/hcd.h> #include <linux/usb.h> #include "core.h" +#include "glue.h" /* USB QSCRATCH Hardware registers */ #define QSCRATCH_HS_PHY_CTRL 0x10 @@ -73,8 +72,8 @@ struct dwc3_qcom_port { struct dwc3_qcom { struct device *dev; void __iomem *qscratch_base; - struct platform_device *dwc3; - struct clk **clks; + struct dwc3 dwc; + struct clk_bulk_data *clks; int num_clocks; struct reset_control *resets; struct dwc3_qcom_port ports[DWC3_QCOM_MAX_PORTS]; @@ -92,6 +91,8 @@ struct dwc3_qcom { struct icc_path *icc_path_apps; }; +#define to_dwc3_qcom(d) container_of((d), struct dwc3_qcom, dwc) + static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val) { u32 reg; @@ -116,6 +117,11 @@ static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val) readl(base + offset); } +/* + * TODO: Make the in-core role switching code invoke dwc3_qcom_vbus_override_enable(), + * validate that the in-core extcon support is functional, and drop extcon + * handling from the glue + */ static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable) { if (enable) { @@ -260,7 +266,7 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) goto put_path_ddr; } - max_speed = usb_get_maximum_speed(&qcom->dwc3->dev); + max_speed = usb_get_maximum_speed(qcom->dwc.dev); if (max_speed >= USB_SPEED_SUPER || max_speed == USB_SPEED_UNKNOWN) { ret = icc_set_bw(qcom->icc_path_ddr, USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW); @@ -303,25 +309,14 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. */ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc; - - /* - * FIXME: Fix this layering violation. - */ - dwc = platform_get_drvdata(qcom->dwc3); - - /* Core driver may not have probed yet. 
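Because struct dwc3 is now embedded in struct dwc3_qcom, the to_dwc3_qcom() macro above recovers the glue state from the core pointer that dwc3_core_probe() stores as driver data. A small illustration of that round trip in a hypothetical PM callback:

static int example_glue_pm_callback(struct device *dev)
{
	struct dwc3 *dwc = dev_get_drvdata(dev);	/* set by dwc3_core_probe() */
	struct dwc3_qcom *qcom = to_dwc3_qcom(dwc);	/* container_of() back to the glue */

	/* glue-specific suspend/resume work on qcom->... goes here */
	return 0;
}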
*/ - if (!dwc) - return false; - - return dwc->xhci; + return qcom->dwc.xhci; } static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom, int port_index) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); struct usb_device *udev; struct usb_hcd __maybe_unused *hcd; + struct dwc3 *dwc = &qcom->dwc; /* * FIXME: Fix this layering violation. @@ -436,9 +431,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup) if (!(val & PWR_EVNT_LPM_IN_L2_MASK)) dev_err(qcom->dev, "port-%d HS-PHY not in L2\n", i + 1); } - - for (i = qcom->num_clocks - 1; i >= 0; i--) - clk_disable_unprepare(qcom->clks[i]); + clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks); ret = dwc3_qcom_interconnect_disable(qcom); if (ret) @@ -470,14 +463,9 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup) if (dwc3_qcom_is_host(qcom) && wakeup) dwc3_qcom_disable_interrupts(qcom); - for (i = 0; i < qcom->num_clocks; i++) { - ret = clk_prepare_enable(qcom->clks[i]); - if (ret < 0) { - while (--i >= 0) - clk_disable_unprepare(qcom->clks[i]); - return ret; - } - } + ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks); + if (ret < 0) + return ret; ret = dwc3_qcom_interconnect_enable(qcom); if (ret) @@ -498,7 +486,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup) static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data) { struct dwc3_qcom *qcom = data; - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc = &qcom->dwc; /* If pm_suspended then let pm_resume take care of resuming h/w */ if (qcom->pm_suspended) @@ -547,9 +535,10 @@ static int dwc3_qcom_request_irq(struct dwc3_qcom *qcom, int irq, return ret; } -static int dwc3_qcom_setup_port_irq(struct platform_device *pdev, int port_index, bool is_multiport) +static int dwc3_qcom_setup_port_irq(struct dwc3_qcom *qcom, + struct platform_device *pdev, + int port_index, bool is_multiport) { - struct dwc3_qcom *qcom = platform_get_drvdata(pdev); const char *irq_name; int irq; int ret; @@ -634,9 +623,8 @@ static int dwc3_qcom_find_num_ports(struct platform_device *pdev) return DWC3_QCOM_MAX_PORTS; } -static int dwc3_qcom_setup_irq(struct platform_device *pdev) +static int dwc3_qcom_setup_irq(struct dwc3_qcom *qcom, struct platform_device *pdev) { - struct dwc3_qcom *qcom = platform_get_drvdata(pdev); bool is_multiport; int ret; int i; @@ -645,7 +633,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) is_multiport = (qcom->num_ports > 1); for (i = 0; i < qcom->num_ports; i++) { - ret = dwc3_qcom_setup_port_irq(pdev, i, is_multiport); + ret = dwc3_qcom_setup_port_irq(qcom, pdev, i, is_multiport); if (ret) return ret; } @@ -653,89 +641,14 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) return 0; } -static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count) -{ - struct device *dev = qcom->dev; - struct device_node *np = dev->of_node; - int i; - - if (!np || !count) - return 0; - - if (count < 0) - return count; - - qcom->num_clocks = count; - - qcom->clks = devm_kcalloc(dev, qcom->num_clocks, - sizeof(struct clk *), GFP_KERNEL); - if (!qcom->clks) - return -ENOMEM; - - for (i = 0; i < qcom->num_clocks; i++) { - struct clk *clk; - int ret; - - clk = of_clk_get(np, i); - if (IS_ERR(clk)) { - while (--i >= 0) - clk_put(qcom->clks[i]); - return PTR_ERR(clk); - } - - ret = clk_prepare_enable(clk); - if (ret < 0) { - while (--i >= 0) { - clk_disable_unprepare(qcom->clks[i]); - clk_put(qcom->clks[i]); - } - clk_put(clk); - - return ret; - } - - qcom->clks[i] 
= clk; - } - - return 0; -} - -static int dwc3_qcom_of_register_core(struct platform_device *pdev) -{ - struct dwc3_qcom *qcom = platform_get_drvdata(pdev); - struct device_node *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - int ret; - - struct device_node *dwc3_np __free(device_node) = of_get_compatible_child(np, - "snps,dwc3"); - if (!dwc3_np) { - dev_err(dev, "failed to find dwc3 core child\n"); - return -ENODEV; - } - - ret = of_platform_populate(np, NULL, NULL, dev); - if (ret) { - dev_err(dev, "failed to register dwc3 core - %d\n", ret); - return ret; - } - - qcom->dwc3 = of_find_device_by_node(dwc3_np); - if (!qcom->dwc3) { - ret = -ENODEV; - dev_err(dev, "failed to get dwc3 platform device\n"); - of_platform_depopulate(dev); - } - - return ret; -} - static int dwc3_qcom_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct dwc3_probe_data probe_data = {}; struct device *dev = &pdev->dev; struct dwc3_qcom *qcom; - int ret, i; + struct resource res; + struct resource *r; + int ret; bool ignore_pipe_clk; bool wakeup_source; @@ -743,7 +656,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev) if (!qcom) return -ENOMEM; - platform_set_drvdata(pdev, qcom); qcom->dev = &pdev->dev; qcom->resets = devm_reset_control_array_get_optional_exclusive(dev); @@ -752,6 +664,11 @@ static int dwc3_qcom_probe(struct platform_device *pdev) "failed to get resets\n"); } + ret = devm_clk_bulk_get_all(&pdev->dev, &qcom->clks); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get clocks\n"); + qcom->num_clocks = ret; + ret = reset_control_assert(qcom->resets); if (ret) { dev_err(&pdev->dev, "failed to assert resets, err=%d\n", ret); @@ -766,19 +683,26 @@ static int dwc3_qcom_probe(struct platform_device *pdev) goto reset_assert; } - ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np)); - if (ret) { - dev_err_probe(dev, ret, "failed to get clocks\n"); + ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks); + if (ret < 0) goto reset_assert; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + ret = -EINVAL; + goto clk_disable; } + res = *r; + res.end = res.start + SDM845_QSCRATCH_BASE_OFFSET; - qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(qcom->qscratch_base)) { - ret = PTR_ERR(qcom->qscratch_base); + qcom->qscratch_base = devm_ioremap(dev, res.end, SDM845_QSCRATCH_SIZE); + if (!qcom->qscratch_base) { + dev_err(dev, "failed to map qscratch region\n"); + ret = -ENOMEM; goto clk_disable; } - ret = dwc3_qcom_setup_irq(pdev); + ret = dwc3_qcom_setup_irq(qcom, pdev); if (ret) { dev_err(dev, "failed to setup IRQs, err=%d\n", ret); goto clk_disable; @@ -793,17 +717,21 @@ static int dwc3_qcom_probe(struct platform_device *pdev) if (ignore_pipe_clk) dwc3_qcom_select_utmi_clk(qcom); - ret = dwc3_qcom_of_register_core(pdev); - if (ret) { - dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret); + qcom->dwc.dev = dev; + probe_data.dwc = &qcom->dwc; + probe_data.res = &res; + probe_data.ignore_clocks_and_resets = true; + ret = dwc3_core_probe(&probe_data); + if (ret) { + ret = dev_err_probe(dev, ret, "failed to register DWC3 Core\n"); goto clk_disable; } ret = dwc3_qcom_interconnect_init(qcom); if (ret) - goto depopulate; + goto remove_core; - qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev); + qcom->mode = usb_get_dr_mode(dev); /* enable vbus override for device mode */ if (qcom->mode != USB_DR_MODE_HOST) @@ -816,25 +744,17 @@ static int dwc3_qcom_probe(struct platform_device *pdev) 
wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source"); device_init_wakeup(&pdev->dev, wakeup_source); - device_init_wakeup(&qcom->dwc3->dev, wakeup_source); qcom->is_suspended = false; - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - pm_runtime_forbid(dev); return 0; interconnect_exit: dwc3_qcom_interconnect_exit(qcom); -depopulate: - of_platform_depopulate(&pdev->dev); - platform_device_put(qcom->dwc3); +remove_core: + dwc3_core_remove(&qcom->dwc); clk_disable: - for (i = qcom->num_clocks - 1; i >= 0; i--) { - clk_disable_unprepare(qcom->clks[i]); - clk_put(qcom->clks[i]); - } + clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks); reset_assert: reset_control_assert(qcom->resets); @@ -843,32 +763,28 @@ reset_assert: static void dwc3_qcom_remove(struct platform_device *pdev) { - struct dwc3_qcom *qcom = platform_get_drvdata(pdev); - struct device *dev = &pdev->dev; - int i; + struct dwc3 *dwc = platform_get_drvdata(pdev); + struct dwc3_qcom *qcom = to_dwc3_qcom(dwc); - of_platform_depopulate(&pdev->dev); - platform_device_put(qcom->dwc3); + dwc3_core_remove(&qcom->dwc); - for (i = qcom->num_clocks - 1; i >= 0; i--) { - clk_disable_unprepare(qcom->clks[i]); - clk_put(qcom->clks[i]); - } - qcom->num_clocks = 0; + clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks); dwc3_qcom_interconnect_exit(qcom); reset_control_assert(qcom->resets); - - pm_runtime_allow(dev); - pm_runtime_disable(dev); } -static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) +static int dwc3_qcom_pm_suspend(struct device *dev) { - struct dwc3_qcom *qcom = dev_get_drvdata(dev); + struct dwc3 *dwc = dev_get_drvdata(dev); + struct dwc3_qcom *qcom = to_dwc3_qcom(dwc); bool wakeup = device_may_wakeup(dev); int ret; + ret = dwc3_pm_suspend(&qcom->dwc); + if (ret) + return ret; + ret = dwc3_qcom_suspend(qcom, wakeup); if (ret) return ret; @@ -878,9 +794,10 @@ static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev) return 0; } -static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev) +static int dwc3_qcom_pm_resume(struct device *dev) { - struct dwc3_qcom *qcom = dev_get_drvdata(dev); + struct dwc3 *dwc = dev_get_drvdata(dev); + struct dwc3_qcom *qcom = to_dwc3_qcom(dwc); bool wakeup = device_may_wakeup(dev); int ret; @@ -890,31 +807,68 @@ static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev) qcom->pm_suspended = false; + ret = dwc3_pm_resume(&qcom->dwc); + if (ret) + return ret; + return 0; } -static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev) +static void dwc3_qcom_complete(struct device *dev) { - struct dwc3_qcom *qcom = dev_get_drvdata(dev); + struct dwc3 *dwc = dev_get_drvdata(dev); + + dwc3_pm_complete(dwc); +} + +static int dwc3_qcom_prepare(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + + return dwc3_pm_prepare(dwc); +} + +static int dwc3_qcom_runtime_suspend(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + struct dwc3_qcom *qcom = to_dwc3_qcom(dwc); + int ret; + + ret = dwc3_runtime_suspend(&qcom->dwc); + if (ret) + return ret; return dwc3_qcom_suspend(qcom, true); } -static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev) +static int dwc3_qcom_runtime_resume(struct device *dev) { - struct dwc3_qcom *qcom = dev_get_drvdata(dev); + struct dwc3 *dwc = dev_get_drvdata(dev); + struct dwc3_qcom *qcom = to_dwc3_qcom(dwc); + int ret; - return dwc3_qcom_resume(qcom, true); + ret = dwc3_qcom_resume(qcom, true); + if (ret) + return ret; + + return dwc3_runtime_resume(&qcom->dwc); 
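The remove and power-management callbacks above no longer look up a child platform device: the glue now embeds struct dwc3, recovers its own state with to_dwc3_qcom(), and drives its clocks through the clk_bulk helpers instead of open-coded loops. A condensed, hypothetical sketch of that pattern (names invented here, reduced to the parts these hunks rely on):

	#include <linux/clk.h>
	#include <linux/container_of.h>
	#include <linux/device.h>
	#include "core.h"			/* struct dwc3 */

	/* Hypothetical wrapper mirroring the reworked dwc3-qcom layout. */
	struct glue_example {
		struct dwc3		dwc;	/* embedded core, must not be a pointer */
		struct clk_bulk_data	*clks;
		int			num_clocks;
	};

	static inline struct glue_example *to_glue_example(struct dwc3 *dwc)
	{
		return container_of(dwc, struct glue_example, dwc);
	}

	static int glue_example_enable_clocks(struct glue_example *glue, struct device *dev)
	{
		int ret;

		/* Fetch every clock listed for the device in one call. */
		ret = devm_clk_bulk_get_all(dev, &glue->clks);
		if (ret < 0)
			return ret;
		glue->num_clocks = ret;

		return clk_bulk_prepare_enable(glue->num_clocks, glue->clks);
	}

	static void glue_example_disable_clocks(struct glue_example *glue)
	{
		clk_bulk_disable_unprepare(glue->num_clocks, glue->clks);
	}

Because the core probe sets the platform drvdata to the struct dwc3 pointer, dev_get_drvdata() in the PM callbacks above yields the embedded core, and container_of() turns it back into the glue structure without extra bookkeeping.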
+} + +static int dwc3_qcom_runtime_idle(struct device *dev) +{ + return dwc3_runtime_idle(dev_get_drvdata(dev)); } static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume) - SET_RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume, - NULL) + SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume) + RUNTIME_PM_OPS(dwc3_qcom_runtime_suspend, dwc3_qcom_runtime_resume, + dwc3_qcom_runtime_idle) + .complete = pm_sleep_ptr(dwc3_qcom_complete), + .prepare = pm_sleep_ptr(dwc3_qcom_prepare), }; static const struct of_device_id dwc3_qcom_of_match[] = { - { .compatible = "qcom,dwc3" }, + { .compatible = "qcom,snps-dwc3" }, { } }; MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match); @@ -924,7 +878,7 @@ static struct platform_driver dwc3_qcom_driver = { .remove = dwc3_qcom_remove, .driver = { .name = "dwc3-qcom", - .pm = &dwc3_qcom_dev_pm_ops, + .pm = pm_ptr(&dwc3_qcom_dev_pm_ops), .of_match_table = dwc3_qcom_of_match, }, }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8c30d86cc4e3..321361288935 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -276,8 +276,6 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd, return ret; } -static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async); - /** * dwc3_send_gadget_ep_cmd - issue an endpoint command * @dep: the endpoint to which the command is going to be issued @@ -2359,10 +2357,8 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g) return __dwc3_gadget_get_frame(dwc); } -static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async) +static int __dwc3_gadget_wakeup(struct dwc3 *dwc) { - int retries; - int ret; u32 reg; @@ -2390,8 +2386,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async) return -EINVAL; } - if (async) - dwc3_gadget_enable_linksts_evts(dwc, true); + dwc3_gadget_enable_linksts_evts(dwc, true); ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV); if (ret < 0) { @@ -2410,27 +2405,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc, bool async) /* * Since link status change events are enabled we will receive - * an U0 event when wakeup is successful. So bail out. + * an U0 event when wakeup is successful. 
*/ - if (async) - return 0; - - /* poll until Link State changes to ON */ - retries = 20000; - - while (retries--) { - reg = dwc3_readl(dwc->regs, DWC3_DSTS); - - /* in HS, means ON */ - if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0) - break; - } - - if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) { - dev_err(dwc->dev, "failed to send remote wakeup\n"); - return -EINVAL; - } - return 0; } @@ -2451,7 +2427,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g) spin_unlock_irqrestore(&dwc->lock, flags); return -EINVAL; } - ret = __dwc3_gadget_wakeup(dwc, true); + ret = __dwc3_gadget_wakeup(dwc); spin_unlock_irqrestore(&dwc->lock, flags); @@ -2479,14 +2455,10 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id) */ link_state = dwc3_gadget_get_link_state(dwc); if (link_state == DWC3_LINK_STATE_U3) { - ret = __dwc3_gadget_wakeup(dwc, false); - if (ret) { - spin_unlock_irqrestore(&dwc->lock, flags); - return -EINVAL; - } - dwc3_resume_gadget(dwc); - dwc->suspended = false; - dwc->link_state = DWC3_LINK_STATE_U0; + dwc->wakeup_pending_funcs |= BIT(intf_id); + ret = __dwc3_gadget_wakeup(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); + return ret; } ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION, @@ -4353,6 +4325,8 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, { enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK; unsigned int pwropt; + int ret; + int intf_id; /* * WORKAROUND: DWC3 < 2.50a have an issue when configured without @@ -4428,7 +4402,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, switch (next) { case DWC3_LINK_STATE_U0: - if (dwc->gadget->wakeup_armed) { + if (dwc->gadget->wakeup_armed || dwc->wakeup_pending_funcs) { dwc3_gadget_enable_linksts_evts(dwc, false); dwc3_resume_gadget(dwc); dwc->suspended = false; @@ -4451,6 +4425,18 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, } dwc->link_state = next; + + /* Proceed with func wakeup if any interfaces that has requested */ + while (dwc->wakeup_pending_funcs && (next == DWC3_LINK_STATE_U0)) { + intf_id = ffs(dwc->wakeup_pending_funcs) - 1; + ret = dwc3_send_gadget_generic_command(dwc, DWC3_DGCMD_DEV_NOTIFICATION, + DWC3_DGCMDPAR_DN_FUNC_WAKE | + DWC3_DGCMDPAR_INTF_SEL(intf_id)); + if (ret) + dev_err(dwc->dev, "Failed to send DN wake for intf %d\n", intf_id); + + dwc->wakeup_pending_funcs &= ~BIT(intf_id); + } } static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, diff --git a/drivers/usb/dwc3/glue.h b/drivers/usb/dwc3/glue.h new file mode 100644 index 000000000000..2efd00e763be --- /dev/null +++ b/drivers/usb/dwc3/glue.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * glue.h - DesignWare USB3 DRD glue header + */ + +#ifndef __DRIVERS_USB_DWC3_GLUE_H +#define __DRIVERS_USB_DWC3_GLUE_H + +#include <linux/types.h> +#include "core.h" + +/** + * dwc3_probe_data: Initialization parameters passed to dwc3_core_probe() + * @dwc: Reference to dwc3 context structure + * @res: resource for the DWC3 core mmio region + * @ignore_clocks_and_resets: clocks and resets defined for the device should + * be ignored by the DWC3 core, as they are managed by the glue + */ +struct dwc3_probe_data { + struct dwc3 *dwc; + struct resource *res; + bool ignore_clocks_and_resets; +}; + +int dwc3_core_probe(const struct dwc3_probe_data *data); +void dwc3_core_remove(struct dwc3 *dwc); + +int dwc3_runtime_suspend(struct dwc3 *dwc); +int dwc3_runtime_resume(struct dwc3 *dwc); +int dwc3_runtime_idle(struct dwc3 
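The glue.h kernel-doc above defines dwc3_probe_data as the hand-off a glue driver makes into the shared core. A minimal, hypothetical probe using it (essentially a condensed version of the qcom probe earlier in this patch, reusing the glue_example wrapper sketched above):

	static int glue_example_probe(struct platform_device *pdev)
	{
		struct dwc3_probe_data probe_data = {};
		struct glue_example *glue;
		struct resource *res;

		glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
		if (!glue)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -EINVAL;

		glue->dwc.dev = &pdev->dev;

		probe_data.dwc = &glue->dwc;
		probe_data.res = res;
		/* The glue owns clocks and resets, so the core must not touch them. */
		probe_data.ignore_clocks_and_resets = true;

		return dwc3_core_probe(&probe_data);
	}

On the remove side the same glue would call dwc3_core_remove(&glue->dwc), mirroring dwc3_qcom_remove() earlier in this patch.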
*dwc); +int dwc3_pm_suspend(struct dwc3 *dwc); +int dwc3_pm_resume(struct dwc3 *dwc); +void dwc3_pm_complete(struct dwc3 *dwc); +int dwc3_pm_prepare(struct dwc3 *dwc); + +#endif diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index b48e108fc8fe..1c513bf8002e 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -182,6 +182,9 @@ int dwc3_host_init(struct dwc3 *dwc) if (DWC3_VER_IS_WITHIN(DWC3, ANY, 300A)) props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped"); + props[prop_idx++] = PROPERTY_ENTRY_U16("num-hc-interrupters", + dwc->num_hc_interrupters); + if (prop_idx) { ret = device_create_managed_software_node(&xhci->dev, props, NULL); if (ret) { diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 869ad99afb48..8dbc132a505e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2011,15 +2011,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) if (f->get_status) { status = f->get_status(f); + if (status < 0) break; - } else { - /* Set D0 and D1 bits based on func wakeup capability */ - if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) { - status |= USB_INTRF_STAT_FUNC_RW_CAP; - if (f->func_wakeup_armed) - status |= USB_INTRF_STAT_FUNC_RW; - } + + /* if D5 is not set, then device is not wakeup capable */ + if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP)) + status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW); } put_unaligned_le16(status & 0x0000ffff, req->buf); diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index ed5a92c474e5..30016b805bfd 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -158,7 +158,7 @@ struct usb_ep *usb_ep_autoconfig( if (!ep) return NULL; - type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + type = usb_endpoint_type(desc); /* report (variable) full speed bulk maxpacket */ if (type == USB_ENDPOINT_XFER_BULK) { diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c index 80841de845b0..027226325039 100644 --- a/drivers/usb/gadget/function/f_ecm.c +++ b/drivers/usb/gadget/function/f_ecm.c @@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f) gether_resume(&ecm->port); } +static int ecm_get_status(struct usb_function *f) +{ + return (f->func_wakeup_armed ? 
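The composite.c change above defers GET_STATUS(interface) to a function's get_status() callback and only intervenes when the configuration's bmAttributes lacks D5 (USB_CONFIG_ATT_WAKEUP), in which case both remote-wakeup bits are cleared; f_ecm here and f_serial further down gain matching callbacks. The contract such a callback implements, shown as a hypothetical sketch rather than the exact code added by the patch:

	/* Sketch of a get_status() callback answering GET_STATUS directed at an interface. */
	static int example_get_status(struct usb_function *f)
	{
		/* Bit 0: this function supports function remote wakeup. */
		int status = USB_INTRF_STAT_FUNC_RW_CAP;

		/* Bit 1: the host armed remote wakeup via the FUNCTION_SUSPEND feature. */
		if (f->func_wakeup_armed)
			status |= USB_INTRF_STAT_FUNC_RW;

		return status;	/* a negative value makes composite_setup() abort the reply */
	}

composite_setup() then clears both bits when the active configuration never advertised wakeup capability, so a function cannot report more than the configuration descriptor promised.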
USB_INTRF_STAT_FUNC_RW : 0) | + USB_INTRF_STAT_FUNC_RW_CAP; +} + static void ecm_free(struct usb_function *f) { struct f_ecm *ecm; @@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi) ecm->port.func.disable = ecm_disable; ecm->port.func.free_func = ecm_free; ecm->port.func.suspend = ecm_suspend; + ecm->port.func.get_status = ecm_get_status; ecm->port.func.resume = ecm_resume; return &ecm->port.func; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 740311c4fa24..1b2a363f3177 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -62,6 +62,9 @@ struct f_hidg { unsigned short report_desc_length; char *report_desc; unsigned short report_length; + unsigned char interval; + bool interval_user_set; + /* * use_out_ep - if true, the OUT Endpoint (interrupt out method) * will be used to receive reports from the host @@ -75,6 +78,7 @@ struct f_hidg { /* recv report */ spinlock_t read_spinlock; wait_queue_head_t read_queue; + bool disabled; /* recv report - interrupt out only (use_out_ep == 1) */ struct list_head completed_out_req; unsigned int qlen; @@ -156,10 +160,7 @@ static struct usb_endpoint_descriptor hidg_ss_in_ep_desc = { .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ - .bInterval = 4, /* FIXME: Add this field in the - * HID gadget configuration? - * (struct hidg_func_descriptor) - */ + /*.bInterval = DYNAMIC */ }; static struct usb_ss_ep_comp_descriptor hidg_ss_in_comp_desc = { @@ -177,10 +178,7 @@ static struct usb_endpoint_descriptor hidg_ss_out_ep_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ - .bInterval = 4, /* FIXME: Add this field in the - * HID gadget configuration? - * (struct hidg_func_descriptor) - */ + /*.bInterval = DYNAMIC */ }; static struct usb_ss_ep_comp_descriptor hidg_ss_out_comp_desc = { @@ -218,10 +216,7 @@ static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ - .bInterval = 4, /* FIXME: Add this field in the - * HID gadget configuration? - * (struct hidg_func_descriptor) - */ + /* .bInterval = DYNAMIC */ }; static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = { @@ -230,10 +225,7 @@ static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ - .bInterval = 4, /* FIXME: Add this field in the - * HID gadget configuration? - * (struct hidg_func_descriptor) - */ + /*.bInterval = DYNAMIC */ }; static struct usb_descriptor_header *hidg_hs_descriptors_intout[] = { @@ -259,10 +251,7 @@ static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = { .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ - .bInterval = 10, /* FIXME: Add this field in the - * HID gadget configuration? - * (struct hidg_func_descriptor) - */ + /*.bInterval = DYNAMIC */ }; static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = { @@ -271,10 +260,7 @@ static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = { .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_INT, /*.wMaxPacketSize = DYNAMIC */ - .bInterval = 10, /* FIXME: Add this field in the - * HID gadget configuration? 
- * (struct hidg_func_descriptor) - */ + /*.bInterval = DYNAMIC */ }; static struct usb_descriptor_header *hidg_fs_descriptors_intout[] = { @@ -329,7 +315,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, spin_lock_irqsave(&hidg->read_spinlock, flags); -#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req)) +#define READ_COND_INTOUT (!list_empty(&hidg->completed_out_req) || hidg->disabled) /* wait for at least one buffer to complete */ while (!READ_COND_INTOUT) { @@ -343,6 +329,11 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, spin_lock_irqsave(&hidg->read_spinlock, flags); } + if (hidg->disabled) { + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + return -ESHUTDOWN; + } + /* pick the first one */ list = list_first_entry(&hidg->completed_out_req, struct f_hidg_req_list, list); @@ -387,7 +378,7 @@ static ssize_t f_hidg_intout_read(struct file *file, char __user *buffer, return count; } -#define READ_COND_SSREPORT (hidg->set_report_buf != NULL) +#define READ_COND_SSREPORT (hidg->set_report_buf != NULL || hidg->disabled) static ssize_t f_hidg_ssreport_read(struct file *file, char __user *buffer, size_t count, loff_t *ptr) @@ -1012,6 +1003,11 @@ static void hidg_disable(struct usb_function *f) } spin_unlock_irqrestore(&hidg->get_report_spinlock, flags); + spin_lock_irqsave(&hidg->read_spinlock, flags); + hidg->disabled = true; + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + wake_up(&hidg->read_queue); + spin_lock_irqsave(&hidg->write_spinlock, flags); if (!hidg->write_pending) { free_ep_req(hidg->in_ep, hidg->req); @@ -1097,6 +1093,10 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) } } + spin_lock_irqsave(&hidg->read_spinlock, flags); + hidg->disabled = false; + spin_unlock_irqrestore(&hidg->read_spinlock, flags); + if (hidg->in_ep != NULL) { spin_lock_irqsave(&hidg->write_spinlock, flags); hidg->req = req_in; @@ -1202,6 +1202,16 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); hidg_ss_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + + /* IN endpoints: FS default=10ms, HS default=4µ-frame; user override if set */ + if (!hidg->interval_user_set) { + hidg_fs_in_ep_desc.bInterval = 10; + hidg_hs_in_ep_desc.bInterval = 4; + } else { + hidg_fs_in_ep_desc.bInterval = hidg->interval; + hidg_hs_in_ep_desc.bInterval = hidg->interval; + } + hidg_ss_out_comp_desc.wBytesPerInterval = cpu_to_le16(hidg->report_length); hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); @@ -1224,19 +1234,27 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f) hidg_ss_out_ep_desc.bEndpointAddress = hidg_fs_out_ep_desc.bEndpointAddress; - if (hidg->use_out_ep) + if (hidg->use_out_ep) { + /* OUT endpoints: same defaults (FS=10, HS=4) unless user set */ + if (!hidg->interval_user_set) { + hidg_fs_out_ep_desc.bInterval = 10; + hidg_hs_out_ep_desc.bInterval = 4; + } else { + hidg_fs_out_ep_desc.bInterval = hidg->interval; + hidg_hs_out_ep_desc.bInterval = hidg->interval; + } status = usb_assign_descriptors(f, - hidg_fs_descriptors_intout, - hidg_hs_descriptors_intout, - hidg_ss_descriptors_intout, - hidg_ss_descriptors_intout); - else + hidg_fs_descriptors_intout, + hidg_hs_descriptors_intout, + hidg_ss_descriptors_intout, + hidg_ss_descriptors_intout); + } else { status = 
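The bind-time defaults above (bInterval 10 at full speed, 4 at high speed) follow from how an interrupt endpoint's bInterval is interpreted per speed: at full speed it is a period counted in 1 ms frames, while at high speed it is an exponent giving 2^(bInterval-1) microframes of 125 µs, so the defaults correspond to 10 ms and 1 ms respectively. An illustrative helper, not part of the patch:

	/* Illustrative only: polling period implied by an interrupt endpoint's bInterval. */
	static unsigned int hid_interval_to_us(bool highspeed, unsigned char bInterval)
	{
		if (highspeed)
			return 125u << (bInterval - 1);	/* 2^(bInterval-1) microframes */

		return 1000u * bInterval;		/* full speed: bInterval frames of 1 ms */
	}

With these defaults, hid_interval_to_us(false, 10) is 10000 µs and hid_interval_to_us(true, 4) is 1000 µs; a user-set interval from configfs is applied verbatim to both speeds, so the same numeric value can describe different periods depending on the negotiated speed.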
usb_assign_descriptors(f, hidg_fs_descriptors_ssreport, hidg_hs_descriptors_ssreport, hidg_ss_descriptors_ssreport, hidg_ss_descriptors_ssreport); - + } if (status) goto fail; @@ -1408,6 +1426,53 @@ end: CONFIGFS_ATTR(f_hid_opts_, report_desc); +static ssize_t f_hid_opts_interval_show(struct config_item *item, char *page) +{ + struct f_hid_opts *opts = to_f_hid_opts(item); + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%d\n", opts->interval); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_hid_opts_interval_store(struct config_item *item, + const char *page, size_t len) +{ + struct f_hid_opts *opts = to_f_hid_opts(item); + int ret; + unsigned int tmp; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + /* parse into a wider type first */ + ret = kstrtouint(page, 0, &tmp); + if (ret) + goto end; + + /* range-check against unsigned char max */ + if (tmp > 255) { + ret = -EINVAL; + goto end; + } + + opts->interval = (unsigned char)tmp; + opts->interval_user_set = true; + ret = len; + +end: + mutex_unlock(&opts->lock); + return ret; +} + +CONFIGFS_ATTR(f_hid_opts_, interval); + static ssize_t f_hid_opts_dev_show(struct config_item *item, char *page) { struct f_hid_opts *opts = to_f_hid_opts(item); @@ -1422,6 +1487,7 @@ static struct configfs_attribute *hid_attrs[] = { &f_hid_opts_attr_protocol, &f_hid_opts_attr_no_out_endpoint, &f_hid_opts_attr_report_length, + &f_hid_opts_attr_interval, &f_hid_opts_attr_report_desc, &f_hid_opts_attr_dev, NULL, @@ -1468,6 +1534,10 @@ static struct usb_function_instance *hidg_alloc_inst(void) if (!opts) return ERR_PTR(-ENOMEM); mutex_init(&opts->lock); + + opts->interval = 4; + opts->interval_user_set = false; + opts->func_inst.free_func_inst = hidg_free_inst; ret = &opts->func_inst; @@ -1546,6 +1616,8 @@ static struct usb_function *hidg_alloc(struct usb_function_instance *fi) hidg->bInterfaceProtocol = opts->protocol; hidg->report_length = opts->report_length; hidg->report_desc_length = opts->report_desc_length; + hidg->interval = opts->interval; + hidg->interval_user_set = opts->interval_user_set; if (opts->report_desc) { hidg->report_desc = kmemdup(opts->report_desc, opts->report_desc_length, diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c index 8f7e7a2b2ff2..0f266bc067f5 100644 --- a/drivers/usb/gadget/function/f_serial.c +++ b/drivers/usb/gadget/function/f_serial.c @@ -364,6 +364,12 @@ static void gser_suspend(struct usb_function *f) gserial_suspend(&gser->port); } +static int gser_get_status(struct usb_function *f) +{ + return (f->func_wakeup_armed ? 
USB_INTRF_STAT_FUNC_RW : 0) | + USB_INTRF_STAT_FUNC_RW_CAP; +} + static struct usb_function *gser_alloc(struct usb_function_instance *fi) { struct f_gser *gser; @@ -387,6 +393,7 @@ static struct usb_function *gser_alloc(struct usb_function_instance *fi) gser->port.func.free_func = gser_free; gser->port.func.resume = gser_resume; gser->port.func.suspend = gser_suspend; + gser->port.func.get_status = gser_get_status; return &gser->port.func; } diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 5a2e1237f85c..6e8804f04baa 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -1641,14 +1641,14 @@ static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn, struct usbg_tport *tport = container_of(wwn, struct usbg_tport, tport_wwn); struct usbg_tpg *tpg; - unsigned long tpgt; + u16 tpgt; int ret; struct f_tcm_opts *opts; unsigned i; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX) + if (kstrtou16(name + 5, 0, &tpgt)) return ERR_PTR(-EINVAL); ret = -ENODEV; mutex_lock(&tpg_instances_lock); diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h index 84bb70292855..a9ed9720caee 100644 --- a/drivers/usb/gadget/function/u_hid.h +++ b/drivers/usb/gadget/function/u_hid.h @@ -25,6 +25,8 @@ struct f_hid_opts { unsigned short report_desc_length; unsigned char *report_desc; bool report_desc_alloc; + unsigned char interval; + bool interval_user_set; /* * Protect the data form concurrent access by read/write diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 36fff45e8c9b..ab544f6824be 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -592,6 +592,17 @@ static int gs_start_io(struct gs_port *port) return status; } +static int gserial_wakeup_host(struct gserial *gser) +{ + struct usb_function *func = &gser->func; + struct usb_gadget *gadget = func->config->cdev->gadget; + + if (func->func_suspended) + return usb_func_wakeup(func); + else + return usb_gadget_wakeup(gadget); +} + /*-------------------------------------------------------------------------*/ /* TTY Driver */ @@ -746,6 +757,8 @@ static ssize_t gs_write(struct tty_struct *tty, const u8 *buf, size_t count) { struct gs_port *port = tty->driver_data; unsigned long flags; + int ret = 0; + struct gserial *gser = port->port_usb; pr_vdebug("gs_write: ttyGS%d (%p) writing %zu bytes\n", port->port_num, tty, count); @@ -753,6 +766,17 @@ static ssize_t gs_write(struct tty_struct *tty, const u8 *buf, size_t count) spin_lock_irqsave(&port->port_lock, flags); if (count) count = kfifo_in(&port->port_write_buf, buf, count); + + if (port->suspended) { + spin_unlock_irqrestore(&port->port_lock, flags); + ret = gserial_wakeup_host(gser); + if (ret) { + pr_debug("ttyGS%d: Remote wakeup failed:%d\n", port->port_num, ret); + return count; + } + spin_lock_irqsave(&port->port_lock, flags); + } + /* treat count == 0 as flush_chars() */ if (port->port_usb) gs_start_tx(port); @@ -781,10 +805,22 @@ static void gs_flush_chars(struct tty_struct *tty) { struct gs_port *port = tty->driver_data; unsigned long flags; + int ret = 0; + struct gserial *gser = port->port_usb; pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty); spin_lock_irqsave(&port->port_lock, flags); + if (port->suspended) { + spin_unlock_irqrestore(&port->port_lock, flags); + ret = gserial_wakeup_host(gser); + if (ret) { + 
pr_debug("ttyGS%d: Remote wakeup failed:%d\n", port->port_num, ret); + return; + } + spin_lock_irqsave(&port->port_lock, flags); + } + if (port->port_usb) gs_start_tx(port); spin_unlock_irqrestore(&port->port_lock, flags); @@ -1464,6 +1500,20 @@ void gserial_suspend(struct gserial *gser) return; } + if (port->write_busy || port->write_started) { + /* Wakeup to host if there are ongoing transfers */ + spin_unlock_irqrestore(&serial_port_lock, flags); + if (!gserial_wakeup_host(gser)) + return; + + /* Check if port is valid after acquiring lock back */ + spin_lock_irqsave(&serial_port_lock, flags); + if (!port) { + spin_unlock_irqrestore(&serial_port_lock, flags); + return; + } + } + spin_lock(&port->port_lock); spin_unlock(&serial_port_lock); port->suspended = true; diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h index 2f78cd4f396f..9391614135e9 100644 --- a/drivers/usb/gadget/function/uvc_configfs.h +++ b/drivers/usb/gadget/function/uvc_configfs.h @@ -74,10 +74,12 @@ static inline struct uvcg_format *to_uvcg_format(struct config_item *item) struct uvcg_streaming_header { struct config_item item; - struct uvc_input_header_descriptor desc; unsigned linked; struct list_head formats; unsigned num_fmt; + + /* Must be last --ends in a flexible-array member. */ + struct uvc_input_header_descriptor desc; }; static inline struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item) diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c index a9544fea8723..578556422ea3 100644 --- a/drivers/usb/gadget/legacy/g_ffs.c +++ b/drivers/usb/gadget/legacy/g_ffs.c @@ -188,7 +188,7 @@ static int __init gfs_init(void) /* * Allocate in one chunk for easier maintenance */ - f_ffs[0] = kcalloc(func_num * N_CONF, sizeof(*f_ffs), GFP_KERNEL); + f_ffs[0] = kcalloc(func_num * N_CONF, sizeof(*f_ffs[0]), GFP_KERNEL); if (!f_ffs[0]) { ret = -ENOMEM; goto no_func; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index b6a30d88a800..fcce84a726f2 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1615,7 +1615,7 @@ static int activate_ep_files (struct dev_data *dev) mutex_init(&data->lock); init_waitqueue_head (&data->wait); - strncpy (data->name, ep->name, sizeof (data->name) - 1); + strscpy(data->name, ep->name); refcount_set (&data->count, 1); data->dev = dev; get_dev (dev); diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index aae1787320d4..26460340fbc9 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -102,12 +102,6 @@ config USB_FSL_USB2 dynamically linked module called "fsl_usb2_udc" and force all gadget drivers to also be dynamically linked. -config USB_FUSB300 - tristate "Faraday FUSB300 USB Peripheral Controller" - depends on !PHYS_ADDR_T_64BIT && HAS_DMA - help - Faraday usb device controller FUSB300 driver - config USB_GR_UDC tristate "Aeroflex Gaisler GRUSBDC USB Peripheral Controller Driver" depends on HAS_DMA @@ -228,21 +222,6 @@ config USB_PXA27X dynamically linked module called "pxa27x_udc" and force all gadget drivers to also be dynamically linked. -config USB_MV_UDC - tristate "Marvell USB2.0 Device Controller" - depends on HAS_DMA - help - Marvell Socs (including PXA and MMP series) include a high speed - USB2.0 OTG controller, which can be configured as high speed or - full speed USB peripheral. 
- -config USB_MV_U3D - depends on HAS_DMA - tristate "MARVELL PXA2128 USB 3.0 controller" - help - MARVELL PXA2128 Processor series include a super speed USB3.0 device - controller, which support super speed USB peripheral. - config USB_SNP_CORE depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT) depends on HAS_DMA @@ -326,29 +305,6 @@ config USB_FSL_QE Set CONFIG_USB_GADGET to "m" to build this driver as a dynamically linked module called "fsl_qe_udc". -config USB_NET2272 - depends on HAS_IOMEM - tristate "PLX NET2272" - help - PLX NET2272 is a USB peripheral controller which supports - both full and high speed USB 2.0 data transfers. - - It has three configurable endpoints, as well as endpoint zero - (for control transfer). - Say "y" to link the driver statically, or "m" to build a - dynamically linked module called "net2272" and force all - gadget drivers to also be dynamically linked. - -config USB_NET2272_DMA - bool "Support external DMA controller" - depends on USB_NET2272 && HAS_DMA - help - The NET2272 part can optionally support an external DMA - controller, but your board has to have support in the - driver itself. - - If unsure, say "N" here. The driver works fine in PIO mode. - config USB_NET2280 tristate "NetChip NET228x / PLX USB3x8x" depends on USB_PCI diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile index b52f93e9c61d..1b9b1a4f9c57 100644 --- a/drivers/usb/gadget/udc/Makefile +++ b/drivers/usb/gadget/udc/Makefile @@ -9,7 +9,6 @@ udc-core-y := core.o trace.o # obj-$(CONFIG_USB_GADGET) += udc-core.o obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o -obj-$(CONFIG_USB_NET2272) += net2272.o obj-$(CONFIG_USB_NET2280) += net2280.o obj-$(CONFIG_USB_SNP_CORE) += snps_udc_core.o obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc_pci.o @@ -31,10 +30,6 @@ obj-$(CONFIG_USB_RENESAS_USBF) += renesas_usbf.o obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o obj-$(CONFIG_USB_LPC32XX) += lpc32xx_udc.o obj-$(CONFIG_USB_EG20T) += pch_udc.o -obj-$(CONFIG_USB_MV_UDC) += mv_udc.o -mv_udc-y := mv_udc_core.o -obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o -obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o obj-$(CONFIG_USB_GR_UDC) += gr_udc.o obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o obj-$(CONFIG_USB_SNP_UDC_PLAT) += snps_udc_plat.o diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c deleted file mode 100644 index 5e94a99b3e53..000000000000 --- a/drivers/usb/gadget/udc/fusb300_udc.c +++ /dev/null @@ -1,1516 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Fusb300 UDC (USB gadget) - * - * Copyright (C) 2010 Faraday Technology Corp. 
- * - * Author : Yuan-hsin Chen <yhchen@faraday-tech.com> - */ -#include <linux/dma-mapping.h> -#include <linux/err.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/usb/ch9.h> -#include <linux/usb/gadget.h> - -#include "fusb300_udc.h" - -MODULE_DESCRIPTION("FUSB300 USB gadget driver"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Yuan-Hsin Chen, Feng-Hsin Chiang <john453@faraday-tech.com>"); -MODULE_ALIAS("platform:fusb300_udc"); - -#define DRIVER_VERSION "20 October 2010" - -static const char udc_name[] = "fusb300_udc"; -static const char * const fusb300_ep_name[] = { - "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9", - "ep10", "ep11", "ep12", "ep13", "ep14", "ep15" -}; - -static void done(struct fusb300_ep *ep, struct fusb300_request *req, - int status); - -static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset, - u32 value) -{ - u32 reg = ioread32(fusb300->reg + offset); - - reg |= value; - iowrite32(reg, fusb300->reg + offset); -} - -static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset, - u32 value) -{ - u32 reg = ioread32(fusb300->reg + offset); - - reg &= ~value; - iowrite32(reg, fusb300->reg + offset); -} - - -static void fusb300_ep_setting(struct fusb300_ep *ep, - struct fusb300_ep_info info) -{ - ep->epnum = info.epnum; - ep->type = info.type; -} - -static int fusb300_ep_release(struct fusb300_ep *ep) -{ - if (!ep->epnum) - return 0; - ep->epnum = 0; - ep->stall = 0; - ep->wedged = 0; - return 0; -} - -static void fusb300_set_fifo_entry(struct fusb300 *fusb300, - u32 ep) -{ - u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); - - val &= ~FUSB300_EPSET1_FIFOENTRY_MSK; - val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM); - iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); -} - -static void fusb300_set_start_entry(struct fusb300 *fusb300, - u8 ep) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); - u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM; - - reg &= ~FUSB300_EPSET1_START_ENTRY_MSK ; - reg |= FUSB300_EPSET1_START_ENTRY(start_entry); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); - if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) { - fusb300->fifo_entry_num = 0; - fusb300->addrofs = 0; - pr_err("fifo entry is over the maximum number!\n"); - } else - fusb300->fifo_entry_num++; -} - -/* set fusb300_set_start_entry first before fusb300_set_epaddrofs */ -static void fusb300_set_epaddrofs(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); - - reg &= ~FUSB300_EPSET2_ADDROFS_MSK; - reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); - fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM; -} - -static void ep_fifo_setting(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - fusb300_set_fifo_entry(fusb300, info.epnum); - fusb300_set_start_entry(fusb300, info.epnum); - fusb300_set_epaddrofs(fusb300, info); -} - -static void fusb300_set_eptype(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); - - reg &= ~FUSB300_EPSET1_TYPE_MSK; - reg |= FUSB300_EPSET1_TYPE(info.type); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); -} - -static void fusb300_set_epdir(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - u32 
reg; - - if (!info.dir_in) - return; - reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); - reg &= ~FUSB300_EPSET1_DIR_MSK; - reg |= FUSB300_EPSET1_DIRIN; - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); -} - -static void fusb300_set_ep_active(struct fusb300 *fusb300, - u8 ep) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); - - reg |= FUSB300_EPSET1_ACTEN; - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); -} - -static void fusb300_set_epmps(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); - - reg &= ~FUSB300_EPSET2_MPS_MSK; - reg |= FUSB300_EPSET2_MPS(info.maxpacket); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); -} - -static void fusb300_set_interval(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); - - reg &= ~FUSB300_EPSET1_INTERVAL(0x7); - reg |= FUSB300_EPSET1_INTERVAL(info.interval); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); -} - -static void fusb300_set_bwnum(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); - - reg &= ~FUSB300_EPSET1_BWNUM(0x3); - reg |= FUSB300_EPSET1_BWNUM(info.bw_num); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); -} - -static void set_ep_reg(struct fusb300 *fusb300, - struct fusb300_ep_info info) -{ - fusb300_set_eptype(fusb300, info); - fusb300_set_epdir(fusb300, info); - fusb300_set_epmps(fusb300, info); - - if (info.interval) - fusb300_set_interval(fusb300, info); - - if (info.bw_num) - fusb300_set_bwnum(fusb300, info); - - fusb300_set_ep_active(fusb300, info.epnum); -} - -static int config_ep(struct fusb300_ep *ep, - const struct usb_endpoint_descriptor *desc) -{ - struct fusb300 *fusb300 = ep->fusb300; - struct fusb300_ep_info info; - - ep->ep.desc = desc; - - info.interval = 0; - info.addrofs = 0; - info.bw_num = 0; - - info.type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; - info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 
1 : 0; - info.maxpacket = usb_endpoint_maxp(desc); - info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; - - if ((info.type == USB_ENDPOINT_XFER_INT) || - (info.type == USB_ENDPOINT_XFER_ISOC)) { - info.interval = desc->bInterval; - if (info.type == USB_ENDPOINT_XFER_ISOC) - info.bw_num = usb_endpoint_maxp_mult(desc); - } - - ep_fifo_setting(fusb300, info); - - set_ep_reg(fusb300, info); - - fusb300_ep_setting(ep, info); - - fusb300->ep[info.epnum] = ep; - - return 0; -} - -static int fusb300_enable(struct usb_ep *_ep, - const struct usb_endpoint_descriptor *desc) -{ - struct fusb300_ep *ep; - - ep = container_of(_ep, struct fusb300_ep, ep); - - if (ep->fusb300->reenum) { - ep->fusb300->fifo_entry_num = 0; - ep->fusb300->addrofs = 0; - ep->fusb300->reenum = 0; - } - - return config_ep(ep, desc); -} - -static int fusb300_disable(struct usb_ep *_ep) -{ - struct fusb300_ep *ep; - struct fusb300_request *req; - unsigned long flags; - - ep = container_of(_ep, struct fusb300_ep, ep); - - BUG_ON(!ep); - - while (!list_empty(&ep->queue)) { - req = list_entry(ep->queue.next, struct fusb300_request, queue); - spin_lock_irqsave(&ep->fusb300->lock, flags); - done(ep, req, -ECONNRESET); - spin_unlock_irqrestore(&ep->fusb300->lock, flags); - } - - return fusb300_ep_release(ep); -} - -static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep, - gfp_t gfp_flags) -{ - struct fusb300_request *req; - - req = kzalloc(sizeof(struct fusb300_request), gfp_flags); - if (!req) - return NULL; - INIT_LIST_HEAD(&req->queue); - - return &req->req; -} - -static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req) -{ - struct fusb300_request *req; - - req = container_of(_req, struct fusb300_request, req); - kfree(req); -} - -static int enable_fifo_int(struct fusb300_ep *ep) -{ - struct fusb300 *fusb300 = ep->fusb300; - - if (ep->epnum) { - fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0, - FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); - } else { - pr_err("can't enable_fifo_int ep0\n"); - return -EINVAL; - } - - return 0; -} - -static int disable_fifo_int(struct fusb300_ep *ep) -{ - struct fusb300 *fusb300 = ep->fusb300; - - if (ep->epnum) { - fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0, - FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); - } else { - pr_err("can't disable_fifo_int ep0\n"); - return -EINVAL; - } - - return 0; -} - -static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length) -{ - u32 reg; - - reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR); - reg &= ~FUSB300_CSR_LEN_MSK; - reg |= FUSB300_CSR_LEN(length); - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR); -} - -/* write data to cx fifo */ -static void fusb300_wrcxf(struct fusb300_ep *ep, - struct fusb300_request *req) -{ - int i = 0; - u8 *tmp; - u32 data; - struct fusb300 *fusb300 = ep->fusb300; - u32 length = req->req.length - req->req.actual; - - tmp = req->req.buf + req->req.actual; - - if (length > SS_CTL_MAX_PACKET_SIZE) { - fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE); - for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) { - data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | - *(tmp + 3) << 24; - iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); - tmp += 4; - } - req->req.actual += SS_CTL_MAX_PACKET_SIZE; - } else { /* length is less than max packet size */ - fusb300_set_cxlen(fusb300, length); - for (i = length >> 2; i > 0; i--) { - data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | - *(tmp + 3) << 24; - printk(KERN_DEBUG " 0x%x\n", data); - iowrite32(data, fusb300->reg + 
FUSB300_OFFSET_CXPORT); - tmp = tmp + 4; - } - switch (length % 4) { - case 1: - data = *tmp; - printk(KERN_DEBUG " 0x%x\n", data); - iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); - break; - case 2: - data = *tmp | *(tmp + 1) << 8; - printk(KERN_DEBUG " 0x%x\n", data); - iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); - break; - case 3: - data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; - printk(KERN_DEBUG " 0x%x\n", data); - iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); - break; - default: - break; - } - req->req.actual += length; - } -} - -static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep) -{ - fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep), - FUSB300_EPSET0_STL); -} - -static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); - - if (reg & FUSB300_EPSET0_STL) { - printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep); - reg |= FUSB300_EPSET0_STL_CLR; - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); - } -} - -static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req) -{ - if (ep->fusb300->ep0_dir) { /* if IN */ - if (req->req.length) { - fusb300_wrcxf(ep, req); - } else - printk(KERN_DEBUG "%s : req->req.length = 0x%x\n", - __func__, req->req.length); - if ((req->req.length == req->req.actual) || - (req->req.actual < ep->ep.maxpacket)) - done(ep, req, 0); - } else { /* OUT */ - if (!req->req.length) - done(ep, req, 0); - else - fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1, - FUSB300_IGER1_CX_OUT_INT); - } -} - -static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req, - gfp_t gfp_flags) -{ - struct fusb300_ep *ep; - struct fusb300_request *req; - unsigned long flags; - int request = 0; - - ep = container_of(_ep, struct fusb300_ep, ep); - req = container_of(_req, struct fusb300_request, req); - - if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - spin_lock_irqsave(&ep->fusb300->lock, flags); - - if (list_empty(&ep->queue)) - request = 1; - - list_add_tail(&req->queue, &ep->queue); - - req->req.actual = 0; - req->req.status = -EINPROGRESS; - - if (ep->ep.desc == NULL) /* ep0 */ - ep0_queue(ep, req); - else if (request && !ep->stall) - enable_fifo_int(ep); - - spin_unlock_irqrestore(&ep->fusb300->lock, flags); - - return 0; -} - -static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req) -{ - struct fusb300_ep *ep; - struct fusb300_request *req; - unsigned long flags; - - ep = container_of(_ep, struct fusb300_ep, ep); - req = container_of(_req, struct fusb300_request, req); - - spin_lock_irqsave(&ep->fusb300->lock, flags); - if (!list_empty(&ep->queue)) - done(ep, req, -ECONNRESET); - spin_unlock_irqrestore(&ep->fusb300->lock, flags); - - return 0; -} - -static int fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge) -{ - struct fusb300_ep *ep; - struct fusb300 *fusb300; - unsigned long flags; - int ret = 0; - - ep = container_of(_ep, struct fusb300_ep, ep); - - fusb300 = ep->fusb300; - - spin_lock_irqsave(&ep->fusb300->lock, flags); - - if (!list_empty(&ep->queue)) { - ret = -EAGAIN; - goto out; - } - - if (value) { - fusb300_set_epnstall(fusb300, ep->epnum); - ep->stall = 1; - if (wedge) - ep->wedged = 1; - } else { - fusb300_clear_epnstall(fusb300, ep->epnum); - ep->stall = 0; - ep->wedged = 0; - } - -out: - spin_unlock_irqrestore(&ep->fusb300->lock, flags); - return ret; -} - -static int fusb300_set_halt(struct usb_ep *_ep, int value) -{ - return 
fusb300_set_halt_and_wedge(_ep, value, 0); -} - -static int fusb300_set_wedge(struct usb_ep *_ep) -{ - return fusb300_set_halt_and_wedge(_ep, 1, 1); -} - -static void fusb300_fifo_flush(struct usb_ep *_ep) -{ -} - -static const struct usb_ep_ops fusb300_ep_ops = { - .enable = fusb300_enable, - .disable = fusb300_disable, - - .alloc_request = fusb300_alloc_request, - .free_request = fusb300_free_request, - - .queue = fusb300_queue, - .dequeue = fusb300_dequeue, - - .set_halt = fusb300_set_halt, - .fifo_flush = fusb300_fifo_flush, - .set_wedge = fusb300_set_wedge, -}; - -/*****************************************************************************/ -static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset, - u32 value) -{ - iowrite32(value, fusb300->reg + offset); -} - -static void fusb300_reset(void) -{ -} - -static void fusb300_set_cxstall(struct fusb300 *fusb300) -{ - fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, - FUSB300_CSR_STL); -} - -static void fusb300_set_cxdone(struct fusb300 *fusb300) -{ - fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, - FUSB300_CSR_DONE); -} - -/* read data from cx fifo */ -static void fusb300_rdcxf(struct fusb300 *fusb300, - u8 *buffer, u32 length) -{ - int i = 0; - u8 *tmp; - u32 data; - - tmp = buffer; - - for (i = (length >> 2); i > 0; i--) { - data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); - printk(KERN_DEBUG " 0x%x\n", data); - *tmp = data & 0xFF; - *(tmp + 1) = (data >> 8) & 0xFF; - *(tmp + 2) = (data >> 16) & 0xFF; - *(tmp + 3) = (data >> 24) & 0xFF; - tmp = tmp + 4; - } - - switch (length % 4) { - case 1: - data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); - printk(KERN_DEBUG " 0x%x\n", data); - *tmp = data & 0xFF; - break; - case 2: - data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); - printk(KERN_DEBUG " 0x%x\n", data); - *tmp = data & 0xFF; - *(tmp + 1) = (data >> 8) & 0xFF; - break; - case 3: - data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); - printk(KERN_DEBUG " 0x%x\n", data); - *tmp = data & 0xFF; - *(tmp + 1) = (data >> 8) & 0xFF; - *(tmp + 2) = (data >> 16) & 0xFF; - break; - default: - break; - } -} - -static void fusb300_rdfifo(struct fusb300_ep *ep, - struct fusb300_request *req, - u32 length) -{ - int i = 0; - u8 *tmp; - u32 data, reg; - struct fusb300 *fusb300 = ep->fusb300; - - tmp = req->req.buf + req->req.actual; - req->req.actual += length; - - if (req->req.actual > req->req.length) - printk(KERN_DEBUG "req->req.actual > req->req.length\n"); - - for (i = (length >> 2); i > 0; i--) { - data = ioread32(fusb300->reg + - FUSB300_OFFSET_EPPORT(ep->epnum)); - *tmp = data & 0xFF; - *(tmp + 1) = (data >> 8) & 0xFF; - *(tmp + 2) = (data >> 16) & 0xFF; - *(tmp + 3) = (data >> 24) & 0xFF; - tmp = tmp + 4; - } - - switch (length % 4) { - case 1: - data = ioread32(fusb300->reg + - FUSB300_OFFSET_EPPORT(ep->epnum)); - *tmp = data & 0xFF; - break; - case 2: - data = ioread32(fusb300->reg + - FUSB300_OFFSET_EPPORT(ep->epnum)); - *tmp = data & 0xFF; - *(tmp + 1) = (data >> 8) & 0xFF; - break; - case 3: - data = ioread32(fusb300->reg + - FUSB300_OFFSET_EPPORT(ep->epnum)); - *tmp = data & 0xFF; - *(tmp + 1) = (data >> 8) & 0xFF; - *(tmp + 2) = (data >> 16) & 0xFF; - break; - default: - break; - } - - do { - reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); - reg &= FUSB300_IGR1_SYNF0_EMPTY_INT; - if (i) - printk(KERN_INFO "sync fifo is not empty!\n"); - i++; - } while (!reg); -} - -static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep) -{ - u8 value; - u32 reg = ioread32(fusb300->reg + 
FUSB300_OFFSET_EPSET0(ep)); - - value = reg & FUSB300_EPSET0_STL; - - return value; -} - -static u8 fusb300_get_cxstall(struct fusb300 *fusb300) -{ - u8 value; - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR); - - value = (reg & FUSB300_CSR_STL) >> 1; - - return value; -} - -static void request_error(struct fusb300 *fusb300) -{ - fusb300_set_cxstall(fusb300); - printk(KERN_DEBUG "request error!!\n"); -} - -static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) -__releases(fusb300->lock) -__acquires(fusb300->lock) -{ - u8 ep; - u16 status = 0; - u16 w_index = ctrl->wIndex; - - switch (ctrl->bRequestType & USB_RECIP_MASK) { - case USB_RECIP_DEVICE: - status = 1 << USB_DEVICE_SELF_POWERED; - break; - case USB_RECIP_INTERFACE: - status = 0; - break; - case USB_RECIP_ENDPOINT: - ep = w_index & USB_ENDPOINT_NUMBER_MASK; - if (ep) { - if (fusb300_get_epnstall(fusb300, ep)) - status = 1 << USB_ENDPOINT_HALT; - } else { - if (fusb300_get_cxstall(fusb300)) - status = 0; - } - break; - - default: - request_error(fusb300); - return; /* exit */ - } - - fusb300->ep0_data = cpu_to_le16(status); - fusb300->ep0_req->buf = &fusb300->ep0_data; - fusb300->ep0_req->length = 2; - - spin_unlock(&fusb300->lock); - fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL); - spin_lock(&fusb300->lock); -} - -static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) -{ - u8 ep; - - switch (ctrl->bRequestType & USB_RECIP_MASK) { - case USB_RECIP_DEVICE: - fusb300_set_cxdone(fusb300); - break; - case USB_RECIP_INTERFACE: - fusb300_set_cxdone(fusb300); - break; - case USB_RECIP_ENDPOINT: { - u16 w_index = le16_to_cpu(ctrl->wIndex); - - ep = w_index & USB_ENDPOINT_NUMBER_MASK; - if (ep) - fusb300_set_epnstall(fusb300, ep); - else - fusb300_set_cxstall(fusb300); - fusb300_set_cxdone(fusb300); - } - break; - default: - request_error(fusb300); - break; - } -} - -static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep) -{ - fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep), - FUSB300_EPSET0_CLRSEQNUM); -} - -static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) -{ - struct fusb300_ep *ep = - fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK]; - - switch (ctrl->bRequestType & USB_RECIP_MASK) { - case USB_RECIP_DEVICE: - fusb300_set_cxdone(fusb300); - break; - case USB_RECIP_INTERFACE: - fusb300_set_cxdone(fusb300); - break; - case USB_RECIP_ENDPOINT: - if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) { - if (ep->wedged) { - fusb300_set_cxdone(fusb300); - break; - } - if (ep->stall) { - ep->stall = 0; - fusb300_clear_seqnum(fusb300, ep->epnum); - fusb300_clear_epnstall(fusb300, ep->epnum); - if (!list_empty(&ep->queue)) - enable_fifo_int(ep); - } - } - fusb300_set_cxdone(fusb300); - break; - default: - request_error(fusb300); - break; - } -} - -static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR); - - reg &= ~FUSB300_DAR_DRVADDR_MSK; - reg |= FUSB300_DAR_DRVADDR(addr); - - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR); -} - -static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) -{ - if (ctrl->wValue >= 0x0100) - request_error(fusb300); - else { - fusb300_set_dev_addr(fusb300, ctrl->wValue); - fusb300_set_cxdone(fusb300); - } -} - -#define UVC_COPY_DESCRIPTORS(mem, src) \ - do { \ - const struct usb_descriptor_header * const *__src; \ - for (__src = src; *__src; ++__src) { \ - memcpy(mem, *__src, (*__src)->bLength); \ - mem += 
(*__src)->bLength; \ - } \ - } while (0) - -static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) -{ - u8 *p = (u8 *)ctrl; - u8 ret = 0; - u8 i = 0; - - fusb300_rdcxf(fusb300, p, 8); - fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN; - fusb300->ep0_length = ctrl->wLength; - - /* check request */ - if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { - switch (ctrl->bRequest) { - case USB_REQ_GET_STATUS: - get_status(fusb300, ctrl); - break; - case USB_REQ_CLEAR_FEATURE: - clear_feature(fusb300, ctrl); - break; - case USB_REQ_SET_FEATURE: - set_feature(fusb300, ctrl); - break; - case USB_REQ_SET_ADDRESS: - set_address(fusb300, ctrl); - break; - case USB_REQ_SET_CONFIGURATION: - fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR, - FUSB300_DAR_SETCONFG); - /* clear sequence number */ - for (i = 1; i <= FUSB300_MAX_NUM_EP; i++) - fusb300_clear_seqnum(fusb300, i); - fusb300->reenum = 1; - ret = 1; - break; - default: - ret = 1; - break; - } - } else - ret = 1; - - return ret; -} - -static void done(struct fusb300_ep *ep, struct fusb300_request *req, - int status) -{ - list_del_init(&req->queue); - - /* don't modify queue heads during completion callback */ - if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) - req->req.status = -ESHUTDOWN; - else - req->req.status = status; - - spin_unlock(&ep->fusb300->lock); - usb_gadget_giveback_request(&ep->ep, &req->req); - spin_lock(&ep->fusb300->lock); - - if (ep->epnum) { - disable_fifo_int(ep); - if (!list_empty(&ep->queue)) - enable_fifo_int(ep); - } else - fusb300_set_cxdone(ep->fusb300); -} - -static void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, dma_addr_t d, - u32 len) -{ - u32 value; - u32 reg; - - /* wait SW owner */ - do { - reg = ioread32(ep->fusb300->reg + - FUSB300_OFFSET_EPPRD_W0(ep->epnum)); - reg &= FUSB300_EPPRD0_H; - } while (reg); - - iowrite32(d, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum)); - - value = FUSB300_EPPRD0_BTC(len) | FUSB300_EPPRD0_H | - FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I; - iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum)); - - iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum)); - - fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY, - FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum)); -} - -static void fusb300_wait_idma_finished(struct fusb300_ep *ep) -{ - u32 reg; - - do { - reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1); - if ((reg & FUSB300_IGR1_VBUS_CHG_INT) || - (reg & FUSB300_IGR1_WARM_RST_INT) || - (reg & FUSB300_IGR1_HOT_RST_INT) || - (reg & FUSB300_IGR1_USBRST_INT) - ) - goto IDMA_RESET; - reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0); - reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum); - } while (!reg); - - fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0, - FUSB300_IGR0_EPn_PRD_INT(ep->epnum)); - return; - -IDMA_RESET: - reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGER0); - reg &= ~FUSB300_IGER0_EEPn_PRD_INT(ep->epnum); - iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_IGER0); -} - -static void fusb300_set_idma(struct fusb300_ep *ep, - struct fusb300_request *req) -{ - int ret; - - ret = usb_gadget_map_request(&ep->fusb300->gadget, - &req->req, DMA_TO_DEVICE); - if (ret) - return; - - fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0, - FUSB300_IGER0_EEPn_PRD_INT(ep->epnum)); - - fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length); - /* check idma is done */ - fusb300_wait_idma_finished(ep); - - usb_gadget_unmap_request(&ep->fusb300->gadget, - &req->req, DMA_TO_DEVICE); -} - 
-static void in_ep_fifo_handler(struct fusb300_ep *ep) -{ - struct fusb300_request *req = list_entry(ep->queue.next, - struct fusb300_request, queue); - - if (req->req.length) - fusb300_set_idma(ep, req); - done(ep, req, 0); -} - -static void out_ep_fifo_handler(struct fusb300_ep *ep) -{ - struct fusb300 *fusb300 = ep->fusb300; - struct fusb300_request *req = list_entry(ep->queue.next, - struct fusb300_request, queue); - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); - u32 length = reg & FUSB300_FFR_BYCNT; - - fusb300_rdfifo(ep, req, length); - - /* finish out transfer */ - if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket)) - done(ep, req, 0); -} - -static void check_device_mode(struct fusb300 *fusb300) -{ - u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR); - - switch (reg & FUSB300_GCR_DEVEN_MSK) { - case FUSB300_GCR_DEVEN_SS: - fusb300->gadget.speed = USB_SPEED_SUPER; - break; - case FUSB300_GCR_DEVEN_HS: - fusb300->gadget.speed = USB_SPEED_HIGH; - break; - case FUSB300_GCR_DEVEN_FS: - fusb300->gadget.speed = USB_SPEED_FULL; - break; - default: - fusb300->gadget.speed = USB_SPEED_UNKNOWN; - break; - } - printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK)); -} - - -static void fusb300_ep0out(struct fusb300 *fusb300) -{ - struct fusb300_ep *ep = fusb300->ep[0]; - u32 reg; - - if (!list_empty(&ep->queue)) { - struct fusb300_request *req; - - req = list_first_entry(&ep->queue, - struct fusb300_request, queue); - if (req->req.length) - fusb300_rdcxf(ep->fusb300, req->req.buf, - req->req.length); - done(ep, req, 0); - reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1); - reg &= ~FUSB300_IGER1_CX_OUT_INT; - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1); - } else - pr_err("%s : empty queue\n", __func__); -} - -static void fusb300_ep0in(struct fusb300 *fusb300) -{ - struct fusb300_request *req; - struct fusb300_ep *ep = fusb300->ep[0]; - - if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) { - req = list_entry(ep->queue.next, - struct fusb300_request, queue); - if (req->req.length) - fusb300_wrcxf(ep, req); - if ((req->req.length - req->req.actual) < ep->ep.maxpacket) - done(ep, req, 0); - } else - fusb300_set_cxdone(fusb300); -} - -static void fusb300_grp2_handler(void) -{ -} - -static void fusb300_grp3_handler(void) -{ -} - -static void fusb300_grp4_handler(void) -{ -} - -static void fusb300_grp5_handler(void) -{ -} - -static irqreturn_t fusb300_irq(int irq, void *_fusb300) -{ - struct fusb300 *fusb300 = _fusb300; - u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); - u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1); - u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0); - u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0); - struct usb_ctrlrequest ctrl; - u8 in; - u32 reg; - int i; - - spin_lock(&fusb300->lock); - - int_grp1 &= int_grp1_en; - int_grp0 &= int_grp0_en; - - if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_WARM_RST_INT); - printk(KERN_INFO"fusb300_warmreset\n"); - fusb300_reset(); - } - - if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_HOT_RST_INT); - printk(KERN_INFO"fusb300_hotreset\n"); - fusb300_reset(); - } - - if (int_grp1 & FUSB300_IGR1_USBRST_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_USBRST_INT); - fusb300_reset(); - } - /* COMABT_INT has a highest priority */ - - if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) { - 
fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_CX_COMABT_INT); - printk(KERN_INFO"fusb300_ep0abt\n"); - } - - if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_VBUS_CHG_INT); - printk(KERN_INFO"fusb300_vbus_change\n"); - } - - if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U3_EXIT_FAIL_INT); - } - - if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U2_EXIT_FAIL_INT); - } - - if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U1_EXIT_FAIL_INT); - } - - if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U2_ENTRY_FAIL_INT); - } - - if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U1_ENTRY_FAIL_INT); - } - - if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U3_EXIT_INT); - printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n"); - } - - if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U2_EXIT_INT); - printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n"); - } - - if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U1_EXIT_INT); - printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n"); - } - - if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U3_ENTRY_INT); - printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n"); - fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1, - FUSB300_SSCR1_GO_U3_DONE); - } - - if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U2_ENTRY_INT); - printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n"); - } - - if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_U1_ENTRY_INT); - printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n"); - } - - if (int_grp1 & FUSB300_IGR1_RESM_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_RESM_INT); - printk(KERN_INFO "fusb300_resume\n"); - } - - if (int_grp1 & FUSB300_IGR1_SUSP_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_SUSP_INT); - printk(KERN_INFO "fusb300_suspend\n"); - } - - if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_HS_LPM_INT); - printk(KERN_INFO "fusb300_HS_LPM_INT\n"); - } - - if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) { - fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, - FUSB300_IGR1_DEV_MODE_CHG_INT); - check_device_mode(fusb300); - } - - if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) { - fusb300_set_cxstall(fusb300); - printk(KERN_INFO "fusb300_ep0fail\n"); - } - - if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) { - printk(KERN_INFO "fusb300_ep0setup\n"); - if (setup_packet(fusb300, &ctrl)) { - spin_unlock(&fusb300->lock); - if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0) - fusb300_set_cxstall(fusb300); - spin_lock(&fusb300->lock); - } - } - - if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT) - printk(KERN_INFO "fusb300_cmdend\n"); - - - if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) { - printk(KERN_INFO "fusb300_cxout\n"); - fusb300_ep0out(fusb300); - } - - if (int_grp1 & FUSB300_IGR1_CX_IN_INT) { - printk(KERN_INFO "fusb300_cxin\n"); - 
fusb300_ep0in(fusb300); - } - - if (int_grp1 & FUSB300_IGR1_INTGRP5) - fusb300_grp5_handler(); - - if (int_grp1 & FUSB300_IGR1_INTGRP4) - fusb300_grp4_handler(); - - if (int_grp1 & FUSB300_IGR1_INTGRP3) - fusb300_grp3_handler(); - - if (int_grp1 & FUSB300_IGR1_INTGRP2) - fusb300_grp2_handler(); - - if (int_grp0) { - for (i = 1; i < FUSB300_MAX_NUM_EP; i++) { - if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) { - reg = ioread32(fusb300->reg + - FUSB300_OFFSET_EPSET1(i)); - in = (reg & FUSB300_EPSET1_DIRIN) ? 1 : 0; - if (in) - in_ep_fifo_handler(fusb300->ep[i]); - else - out_ep_fifo_handler(fusb300->ep[i]); - } - } - } - - spin_unlock(&fusb300->lock); - - return IRQ_HANDLED; -} - -static void fusb300_set_u2_timeout(struct fusb300 *fusb300, - u32 time) -{ - u32 reg; - - reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT); - reg &= ~0xff; - reg |= FUSB300_SSCR2_U2TIMEOUT(time); - - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT); -} - -static void fusb300_set_u1_timeout(struct fusb300 *fusb300, - u32 time) -{ - u32 reg; - - reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT); - reg &= ~(0xff << 8); - reg |= FUSB300_SSCR2_U1TIMEOUT(time); - - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT); -} - -static void init_controller(struct fusb300 *fusb300) -{ - u32 reg; - u32 mask = 0; - u32 val = 0; - - /* split on */ - mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON; - reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR); - reg &= ~mask; - reg |= val; - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR); - - /* enable high-speed LPM */ - mask = val = FUSB300_HSCR_HS_LPM_PERMIT; - reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR); - reg &= ~mask; - reg |= val; - iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR); - - /*set u1 u2 timer*/ - fusb300_set_u2_timeout(fusb300, 0xff); - fusb300_set_u1_timeout(fusb300, 0xff); - - /* enable all grp1 interrupt */ - iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1); -} -/*------------------------------------------------------------------------*/ -static int fusb300_udc_start(struct usb_gadget *g, - struct usb_gadget_driver *driver) -{ - struct fusb300 *fusb300 = to_fusb300(g); - - /* hook up the driver */ - fusb300->driver = driver; - - return 0; -} - -static int fusb300_udc_stop(struct usb_gadget *g) -{ - struct fusb300 *fusb300 = to_fusb300(g); - - init_controller(fusb300); - fusb300->driver = NULL; - - return 0; -} -/*--------------------------------------------------------------------------*/ - -static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active) -{ - return 0; -} - -static const struct usb_gadget_ops fusb300_gadget_ops = { - .pullup = fusb300_udc_pullup, - .udc_start = fusb300_udc_start, - .udc_stop = fusb300_udc_stop, -}; - -static void fusb300_remove(struct platform_device *pdev) -{ - struct fusb300 *fusb300 = platform_get_drvdata(pdev); - int i; - - usb_del_gadget_udc(&fusb300->gadget); - iounmap(fusb300->reg); - free_irq(platform_get_irq(pdev, 0), fusb300); - free_irq(platform_get_irq(pdev, 1), fusb300); - - fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); - for (i = 0; i < FUSB300_MAX_NUM_EP; i++) - kfree(fusb300->ep[i]); - kfree(fusb300); -} - -static int fusb300_probe(struct platform_device *pdev) -{ - struct resource *res, *ires, *ires1; - void __iomem *reg = NULL; - struct fusb300 *fusb300 = NULL; - struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP]; - int ret = 0; - int i; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENODEV; - pr_err("platform_get_resource 
error.\n"); - goto clean_up; - } - - ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!ires) { - ret = -ENODEV; - dev_err(&pdev->dev, - "platform_get_resource IORESOURCE_IRQ error.\n"); - goto clean_up; - } - - ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1); - if (!ires1) { - ret = -ENODEV; - dev_err(&pdev->dev, - "platform_get_resource IORESOURCE_IRQ 1 error.\n"); - goto clean_up; - } - - reg = ioremap(res->start, resource_size(res)); - if (reg == NULL) { - ret = -ENOMEM; - pr_err("ioremap error.\n"); - goto clean_up; - } - - /* initialize udc */ - fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL); - if (fusb300 == NULL) { - ret = -ENOMEM; - goto clean_up; - } - - for (i = 0; i < FUSB300_MAX_NUM_EP; i++) { - _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL); - if (_ep[i] == NULL) { - ret = -ENOMEM; - goto clean_up; - } - fusb300->ep[i] = _ep[i]; - } - - spin_lock_init(&fusb300->lock); - - platform_set_drvdata(pdev, fusb300); - - fusb300->gadget.ops = &fusb300_gadget_ops; - - fusb300->gadget.max_speed = USB_SPEED_HIGH; - fusb300->gadget.name = udc_name; - fusb300->reg = reg; - - ret = request_irq(ires->start, fusb300_irq, IRQF_SHARED, - udc_name, fusb300); - if (ret < 0) { - pr_err("request_irq error (%d)\n", ret); - goto clean_up; - } - - ret = request_irq(ires1->start, fusb300_irq, - IRQF_SHARED, udc_name, fusb300); - if (ret < 0) { - pr_err("request_irq1 error (%d)\n", ret); - goto err_request_irq1; - } - - INIT_LIST_HEAD(&fusb300->gadget.ep_list); - - for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) { - struct fusb300_ep *ep = fusb300->ep[i]; - - if (i != 0) { - INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list); - list_add_tail(&fusb300->ep[i]->ep.ep_list, - &fusb300->gadget.ep_list); - } - ep->fusb300 = fusb300; - INIT_LIST_HEAD(&ep->queue); - ep->ep.name = fusb300_ep_name[i]; - ep->ep.ops = &fusb300_ep_ops; - usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE); - - if (i == 0) { - ep->ep.caps.type_control = true; - } else { - ep->ep.caps.type_iso = true; - ep->ep.caps.type_bulk = true; - ep->ep.caps.type_int = true; - } - - ep->ep.caps.dir_in = true; - ep->ep.caps.dir_out = true; - } - usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE); - fusb300->ep[0]->epnum = 0; - fusb300->gadget.ep0 = &fusb300->ep[0]->ep; - INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list); - - fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, - GFP_KERNEL); - if (fusb300->ep0_req == NULL) { - ret = -ENOMEM; - goto err_alloc_request; - } - - init_controller(fusb300); - ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget); - if (ret) - goto err_add_udc; - - dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); - - return 0; - -err_add_udc: - fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); - -err_alloc_request: - free_irq(ires1->start, fusb300); - -err_request_irq1: - free_irq(ires->start, fusb300); - -clean_up: - if (fusb300) { - if (fusb300->ep0_req) - fusb300_free_request(&fusb300->ep[0]->ep, - fusb300->ep0_req); - for (i = 0; i < FUSB300_MAX_NUM_EP; i++) - kfree(fusb300->ep[i]); - kfree(fusb300); - } - if (reg) - iounmap(reg); - - return ret; -} - -static struct platform_driver fusb300_driver = { - .probe = fusb300_probe, - .remove = fusb300_remove, - .driver = { - .name = udc_name, - }, -}; - -module_platform_driver(fusb300_driver); diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h deleted file mode 100644 index eb3d6d379ba7..000000000000 --- a/drivers/usb/gadget/udc/fusb300_udc.h +++ /dev/null @@ 
-1,675 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Fusb300 UDC (USB gadget) - * - * Copyright (C) 2010 Faraday Technology Corp. - * - * Author : Yuan-hsin Chen <yhchen@faraday-tech.com> - */ - - -#ifndef __FUSB300_UDC_H__ -#define __FUSB300_UDC_H__ - -#include <linux/kernel.h> - -#define FUSB300_OFFSET_GCR 0x00 -#define FUSB300_OFFSET_GTM 0x04 -#define FUSB300_OFFSET_DAR 0x08 -#define FUSB300_OFFSET_CSR 0x0C -#define FUSB300_OFFSET_CXPORT 0x10 -#define FUSB300_OFFSET_EPSET0(n) (0x20 + (n - 1) * 0x30) -#define FUSB300_OFFSET_EPSET1(n) (0x24 + (n - 1) * 0x30) -#define FUSB300_OFFSET_EPSET2(n) (0x28 + (n - 1) * 0x30) -#define FUSB300_OFFSET_EPFFR(n) (0x2c + (n - 1) * 0x30) -#define FUSB300_OFFSET_EPSTRID(n) (0x40 + (n - 1) * 0x30) -#define FUSB300_OFFSET_HSPTM 0x300 -#define FUSB300_OFFSET_HSCR 0x304 -#define FUSB300_OFFSET_SSCR0 0x308 -#define FUSB300_OFFSET_SSCR1 0x30C -#define FUSB300_OFFSET_TT 0x310 -#define FUSB300_OFFSET_DEVNOTF 0x314 -#define FUSB300_OFFSET_DNC1 0x318 -#define FUSB300_OFFSET_CS 0x31C -#define FUSB300_OFFSET_SOF 0x324 -#define FUSB300_OFFSET_EFCS 0x328 -#define FUSB300_OFFSET_IGR0 0x400 -#define FUSB300_OFFSET_IGR1 0x404 -#define FUSB300_OFFSET_IGR2 0x408 -#define FUSB300_OFFSET_IGR3 0x40C -#define FUSB300_OFFSET_IGR4 0x410 -#define FUSB300_OFFSET_IGR5 0x414 -#define FUSB300_OFFSET_IGER0 0x420 -#define FUSB300_OFFSET_IGER1 0x424 -#define FUSB300_OFFSET_IGER2 0x428 -#define FUSB300_OFFSET_IGER3 0x42C -#define FUSB300_OFFSET_IGER4 0x430 -#define FUSB300_OFFSET_IGER5 0x434 -#define FUSB300_OFFSET_DMAHMER 0x500 -#define FUSB300_OFFSET_EPPRDRDY 0x504 -#define FUSB300_OFFSET_DMAEPMR 0x508 -#define FUSB300_OFFSET_DMAENR 0x50C -#define FUSB300_OFFSET_DMAAPR 0x510 -#define FUSB300_OFFSET_AHBCR 0x514 -#define FUSB300_OFFSET_EPPRD_W0(n) (0x520 + (n - 1) * 0x10) -#define FUSB300_OFFSET_EPPRD_W1(n) (0x524 + (n - 1) * 0x10) -#define FUSB300_OFFSET_EPPRD_W2(n) (0x528 + (n - 1) * 0x10) -#define FUSB300_OFFSET_EPRD_PTR(n) (0x52C + (n - 1) * 0x10) -#define FUSB300_OFFSET_BUFDBG_START 0x800 -#define FUSB300_OFFSET_BUFDBG_END 0xBFC -#define FUSB300_OFFSET_EPPORT(n) (0x1010 + (n - 1) * 0x10) - -/* - * * Global Control Register (offset = 000H) - * */ -#define FUSB300_GCR_SF_RST (1 << 8) -#define FUSB300_GCR_VBUS_STATUS (1 << 7) -#define FUSB300_GCR_FORCE_HS_SUSP (1 << 6) -#define FUSB300_GCR_SYNC_FIFO1_CLR (1 << 5) -#define FUSB300_GCR_SYNC_FIFO0_CLR (1 << 4) -#define FUSB300_GCR_FIFOCLR (1 << 3) -#define FUSB300_GCR_GLINTEN (1 << 2) -#define FUSB300_GCR_DEVEN_FS 0x3 -#define FUSB300_GCR_DEVEN_HS 0x2 -#define FUSB300_GCR_DEVEN_SS 0x1 -#define FUSB300_GCR_DEVDIS 0x0 -#define FUSB300_GCR_DEVEN_MSK 0x3 - - -/* - * *Global Test Mode (offset = 004H) - * */ -#define FUSB300_GTM_TST_DIS_SOFGEN (1 << 16) -#define FUSB300_GTM_TST_CUR_EP_ENTRY(n) ((n & 0xF) << 12) -#define FUSB300_GTM_TST_EP_ENTRY(n) ((n & 0xF) << 8) -#define FUSB300_GTM_TST_EP_NUM(n) ((n & 0xF) << 4) -#define FUSB300_GTM_TST_FIFO_DEG (1 << 1) -#define FUSB300_GTM_TSTMODE (1 << 0) - -/* - * * Device Address Register (offset = 008H) - * */ -#define FUSB300_DAR_SETCONFG (1 << 7) -#define FUSB300_DAR_DRVADDR(x) (x & 0x7F) -#define FUSB300_DAR_DRVADDR_MSK 0x7F - -/* - * *Control Transfer Configuration and Status Register - * (CX_Config_Status, offset = 00CH) - * */ -#define FUSB300_CSR_LEN(x) ((x & 0xFFFF) << 8) -#define FUSB300_CSR_LEN_MSK (0xFFFF << 8) -#define FUSB300_CSR_EMP (1 << 4) -#define FUSB300_CSR_FUL (1 << 3) -#define FUSB300_CSR_CLR (1 << 2) -#define FUSB300_CSR_STL (1 << 1) -#define FUSB300_CSR_DONE (1 << 0) - -/* - 
* * EPn Setting 0 (EPn_SET0, offset = 020H+(n-1)*30H, n=1~15 ) - * */ -#define FUSB300_EPSET0_STL_CLR (1 << 3) -#define FUSB300_EPSET0_CLRSEQNUM (1 << 2) -#define FUSB300_EPSET0_STL (1 << 0) - -/* - * * EPn Setting 1 (EPn_SET1, offset = 024H+(n-1)*30H, n=1~15) - * */ -#define FUSB300_EPSET1_START_ENTRY(x) ((x & 0xFF) << 24) -#define FUSB300_EPSET1_START_ENTRY_MSK (0xFF << 24) -#define FUSB300_EPSET1_FIFOENTRY(x) ((x & 0x1F) << 12) -#define FUSB300_EPSET1_FIFOENTRY_MSK (0x1f << 12) -#define FUSB300_EPSET1_INTERVAL(x) ((x & 0x7) << 6) -#define FUSB300_EPSET1_BWNUM(x) ((x & 0x3) << 4) -#define FUSB300_EPSET1_TYPEISO (1 << 2) -#define FUSB300_EPSET1_TYPEBLK (2 << 2) -#define FUSB300_EPSET1_TYPEINT (3 << 2) -#define FUSB300_EPSET1_TYPE(x) ((x & 0x3) << 2) -#define FUSB300_EPSET1_TYPE_MSK (0x3 << 2) -#define FUSB300_EPSET1_DIROUT (0 << 1) -#define FUSB300_EPSET1_DIRIN (1 << 1) -#define FUSB300_EPSET1_DIR(x) ((x & 0x1) << 1) -#define FUSB300_EPSET1_DIRIN (1 << 1) -#define FUSB300_EPSET1_DIR_MSK ((0x1) << 1) -#define FUSB300_EPSET1_ACTDIS 0 -#define FUSB300_EPSET1_ACTEN 1 - -/* - * *EPn Setting 2 (EPn_SET2, offset = 028H+(n-1)*30H, n=1~15) - * */ -#define FUSB300_EPSET2_ADDROFS(x) ((x & 0x7FFF) << 16) -#define FUSB300_EPSET2_ADDROFS_MSK (0x7fff << 16) -#define FUSB300_EPSET2_MPS(x) (x & 0x7FF) -#define FUSB300_EPSET2_MPS_MSK 0x7FF - -/* - * * EPn FIFO Register (offset = 2cH+(n-1)*30H) - * */ -#define FUSB300_FFR_RST (1 << 31) -#define FUSB300_FF_FUL (1 << 30) -#define FUSB300_FF_EMPTY (1 << 29) -#define FUSB300_FFR_BYCNT 0x1FFFF - -/* - * *EPn Stream ID (EPn_STR_ID, offset = 040H+(n-1)*30H, n=1~15) - * */ -#define FUSB300_STRID_STREN (1 << 16) -#define FUSB300_STRID_STRID(x) (x & 0xFFFF) - -/* - * *HS PHY Test Mode (offset = 300H) - * */ -#define FUSB300_HSPTM_TSTPKDONE (1 << 4) -#define FUSB300_HSPTM_TSTPKT (1 << 3) -#define FUSB300_HSPTM_TSTSET0NAK (1 << 2) -#define FUSB300_HSPTM_TSTKSTA (1 << 1) -#define FUSB300_HSPTM_TSTJSTA (1 << 0) - -/* - * *HS Control Register (offset = 304H) - * */ -#define FUSB300_HSCR_HS_LPM_PERMIT (1 << 8) -#define FUSB300_HSCR_HS_LPM_RMWKUP (1 << 7) -#define FUSB300_HSCR_CAP_LPM_RMWKUP (1 << 6) -#define FUSB300_HSCR_HS_GOSUSP (1 << 5) -#define FUSB300_HSCR_HS_GORMWKU (1 << 4) -#define FUSB300_HSCR_CAP_RMWKUP (1 << 3) -#define FUSB300_HSCR_IDLECNT_0MS 0 -#define FUSB300_HSCR_IDLECNT_1MS 1 -#define FUSB300_HSCR_IDLECNT_2MS 2 -#define FUSB300_HSCR_IDLECNT_3MS 3 -#define FUSB300_HSCR_IDLECNT_4MS 4 -#define FUSB300_HSCR_IDLECNT_5MS 5 -#define FUSB300_HSCR_IDLECNT_6MS 6 -#define FUSB300_HSCR_IDLECNT_7MS 7 - -/* - * * SS Controller Register 0 (offset = 308H) - * */ -#define FUSB300_SSCR0_MAX_INTERVAL(x) ((x & 0x7) << 4) -#define FUSB300_SSCR0_U2_FUN_EN (1 << 1) -#define FUSB300_SSCR0_U1_FUN_EN (1 << 0) - -/* - * * SS Controller Register 1 (offset = 30CH) - * */ -#define FUSB300_SSCR1_GO_U3_DONE (1 << 8) -#define FUSB300_SSCR1_TXDEEMPH_LEVEL (1 << 7) -#define FUSB300_SSCR1_DIS_SCRMB (1 << 6) -#define FUSB300_SSCR1_FORCE_RECOVERY (1 << 5) -#define FUSB300_SSCR1_U3_WAKEUP_EN (1 << 4) -#define FUSB300_SSCR1_U2_EXIT_EN (1 << 3) -#define FUSB300_SSCR1_U1_EXIT_EN (1 << 2) -#define FUSB300_SSCR1_U2_ENTRY_EN (1 << 1) -#define FUSB300_SSCR1_U1_ENTRY_EN (1 << 0) - -/* - * *SS Controller Register 2 (offset = 310H) - * */ -#define FUSB300_SSCR2_SS_TX_SWING (1 << 25) -#define FUSB300_SSCR2_FORCE_LINKPM_ACCEPT (1 << 24) -#define FUSB300_SSCR2_U2_INACT_TIMEOUT(x) ((x & 0xFF) << 16) -#define FUSB300_SSCR2_U1TIMEOUT(x) ((x & 0xFF) << 8) -#define FUSB300_SSCR2_U2TIMEOUT(x) (x & 0xFF) - -/* - * 
*SS Device Notification Control (DEV_NOTF, offset = 314H) - * */ -#define FUSB300_DEVNOTF_CONTEXT0(x) ((x & 0xFFFFFF) << 8) -#define FUSB300_DEVNOTF_TYPE_DIS 0 -#define FUSB300_DEVNOTF_TYPE_FUNCWAKE 1 -#define FUSB300_DEVNOTF_TYPE_LTM 2 -#define FUSB300_DEVNOTF_TYPE_BUSINT_ADJMSG 3 - -/* - * *BFM Arbiter Priority Register (BFM_ARB offset = 31CH) - * */ -#define FUSB300_BFMARB_ARB_M1 (1 << 3) -#define FUSB300_BFMARB_ARB_M0 (1 << 2) -#define FUSB300_BFMARB_ARB_S1 (1 << 1) -#define FUSB300_BFMARB_ARB_S0 1 - -/* - * *Vendor Specific IO Control Register (offset = 320H) - * */ -#define FUSB300_VSIC_VCTLOAD_N (1 << 8) -#define FUSB300_VSIC_VCTL(x) (x & 0x3F) - -/* - * *SOF Mask Timer (offset = 324H) - * */ -#define FUSB300_SOF_MASK_TIMER_HS 0x044c -#define FUSB300_SOF_MASK_TIMER_FS 0x2710 - -/* - * *Error Flag and Control Status (offset = 328H) - * */ -#define FUSB300_EFCS_PM_STATE_U3 3 -#define FUSB300_EFCS_PM_STATE_U2 2 -#define FUSB300_EFCS_PM_STATE_U1 1 -#define FUSB300_EFCS_PM_STATE_U0 0 - -/* - * *Interrupt Group 0 Register (offset = 400H) - * */ -#define FUSB300_IGR0_EP15_PRD_INT (1 << 31) -#define FUSB300_IGR0_EP14_PRD_INT (1 << 30) -#define FUSB300_IGR0_EP13_PRD_INT (1 << 29) -#define FUSB300_IGR0_EP12_PRD_INT (1 << 28) -#define FUSB300_IGR0_EP11_PRD_INT (1 << 27) -#define FUSB300_IGR0_EP10_PRD_INT (1 << 26) -#define FUSB300_IGR0_EP9_PRD_INT (1 << 25) -#define FUSB300_IGR0_EP8_PRD_INT (1 << 24) -#define FUSB300_IGR0_EP7_PRD_INT (1 << 23) -#define FUSB300_IGR0_EP6_PRD_INT (1 << 22) -#define FUSB300_IGR0_EP5_PRD_INT (1 << 21) -#define FUSB300_IGR0_EP4_PRD_INT (1 << 20) -#define FUSB300_IGR0_EP3_PRD_INT (1 << 19) -#define FUSB300_IGR0_EP2_PRD_INT (1 << 18) -#define FUSB300_IGR0_EP1_PRD_INT (1 << 17) -#define FUSB300_IGR0_EPn_PRD_INT(n) (1 << (n + 16)) - -#define FUSB300_IGR0_EP15_FIFO_INT (1 << 15) -#define FUSB300_IGR0_EP14_FIFO_INT (1 << 14) -#define FUSB300_IGR0_EP13_FIFO_INT (1 << 13) -#define FUSB300_IGR0_EP12_FIFO_INT (1 << 12) -#define FUSB300_IGR0_EP11_FIFO_INT (1 << 11) -#define FUSB300_IGR0_EP10_FIFO_INT (1 << 10) -#define FUSB300_IGR0_EP9_FIFO_INT (1 << 9) -#define FUSB300_IGR0_EP8_FIFO_INT (1 << 8) -#define FUSB300_IGR0_EP7_FIFO_INT (1 << 7) -#define FUSB300_IGR0_EP6_FIFO_INT (1 << 6) -#define FUSB300_IGR0_EP5_FIFO_INT (1 << 5) -#define FUSB300_IGR0_EP4_FIFO_INT (1 << 4) -#define FUSB300_IGR0_EP3_FIFO_INT (1 << 3) -#define FUSB300_IGR0_EP2_FIFO_INT (1 << 2) -#define FUSB300_IGR0_EP1_FIFO_INT (1 << 1) -#define FUSB300_IGR0_EPn_FIFO_INT(n) (1 << n) - -/* - * *Interrupt Group 1 Register (offset = 404H) - * */ -#define FUSB300_IGR1_INTGRP5 (1 << 31) -#define FUSB300_IGR1_VBUS_CHG_INT (1 << 30) -#define FUSB300_IGR1_SYNF1_EMPTY_INT (1 << 29) -#define FUSB300_IGR1_SYNF0_EMPTY_INT (1 << 28) -#define FUSB300_IGR1_U3_EXIT_FAIL_INT (1 << 27) -#define FUSB300_IGR1_U2_EXIT_FAIL_INT (1 << 26) -#define FUSB300_IGR1_U1_EXIT_FAIL_INT (1 << 25) -#define FUSB300_IGR1_U2_ENTRY_FAIL_INT (1 << 24) -#define FUSB300_IGR1_U1_ENTRY_FAIL_INT (1 << 23) -#define FUSB300_IGR1_U3_EXIT_INT (1 << 22) -#define FUSB300_IGR1_U2_EXIT_INT (1 << 21) -#define FUSB300_IGR1_U1_EXIT_INT (1 << 20) -#define FUSB300_IGR1_U3_ENTRY_INT (1 << 19) -#define FUSB300_IGR1_U2_ENTRY_INT (1 << 18) -#define FUSB300_IGR1_U1_ENTRY_INT (1 << 17) -#define FUSB300_IGR1_HOT_RST_INT (1 << 16) -#define FUSB300_IGR1_WARM_RST_INT (1 << 15) -#define FUSB300_IGR1_RESM_INT (1 << 14) -#define FUSB300_IGR1_SUSP_INT (1 << 13) -#define FUSB300_IGR1_HS_LPM_INT (1 << 12) -#define FUSB300_IGR1_USBRST_INT (1 << 11) -#define FUSB300_IGR1_DEV_MODE_CHG_INT 
(1 << 9) -#define FUSB300_IGR1_CX_COMABT_INT (1 << 8) -#define FUSB300_IGR1_CX_COMFAIL_INT (1 << 7) -#define FUSB300_IGR1_CX_CMDEND_INT (1 << 6) -#define FUSB300_IGR1_CX_OUT_INT (1 << 5) -#define FUSB300_IGR1_CX_IN_INT (1 << 4) -#define FUSB300_IGR1_CX_SETUP_INT (1 << 3) -#define FUSB300_IGR1_INTGRP4 (1 << 2) -#define FUSB300_IGR1_INTGRP3 (1 << 1) -#define FUSB300_IGR1_INTGRP2 (1 << 0) - -/* - * *Interrupt Group 2 Register (offset = 408H) - * */ -#define FUSB300_IGR2_EP6_STR_ACCEPT_INT (1 << 29) -#define FUSB300_IGR2_EP6_STR_RESUME_INT (1 << 28) -#define FUSB300_IGR2_EP6_STR_REQ_INT (1 << 27) -#define FUSB300_IGR2_EP6_STR_NOTRDY_INT (1 << 26) -#define FUSB300_IGR2_EP6_STR_PRIME_INT (1 << 25) -#define FUSB300_IGR2_EP5_STR_ACCEPT_INT (1 << 24) -#define FUSB300_IGR2_EP5_STR_RESUME_INT (1 << 23) -#define FUSB300_IGR2_EP5_STR_REQ_INT (1 << 22) -#define FUSB300_IGR2_EP5_STR_NOTRDY_INT (1 << 21) -#define FUSB300_IGR2_EP5_STR_PRIME_INT (1 << 20) -#define FUSB300_IGR2_EP4_STR_ACCEPT_INT (1 << 19) -#define FUSB300_IGR2_EP4_STR_RESUME_INT (1 << 18) -#define FUSB300_IGR2_EP4_STR_REQ_INT (1 << 17) -#define FUSB300_IGR2_EP4_STR_NOTRDY_INT (1 << 16) -#define FUSB300_IGR2_EP4_STR_PRIME_INT (1 << 15) -#define FUSB300_IGR2_EP3_STR_ACCEPT_INT (1 << 14) -#define FUSB300_IGR2_EP3_STR_RESUME_INT (1 << 13) -#define FUSB300_IGR2_EP3_STR_REQ_INT (1 << 12) -#define FUSB300_IGR2_EP3_STR_NOTRDY_INT (1 << 11) -#define FUSB300_IGR2_EP3_STR_PRIME_INT (1 << 10) -#define FUSB300_IGR2_EP2_STR_ACCEPT_INT (1 << 9) -#define FUSB300_IGR2_EP2_STR_RESUME_INT (1 << 8) -#define FUSB300_IGR2_EP2_STR_REQ_INT (1 << 7) -#define FUSB300_IGR2_EP2_STR_NOTRDY_INT (1 << 6) -#define FUSB300_IGR2_EP2_STR_PRIME_INT (1 << 5) -#define FUSB300_IGR2_EP1_STR_ACCEPT_INT (1 << 4) -#define FUSB300_IGR2_EP1_STR_RESUME_INT (1 << 3) -#define FUSB300_IGR2_EP1_STR_REQ_INT (1 << 2) -#define FUSB300_IGR2_EP1_STR_NOTRDY_INT (1 << 1) -#define FUSB300_IGR2_EP1_STR_PRIME_INT (1 << 0) - -#define FUSB300_IGR2_EP_STR_ACCEPT_INT(n) (1 << (5 * n - 1)) -#define FUSB300_IGR2_EP_STR_RESUME_INT(n) (1 << (5 * n - 2)) -#define FUSB300_IGR2_EP_STR_REQ_INT(n) (1 << (5 * n - 3)) -#define FUSB300_IGR2_EP_STR_NOTRDY_INT(n) (1 << (5 * n - 4)) -#define FUSB300_IGR2_EP_STR_PRIME_INT(n) (1 << (5 * n - 5)) - -/* - * *Interrupt Group 3 Register (offset = 40CH) - * */ -#define FUSB300_IGR3_EP12_STR_ACCEPT_INT (1 << 29) -#define FUSB300_IGR3_EP12_STR_RESUME_INT (1 << 28) -#define FUSB300_IGR3_EP12_STR_REQ_INT (1 << 27) -#define FUSB300_IGR3_EP12_STR_NOTRDY_INT (1 << 26) -#define FUSB300_IGR3_EP12_STR_PRIME_INT (1 << 25) -#define FUSB300_IGR3_EP11_STR_ACCEPT_INT (1 << 24) -#define FUSB300_IGR3_EP11_STR_RESUME_INT (1 << 23) -#define FUSB300_IGR3_EP11_STR_REQ_INT (1 << 22) -#define FUSB300_IGR3_EP11_STR_NOTRDY_INT (1 << 21) -#define FUSB300_IGR3_EP11_STR_PRIME_INT (1 << 20) -#define FUSB300_IGR3_EP10_STR_ACCEPT_INT (1 << 19) -#define FUSB300_IGR3_EP10_STR_RESUME_INT (1 << 18) -#define FUSB300_IGR3_EP10_STR_REQ_INT (1 << 17) -#define FUSB300_IGR3_EP10_STR_NOTRDY_INT (1 << 16) -#define FUSB300_IGR3_EP10_STR_PRIME_INT (1 << 15) -#define FUSB300_IGR3_EP9_STR_ACCEPT_INT (1 << 14) -#define FUSB300_IGR3_EP9_STR_RESUME_INT (1 << 13) -#define FUSB300_IGR3_EP9_STR_REQ_INT (1 << 12) -#define FUSB300_IGR3_EP9_STR_NOTRDY_INT (1 << 11) -#define FUSB300_IGR3_EP9_STR_PRIME_INT (1 << 10) -#define FUSB300_IGR3_EP8_STR_ACCEPT_INT (1 << 9) -#define FUSB300_IGR3_EP8_STR_RESUME_INT (1 << 8) -#define FUSB300_IGR3_EP8_STR_REQ_INT (1 << 7) -#define FUSB300_IGR3_EP8_STR_NOTRDY_INT (1 << 6) -#define 
FUSB300_IGR3_EP8_STR_PRIME_INT (1 << 5) -#define FUSB300_IGR3_EP7_STR_ACCEPT_INT (1 << 4) -#define FUSB300_IGR3_EP7_STR_RESUME_INT (1 << 3) -#define FUSB300_IGR3_EP7_STR_REQ_INT (1 << 2) -#define FUSB300_IGR3_EP7_STR_NOTRDY_INT (1 << 1) -#define FUSB300_IGR3_EP7_STR_PRIME_INT (1 << 0) - -#define FUSB300_IGR3_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1)) -#define FUSB300_IGR3_EP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2)) -#define FUSB300_IGR3_EP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3)) -#define FUSB300_IGR3_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4)) -#define FUSB300_IGR3_EP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5)) - -/* - * *Interrupt Group 4 Register (offset = 410H) - * */ -#define FUSB300_IGR4_EP15_RX0_INT (1 << 31) -#define FUSB300_IGR4_EP14_RX0_INT (1 << 30) -#define FUSB300_IGR4_EP13_RX0_INT (1 << 29) -#define FUSB300_IGR4_EP12_RX0_INT (1 << 28) -#define FUSB300_IGR4_EP11_RX0_INT (1 << 27) -#define FUSB300_IGR4_EP10_RX0_INT (1 << 26) -#define FUSB300_IGR4_EP9_RX0_INT (1 << 25) -#define FUSB300_IGR4_EP8_RX0_INT (1 << 24) -#define FUSB300_IGR4_EP7_RX0_INT (1 << 23) -#define FUSB300_IGR4_EP6_RX0_INT (1 << 22) -#define FUSB300_IGR4_EP5_RX0_INT (1 << 21) -#define FUSB300_IGR4_EP4_RX0_INT (1 << 20) -#define FUSB300_IGR4_EP3_RX0_INT (1 << 19) -#define FUSB300_IGR4_EP2_RX0_INT (1 << 18) -#define FUSB300_IGR4_EP1_RX0_INT (1 << 17) -#define FUSB300_IGR4_EP_RX0_INT(x) (1 << (x + 16)) -#define FUSB300_IGR4_EP15_STR_ACCEPT_INT (1 << 14) -#define FUSB300_IGR4_EP15_STR_RESUME_INT (1 << 13) -#define FUSB300_IGR4_EP15_STR_REQ_INT (1 << 12) -#define FUSB300_IGR4_EP15_STR_NOTRDY_INT (1 << 11) -#define FUSB300_IGR4_EP15_STR_PRIME_INT (1 << 10) -#define FUSB300_IGR4_EP14_STR_ACCEPT_INT (1 << 9) -#define FUSB300_IGR4_EP14_STR_RESUME_INT (1 << 8) -#define FUSB300_IGR4_EP14_STR_REQ_INT (1 << 7) -#define FUSB300_IGR4_EP14_STR_NOTRDY_INT (1 << 6) -#define FUSB300_IGR4_EP14_STR_PRIME_INT (1 << 5) -#define FUSB300_IGR4_EP13_STR_ACCEPT_INT (1 << 4) -#define FUSB300_IGR4_EP13_STR_RESUME_INT (1 << 3) -#define FUSB300_IGR4_EP13_STR_REQ_INT (1 << 2) -#define FUSB300_IGR4_EP13_STR_NOTRDY_INT (1 << 1) -#define FUSB300_IGR4_EP13_STR_PRIME_INT (1 << 0) - -#define FUSB300_IGR4_EP_STR_ACCEPT_INT(n) (1 << (5 * (n - 12) - 1)) -#define FUSB300_IGR4_EP_STR_RESUME_INT(n) (1 << (5 * (n - 12) - 2)) -#define FUSB300_IGR4_EP_STR_REQ_INT(n) (1 << (5 * (n - 12) - 3)) -#define FUSB300_IGR4_EP_STR_NOTRDY_INT(n) (1 << (5 * (n - 12) - 4)) -#define FUSB300_IGR4_EP_STR_PRIME_INT(n) (1 << (5 * (n - 12) - 5)) - -/* - * *Interrupt Group 5 Register (offset = 414H) - * */ -#define FUSB300_IGR5_EP_STL_INT(n) (1 << n) - -/* - * *Interrupt Enable Group 0 Register (offset = 420H) - * */ -#define FUSB300_IGER0_EEP15_PRD_INT (1 << 31) -#define FUSB300_IGER0_EEP14_PRD_INT (1 << 30) -#define FUSB300_IGER0_EEP13_PRD_INT (1 << 29) -#define FUSB300_IGER0_EEP12_PRD_INT (1 << 28) -#define FUSB300_IGER0_EEP11_PRD_INT (1 << 27) -#define FUSB300_IGER0_EEP10_PRD_INT (1 << 26) -#define FUSB300_IGER0_EEP9_PRD_INT (1 << 25) -#define FUSB300_IGER0_EP8_PRD_INT (1 << 24) -#define FUSB300_IGER0_EEP7_PRD_INT (1 << 23) -#define FUSB300_IGER0_EEP6_PRD_INT (1 << 22) -#define FUSB300_IGER0_EEP5_PRD_INT (1 << 21) -#define FUSB300_IGER0_EEP4_PRD_INT (1 << 20) -#define FUSB300_IGER0_EEP3_PRD_INT (1 << 19) -#define FUSB300_IGER0_EEP2_PRD_INT (1 << 18) -#define FUSB300_IGER0_EEP1_PRD_INT (1 << 17) -#define FUSB300_IGER0_EEPn_PRD_INT(n) (1 << (n + 16)) - -#define FUSB300_IGER0_EEP15_FIFO_INT (1 << 15) -#define FUSB300_IGER0_EEP14_FIFO_INT (1 << 14) -#define 
FUSB300_IGER0_EEP13_FIFO_INT (1 << 13) -#define FUSB300_IGER0_EEP12_FIFO_INT (1 << 12) -#define FUSB300_IGER0_EEP11_FIFO_INT (1 << 11) -#define FUSB300_IGER0_EEP10_FIFO_INT (1 << 10) -#define FUSB300_IGER0_EEP9_FIFO_INT (1 << 9) -#define FUSB300_IGER0_EEP8_FIFO_INT (1 << 8) -#define FUSB300_IGER0_EEP7_FIFO_INT (1 << 7) -#define FUSB300_IGER0_EEP6_FIFO_INT (1 << 6) -#define FUSB300_IGER0_EEP5_FIFO_INT (1 << 5) -#define FUSB300_IGER0_EEP4_FIFO_INT (1 << 4) -#define FUSB300_IGER0_EEP3_FIFO_INT (1 << 3) -#define FUSB300_IGER0_EEP2_FIFO_INT (1 << 2) -#define FUSB300_IGER0_EEP1_FIFO_INT (1 << 1) -#define FUSB300_IGER0_EEPn_FIFO_INT(n) (1 << n) - -/* - * *Interrupt Enable Group 1 Register (offset = 424H) - * */ -#define FUSB300_IGER1_EINT_GRP5 (1 << 31) -#define FUSB300_IGER1_VBUS_CHG_INT (1 << 30) -#define FUSB300_IGER1_SYNF1_EMPTY_INT (1 << 29) -#define FUSB300_IGER1_SYNF0_EMPTY_INT (1 << 28) -#define FUSB300_IGER1_U3_EXIT_FAIL_INT (1 << 27) -#define FUSB300_IGER1_U2_EXIT_FAIL_INT (1 << 26) -#define FUSB300_IGER1_U1_EXIT_FAIL_INT (1 << 25) -#define FUSB300_IGER1_U2_ENTRY_FAIL_INT (1 << 24) -#define FUSB300_IGER1_U1_ENTRY_FAIL_INT (1 << 23) -#define FUSB300_IGER1_U3_EXIT_INT (1 << 22) -#define FUSB300_IGER1_U2_EXIT_INT (1 << 21) -#define FUSB300_IGER1_U1_EXIT_INT (1 << 20) -#define FUSB300_IGER1_U3_ENTRY_INT (1 << 19) -#define FUSB300_IGER1_U2_ENTRY_INT (1 << 18) -#define FUSB300_IGER1_U1_ENTRY_INT (1 << 17) -#define FUSB300_IGER1_HOT_RST_INT (1 << 16) -#define FUSB300_IGER1_WARM_RST_INT (1 << 15) -#define FUSB300_IGER1_RESM_INT (1 << 14) -#define FUSB300_IGER1_SUSP_INT (1 << 13) -#define FUSB300_IGER1_LPM_INT (1 << 12) -#define FUSB300_IGER1_HS_RST_INT (1 << 11) -#define FUSB300_IGER1_EDEV_MODE_CHG_INT (1 << 9) -#define FUSB300_IGER1_CX_COMABT_INT (1 << 8) -#define FUSB300_IGER1_CX_COMFAIL_INT (1 << 7) -#define FUSB300_IGER1_CX_CMDEND_INT (1 << 6) -#define FUSB300_IGER1_CX_OUT_INT (1 << 5) -#define FUSB300_IGER1_CX_IN_INT (1 << 4) -#define FUSB300_IGER1_CX_SETUP_INT (1 << 3) -#define FUSB300_IGER1_INTGRP4 (1 << 2) -#define FUSB300_IGER1_INTGRP3 (1 << 1) -#define FUSB300_IGER1_INTGRP2 (1 << 0) - -/* - * *Interrupt Enable Group 2 Register (offset = 428H) - * */ -#define FUSB300_IGER2_EEP_STR_ACCEPT_INT(n) (1 << (5 * n - 1)) -#define FUSB300_IGER2_EEP_STR_RESUME_INT(n) (1 << (5 * n - 2)) -#define FUSB300_IGER2_EEP_STR_REQ_INT(n) (1 << (5 * n - 3)) -#define FUSB300_IGER2_EEP_STR_NOTRDY_INT(n) (1 << (5 * n - 4)) -#define FUSB300_IGER2_EEP_STR_PRIME_INT(n) (1 << (5 * n - 5)) - -/* - * *Interrupt Enable Group 3 Register (offset = 42CH) - * */ - -#define FUSB300_IGER3_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1)) -#define FUSB300_IGER3_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2)) -#define FUSB300_IGER3_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3)) -#define FUSB300_IGER3_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4)) -#define FUSB300_IGER3_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5)) - -/* - * *Interrupt Enable Group 4 Register (offset = 430H) - * */ - -#define FUSB300_IGER4_EEP_RX0_INT(n) (1 << (n + 16)) -#define FUSB300_IGER4_EEP_STR_ACCEPT_INT(n) (1 << (5 * (n - 6) - 1)) -#define FUSB300_IGER4_EEP_STR_RESUME_INT(n) (1 << (5 * (n - 6) - 2)) -#define FUSB300_IGER4_EEP_STR_REQ_INT(n) (1 << (5 * (n - 6) - 3)) -#define FUSB300_IGER4_EEP_STR_NOTRDY_INT(n) (1 << (5 * (n - 6) - 4)) -#define FUSB300_IGER4_EEP_STR_PRIME_INT(n) (1 << (5 * (n - 6) - 5)) - -/* EP PRD Ready (EP_PRD_RDY, offset = 504H) */ - -#define FUSB300_EPPRDR_EP15_PRD_RDY (1 << 15) -#define FUSB300_EPPRDR_EP14_PRD_RDY (1 << 14) -#define 
FUSB300_EPPRDR_EP13_PRD_RDY (1 << 13) -#define FUSB300_EPPRDR_EP12_PRD_RDY (1 << 12) -#define FUSB300_EPPRDR_EP11_PRD_RDY (1 << 11) -#define FUSB300_EPPRDR_EP10_PRD_RDY (1 << 10) -#define FUSB300_EPPRDR_EP9_PRD_RDY (1 << 9) -#define FUSB300_EPPRDR_EP8_PRD_RDY (1 << 8) -#define FUSB300_EPPRDR_EP7_PRD_RDY (1 << 7) -#define FUSB300_EPPRDR_EP6_PRD_RDY (1 << 6) -#define FUSB300_EPPRDR_EP5_PRD_RDY (1 << 5) -#define FUSB300_EPPRDR_EP4_PRD_RDY (1 << 4) -#define FUSB300_EPPRDR_EP3_PRD_RDY (1 << 3) -#define FUSB300_EPPRDR_EP2_PRD_RDY (1 << 2) -#define FUSB300_EPPRDR_EP1_PRD_RDY (1 << 1) -#define FUSB300_EPPRDR_EP_PRD_RDY(n) (1 << n) - -/* AHB Bus Control Register (offset = 514H) */ -#define FUSB300_AHBBCR_S1_SPLIT_ON (1 << 17) -#define FUSB300_AHBBCR_S0_SPLIT_ON (1 << 16) -#define FUSB300_AHBBCR_S1_1entry (0 << 12) -#define FUSB300_AHBBCR_S1_4entry (3 << 12) -#define FUSB300_AHBBCR_S1_8entry (5 << 12) -#define FUSB300_AHBBCR_S1_16entry (7 << 12) -#define FUSB300_AHBBCR_S0_1entry (0 << 8) -#define FUSB300_AHBBCR_S0_4entry (3 << 8) -#define FUSB300_AHBBCR_S0_8entry (5 << 8) -#define FUSB300_AHBBCR_S0_16entry (7 << 8) -#define FUSB300_AHBBCR_M1_BURST_SINGLE (0 << 4) -#define FUSB300_AHBBCR_M1_BURST_INCR (1 << 4) -#define FUSB300_AHBBCR_M1_BURST_INCR4 (3 << 4) -#define FUSB300_AHBBCR_M1_BURST_INCR8 (5 << 4) -#define FUSB300_AHBBCR_M1_BURST_INCR16 (7 << 4) -#define FUSB300_AHBBCR_M0_BURST_SINGLE 0 -#define FUSB300_AHBBCR_M0_BURST_INCR 1 -#define FUSB300_AHBBCR_M0_BURST_INCR4 3 -#define FUSB300_AHBBCR_M0_BURST_INCR8 5 -#define FUSB300_AHBBCR_M0_BURST_INCR16 7 -#define FUSB300_IGER5_EEP_STL_INT(n) (1 << n) - -/* WORD 0 Data Structure of PRD Table */ -#define FUSB300_EPPRD0_M (1 << 30) -#define FUSB300_EPPRD0_O (1 << 29) -/* The finished prd */ -#define FUSB300_EPPRD0_F (1 << 28) -#define FUSB300_EPPRD0_I (1 << 27) -#define FUSB300_EPPRD0_A (1 << 26) -/* To decide HW point to first prd at next time */ -#define FUSB300_EPPRD0_L (1 << 25) -#define FUSB300_EPPRD0_H (1 << 24) -#define FUSB300_EPPRD0_BTC(n) (n & 0xFFFFFF) - -/*----------------------------------------------------------------------*/ -#define FUSB300_MAX_NUM_EP 16 - -#define FUSB300_FIFO_ENTRY_NUM 8 -#define FUSB300_MAX_FIFO_ENTRY 8 - -#define SS_CTL_MAX_PACKET_SIZE 0x200 -#define SS_BULK_MAX_PACKET_SIZE 0x400 -#define SS_INT_MAX_PACKET_SIZE 0x400 -#define SS_ISO_MAX_PACKET_SIZE 0x400 - -#define HS_BULK_MAX_PACKET_SIZE 0x200 -#define HS_CTL_MAX_PACKET_SIZE 0x40 -#define HS_INT_MAX_PACKET_SIZE 0x400 -#define HS_ISO_MAX_PACKET_SIZE 0x400 - -struct fusb300_ep_info { - u8 epnum; - u8 type; - u8 interval; - u8 dir_in; - u16 maxpacket; - u16 addrofs; - u16 bw_num; -}; - -struct fusb300_request { - - struct usb_request req; - struct list_head queue; -}; - - -struct fusb300_ep { - struct usb_ep ep; - struct fusb300 *fusb300; - - struct list_head queue; - unsigned stall:1; - unsigned wedged:1; - unsigned use_dma:1; - - unsigned char epnum; - unsigned char type; -}; - -struct fusb300 { - spinlock_t lock; - void __iomem *reg; - - unsigned long irq_trigger; - - struct usb_gadget gadget; - struct usb_gadget_driver *driver; - - struct fusb300_ep *ep[FUSB300_MAX_NUM_EP]; - - struct usb_request *ep0_req; /* for internal request */ - __le16 ep0_data; - u32 ep0_length; /* for internal request */ - u8 ep0_dir; /* 0/0x80 out/in */ - - u8 fifo_entry_num; /* next start fifo entry */ - u32 addrofs; /* next fifo address offset */ - u8 reenum; /* if re-enumeration */ -}; - -#define to_fusb300(g) (container_of((g), struct fusb300, gadget)) - -#endif diff --git 
a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 89d6daf2bda7..1a7d3c4f652f 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -1629,7 +1629,7 @@ static int lpc32xx_ep_enable(struct usb_ep *_ep, return -ESHUTDOWN; } - tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; + tmp = usb_endpoint_type(desc); switch (tmp) { case USB_ENDPOINT_XFER_CONTROL: return -EINVAL; diff --git a/drivers/usb/gadget/udc/mv_u3d.h b/drivers/usb/gadget/udc/mv_u3d.h deleted file mode 100644 index 66b84f792f64..000000000000 --- a/drivers/usb/gadget/udc/mv_u3d.h +++ /dev/null @@ -1,317 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2011 Marvell International Ltd. All rights reserved. - */ - -#ifndef __MV_U3D_H -#define __MV_U3D_H - -#define MV_U3D_EP_CONTEXT_ALIGNMENT 32 -#define MV_U3D_TRB_ALIGNMENT 16 -#define MV_U3D_DMA_BOUNDARY 4096 -#define MV_U3D_EP0_MAX_PKT_SIZE 512 - -/* ep0 transfer state */ -#define MV_U3D_WAIT_FOR_SETUP 0 -#define MV_U3D_DATA_STATE_XMIT 1 -#define MV_U3D_DATA_STATE_NEED_ZLP 2 -#define MV_U3D_WAIT_FOR_OUT_STATUS 3 -#define MV_U3D_DATA_STATE_RECV 4 -#define MV_U3D_STATUS_STAGE 5 - -#define MV_U3D_EP_MAX_LENGTH_TRANSFER 0x10000 - -/* USB3 Interrupt Status */ -#define MV_U3D_USBINT_SETUP 0x00000001 -#define MV_U3D_USBINT_RX_COMPLETE 0x00000002 -#define MV_U3D_USBINT_TX_COMPLETE 0x00000004 -#define MV_U3D_USBINT_UNDER_RUN 0x00000008 -#define MV_U3D_USBINT_RXDESC_ERR 0x00000010 -#define MV_U3D_USBINT_TXDESC_ERR 0x00000020 -#define MV_U3D_USBINT_RX_TRB_COMPLETE 0x00000040 -#define MV_U3D_USBINT_TX_TRB_COMPLETE 0x00000080 -#define MV_U3D_USBINT_VBUS_VALID 0x00010000 -#define MV_U3D_USBINT_STORAGE_CMD_FULL 0x00020000 -#define MV_U3D_USBINT_LINK_CHG 0x01000000 - -/* USB3 Interrupt Enable */ -#define MV_U3D_INTR_ENABLE_SETUP 0x00000001 -#define MV_U3D_INTR_ENABLE_RX_COMPLETE 0x00000002 -#define MV_U3D_INTR_ENABLE_TX_COMPLETE 0x00000004 -#define MV_U3D_INTR_ENABLE_UNDER_RUN 0x00000008 -#define MV_U3D_INTR_ENABLE_RXDESC_ERR 0x00000010 -#define MV_U3D_INTR_ENABLE_TXDESC_ERR 0x00000020 -#define MV_U3D_INTR_ENABLE_RX_TRB_COMPLETE 0x00000040 -#define MV_U3D_INTR_ENABLE_TX_TRB_COMPLETE 0x00000080 -#define MV_U3D_INTR_ENABLE_RX_BUFFER_ERR 0x00000100 -#define MV_U3D_INTR_ENABLE_VBUS_VALID 0x00010000 -#define MV_U3D_INTR_ENABLE_STORAGE_CMD_FULL 0x00020000 -#define MV_U3D_INTR_ENABLE_LINK_CHG 0x01000000 -#define MV_U3D_INTR_ENABLE_PRIME_STATUS 0x02000000 - -/* USB3 Link Change */ -#define MV_U3D_LINK_CHANGE_LINK_UP 0x00000001 -#define MV_U3D_LINK_CHANGE_SUSPEND 0x00000002 -#define MV_U3D_LINK_CHANGE_RESUME 0x00000004 -#define MV_U3D_LINK_CHANGE_WRESET 0x00000008 -#define MV_U3D_LINK_CHANGE_HRESET 0x00000010 -#define MV_U3D_LINK_CHANGE_VBUS_INVALID 0x00000020 -#define MV_U3D_LINK_CHANGE_INACT 0x00000040 -#define MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0 0x00000080 -#define MV_U3D_LINK_CHANGE_U1 0x00000100 -#define MV_U3D_LINK_CHANGE_U2 0x00000200 -#define MV_U3D_LINK_CHANGE_U3 0x00000400 - -/* bridge setting */ -#define MV_U3D_BRIDGE_SETTING_VBUS_VALID (1 << 16) - -/* Command Register Bit Masks */ -#define MV_U3D_CMD_RUN_STOP 0x00000001 -#define MV_U3D_CMD_CTRL_RESET 0x00000002 - -/* ep control register */ -#define MV_U3D_EPXCR_EP_TYPE_CONTROL 0 -#define MV_U3D_EPXCR_EP_TYPE_ISOC 1 -#define MV_U3D_EPXCR_EP_TYPE_BULK 2 -#define MV_U3D_EPXCR_EP_TYPE_INT 3 -#define MV_U3D_EPXCR_EP_ENABLE_SHIFT 4 -#define MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT 12 -#define MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT 16 -#define 
MV_U3D_USB_BULK_BURST_OUT 6 -#define MV_U3D_USB_BULK_BURST_IN 14 - -#define MV_U3D_EPXCR_EP_FLUSH (1 << 7) -#define MV_U3D_EPXCR_EP_HALT (1 << 1) -#define MV_U3D_EPXCR_EP_INIT (1) - -/* TX/RX Status Register */ -#define MV_U3D_XFERSTATUS_COMPLETE_SHIFT 24 -#define MV_U3D_COMPLETE_INVALID 0 -#define MV_U3D_COMPLETE_SUCCESS 1 -#define MV_U3D_COMPLETE_BUFF_ERR 2 -#define MV_U3D_COMPLETE_SHORT_PACKET 3 -#define MV_U3D_COMPLETE_TRB_ERR 5 -#define MV_U3D_XFERSTATUS_TRB_LENGTH_MASK (0xFFFFFF) - -#define MV_U3D_USB_LINK_BYPASS_VBUS 0x8 - -#define MV_U3D_LTSSM_PHY_INIT_DONE 0x80000000 -#define MV_U3D_LTSSM_NEVER_GO_COMPLIANCE 0x40000000 - -#define MV_U3D_USB3_OP_REGS_OFFSET 0x100 -#define MV_U3D_USB3_PHY_OFFSET 0xB800 - -#define DCS_ENABLE 0x1 - -/* timeout */ -#define MV_U3D_RESET_TIMEOUT 10000 -#define MV_U3D_FLUSH_TIMEOUT 100000 -#define MV_U3D_OWN_TIMEOUT 10000 -#define LOOPS_USEC_SHIFT 4 -#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT) -#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT) - -/* ep direction */ -#define MV_U3D_EP_DIR_IN 1 -#define MV_U3D_EP_DIR_OUT 0 -#define mv_u3d_ep_dir(ep) (((ep)->ep_num == 0) ? \ - ((ep)->u3d->ep0_dir) : ((ep)->direction)) - -/* usb capability registers */ -struct mv_u3d_cap_regs { - u32 rsvd[5]; - u32 dboff; /* doorbell register offset */ - u32 rtsoff; /* runtime register offset */ - u32 vuoff; /* vendor unique register offset */ -}; - -/* operation registers */ -struct mv_u3d_op_regs { - u32 usbcmd; /* Command register */ - u32 rsvd1[11]; - u32 dcbaapl; /* Device Context Base Address low register */ - u32 dcbaaph; /* Device Context Base Address high register */ - u32 rsvd2[243]; - u32 portsc; /* port status and control register*/ - u32 portlinkinfo; /* port link info register*/ - u32 rsvd3[9917]; - u32 doorbell; /* doorbell register */ -}; - -/* control endpoint enable registers */ -struct epxcr { - u32 epxoutcr0; /* ep out control 0 register */ - u32 epxoutcr1; /* ep out control 1 register */ - u32 epxincr0; /* ep in control 0 register */ - u32 epxincr1; /* ep in control 1 register */ -}; - -/* transfer status registers */ -struct xferstatus { - u32 curdeqlo; /* current TRB pointer low */ - u32 curdeqhi; /* current TRB pointer high */ - u32 statuslo; /* transfer status low */ - u32 statushi; /* transfer status high */ -}; - -/* vendor unique control registers */ -struct mv_u3d_vuc_regs { - u32 ctrlepenable; /* control endpoint enable register */ - u32 setuplock; /* setup lock register */ - u32 endcomplete; /* endpoint transfer complete register */ - u32 intrcause; /* interrupt cause register */ - u32 intrenable; /* interrupt enable register */ - u32 trbcomplete; /* TRB complete register */ - u32 linkchange; /* link change register */ - u32 rsvd1[5]; - u32 trbunderrun; /* TRB underrun register */ - u32 rsvd2[43]; - u32 bridgesetting; /* bridge setting register */ - u32 rsvd3[7]; - struct xferstatus txst[16]; /* TX status register */ - struct xferstatus rxst[16]; /* RX status register */ - u32 ltssm; /* LTSSM control register */ - u32 pipe; /* PIPE control register */ - u32 linkcr0; /* link control 0 register */ - u32 linkcr1; /* link control 1 register */ - u32 rsvd6[60]; - u32 mib0; /* MIB0 counter register */ - u32 usblink; /* usb link control register */ - u32 ltssmstate; /* LTSSM state register */ - u32 linkerrorcause; /* link error cause register */ - u32 rsvd7[60]; - u32 devaddrtiebrkr; /* device address and tie breaker */ - u32 itpinfo0; /* ITP info 0 register */ - u32 itpinfo1; /* ITP info 1 register */ - u32 rsvd8[61]; - struct epxcr epcr[16]; /* 
ep control register */ - u32 rsvd9[64]; - u32 phyaddr; /* PHY address register */ - u32 phydata; /* PHY data register */ -}; - -/* Endpoint context structure */ -struct mv_u3d_ep_context { - u32 rsvd0; - u32 rsvd1; - u32 trb_addr_lo; /* TRB address low 32 bit */ - u32 trb_addr_hi; /* TRB address high 32 bit */ - u32 rsvd2; - u32 rsvd3; - struct usb_ctrlrequest setup_buffer; /* setup data buffer */ -}; - -/* TRB control data structure */ -struct mv_u3d_trb_ctrl { - u32 own:1; /* owner of TRB */ - u32 rsvd1:3; - u32 chain:1; /* associate this TRB with the - next TRB on the Ring */ - u32 ioc:1; /* interrupt on complete */ - u32 rsvd2:4; - u32 type:6; /* TRB type */ -#define TYPE_NORMAL 1 -#define TYPE_DATA 3 -#define TYPE_LINK 6 - u32 dir:1; /* Working at data stage of control endpoint - operation. 0 is OUT and 1 is IN. */ - u32 rsvd3:15; -}; - -/* TRB data structure - * For multiple TRB, all the TRBs' physical address should be continuous. - */ -struct mv_u3d_trb_hw { - u32 buf_addr_lo; /* data buffer address low 32 bit */ - u32 buf_addr_hi; /* data buffer address high 32 bit */ - u32 trb_len; /* transfer length */ - struct mv_u3d_trb_ctrl ctrl; /* TRB control data */ -}; - -/* TRB structure */ -struct mv_u3d_trb { - struct mv_u3d_trb_hw *trb_hw; /* point to the trb_hw structure */ - dma_addr_t trb_dma; /* dma address for this trb_hw */ - struct list_head trb_list; /* trb list */ -}; - -/* device data structure */ -struct mv_u3d { - struct usb_gadget gadget; - struct usb_gadget_driver *driver; - spinlock_t lock; /* device lock */ - struct completion *done; - struct device *dev; - int irq; - - /* usb controller registers */ - struct mv_u3d_cap_regs __iomem *cap_regs; - struct mv_u3d_op_regs __iomem *op_regs; - struct mv_u3d_vuc_regs __iomem *vuc_regs; - void __iomem *phy_regs; - - unsigned int max_eps; - struct mv_u3d_ep_context *ep_context; - size_t ep_context_size; - dma_addr_t ep_context_dma; - - struct dma_pool *trb_pool; /* for TRB data structure */ - struct mv_u3d_ep *eps; - - struct mv_u3d_req *status_req; /* ep0 status request */ - struct usb_ctrlrequest local_setup_buff; /* store setup data*/ - - unsigned int resume_state; /* USB state to resume */ - unsigned int usb_state; /* USB current state */ - unsigned int ep0_state; /* Endpoint zero state */ - unsigned int ep0_dir; - - unsigned int dev_addr; /* device address */ - - unsigned int errors; - - unsigned softconnect:1; - unsigned vbus_active:1; /* vbus is active or not */ - unsigned remote_wakeup:1; /* support remote wakeup */ - unsigned clock_gating:1; /* clock gating or not */ - unsigned active:1; /* udc is active or not */ - unsigned vbus_valid_detect:1; /* udc vbus detection */ - - struct mv_usb_addon_irq *vbus; - unsigned int power; - - struct clk *clk; -}; - -/* endpoint data structure */ -struct mv_u3d_ep { - struct usb_ep ep; - struct mv_u3d *u3d; - struct list_head queue; /* ep request queued hardware */ - struct list_head req_list; /* list of ep request */ - struct mv_u3d_ep_context *ep_context; /* ep context */ - u32 direction; - char name[14]; - u32 processing; /* there is ep request - queued on haredware */ - spinlock_t req_lock; /* ep lock */ - unsigned wedge:1; - unsigned enabled:1; - unsigned ep_type:2; - unsigned ep_num:8; -}; - -/* request data structure */ -struct mv_u3d_req { - struct usb_request req; - struct mv_u3d_ep *ep; - struct list_head queue; /* ep requst queued on hardware */ - struct list_head list; /* ep request list */ - struct list_head trb_list; /* trb list of a request */ - - struct mv_u3d_trb 
*trb_head; /* point to first trb of a request */ - unsigned trb_count; /* TRB number in the chain */ - unsigned chain; /* TRB chain or not */ -}; - -#endif diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c deleted file mode 100644 index 062f43e146aa..000000000000 --- a/drivers/usb/gadget/udc/mv_u3d_core.c +++ /dev/null @@ -1,2062 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2011 Marvell International Ltd. All rights reserved. - */ - -#include <linux/module.h> -#include <linux/dma-mapping.h> -#include <linux/dmapool.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/timer.h> -#include <linux/list.h> -#include <linux/notifier.h> -#include <linux/interrupt.h> -#include <linux/moduleparam.h> -#include <linux/device.h> -#include <linux/usb/ch9.h> -#include <linux/usb/gadget.h> -#include <linux/pm.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/platform_device.h> -#include <linux/platform_data/mv_usb.h> -#include <linux/clk.h> - -#include "mv_u3d.h" - -#define DRIVER_DESC "Marvell PXA USB3.0 Device Controller driver" - -static const char driver_name[] = "mv_u3d"; - -static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status); -static void mv_u3d_stop_activity(struct mv_u3d *u3d, - struct usb_gadget_driver *driver); - -/* for endpoint 0 operations */ -static const struct usb_endpoint_descriptor mv_u3d_ep0_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = 0, - .bmAttributes = USB_ENDPOINT_XFER_CONTROL, - .wMaxPacketSize = MV_U3D_EP0_MAX_PKT_SIZE, -}; - -static void mv_u3d_ep0_reset(struct mv_u3d *u3d) -{ - struct mv_u3d_ep *ep; - u32 epxcr; - int i; - - for (i = 0; i < 2; i++) { - ep = &u3d->eps[i]; - ep->u3d = u3d; - - /* ep0 ep context, ep0 in and out share the same ep context */ - ep->ep_context = &u3d->ep_context[1]; - } - - /* reset ep state machine */ - /* reset ep0 out */ - epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0); - epxcr |= MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0); - udelay(5); - epxcr &= ~MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr0); - - epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE - << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) - | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) - | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) - | MV_U3D_EPXCR_EP_TYPE_CONTROL); - iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxoutcr1); - - /* reset ep0 in */ - epxcr = ioread32(&u3d->vuc_regs->epcr[0].epxincr0); - epxcr |= MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0); - udelay(5); - epxcr &= ~MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr0); - - epxcr = ((MV_U3D_EP0_MAX_PKT_SIZE - << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) - | (1 << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) - | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) - | MV_U3D_EPXCR_EP_TYPE_CONTROL); - iowrite32(epxcr, &u3d->vuc_regs->epcr[0].epxincr1); -} - -static void mv_u3d_ep0_stall(struct mv_u3d *u3d) -{ - u32 tmp; - dev_dbg(u3d->dev, "%s\n", __func__); - - /* set TX and RX to stall */ - tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0); - tmp |= MV_U3D_EPXCR_EP_HALT; - iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0); - - tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0); - tmp |= MV_U3D_EPXCR_EP_HALT; - iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0); - - /* update ep0 state */ - u3d->ep0_state = 
MV_U3D_WAIT_FOR_SETUP; - u3d->ep0_dir = MV_U3D_EP_DIR_OUT; -} - -static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index, - struct mv_u3d_req *curr_req) -{ - struct mv_u3d_trb *curr_trb; - int actual, remaining_length = 0; - int direction, ep_num; - int retval = 0; - u32 tmp, status, length; - - direction = index % 2; - ep_num = index / 2; - - actual = curr_req->req.length; - - while (!list_empty(&curr_req->trb_list)) { - curr_trb = list_entry(curr_req->trb_list.next, - struct mv_u3d_trb, trb_list); - if (!curr_trb->trb_hw->ctrl.own) { - dev_err(u3d->dev, "%s, TRB own error!\n", - u3d->eps[index].name); - return 1; - } - - curr_trb->trb_hw->ctrl.own = 0; - if (direction == MV_U3D_EP_DIR_OUT) - tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo); - else - tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo); - - status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT; - length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK; - - if (status == MV_U3D_COMPLETE_SUCCESS || - (status == MV_U3D_COMPLETE_SHORT_PACKET && - direction == MV_U3D_EP_DIR_OUT)) { - remaining_length += length; - actual -= remaining_length; - } else { - dev_err(u3d->dev, - "complete_tr error: ep=%d %s: error = 0x%x\n", - index >> 1, direction ? "SEND" : "RECV", - status); - retval = -EPROTO; - } - - list_del_init(&curr_trb->trb_list); - } - if (retval) - return retval; - - curr_req->req.actual = actual; - return 0; -} - -/* - * mv_u3d_done() - retire a request; caller blocked irqs - * @status : request status to be set, only works when - * request is still in progress. - */ -static -void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status) - __releases(&ep->udc->lock) - __acquires(&ep->udc->lock) -{ - struct mv_u3d *u3d = (struct mv_u3d *)ep->u3d; - - dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n"); - /* Removed the req from ep queue */ - list_del_init(&req->queue); - - /* req.status should be set as -EINPROGRESS in ep_queue() */ - if (req->req.status == -EINPROGRESS) - req->req.status = status; - else - status = req->req.status; - - /* Free trb for the request */ - if (!req->chain) - dma_pool_free(u3d->trb_pool, - req->trb_head->trb_hw, req->trb_head->trb_dma); - else { - dma_unmap_single(ep->u3d->gadget.dev.parent, - (dma_addr_t)req->trb_head->trb_dma, - req->trb_count * sizeof(struct mv_u3d_trb_hw), - DMA_BIDIRECTIONAL); - kfree(req->trb_head->trb_hw); - } - kfree(req->trb_head); - - usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep)); - - if (status && (status != -ESHUTDOWN)) { - dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u", - ep->ep.name, &req->req, status, - req->req.actual, req->req.length); - } - - spin_unlock(&ep->u3d->lock); - - usb_gadget_giveback_request(&ep->ep, &req->req); - - spin_lock(&ep->u3d->lock); -} - -static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req) -{ - u32 tmp, direction; - struct mv_u3d *u3d; - struct mv_u3d_ep_context *ep_context; - int retval = 0; - - u3d = ep->u3d; - direction = mv_u3d_ep_dir(ep); - - /* ep0 in and out share the same ep context slot 1*/ - if (ep->ep_num == 0) - ep_context = &(u3d->ep_context[1]); - else - ep_context = &(u3d->ep_context[ep->ep_num * 2 + direction]); - - /* check if the pipe is empty or not */ - if (!list_empty(&ep->queue)) { - dev_err(u3d->dev, "add trb to non-empty queue!\n"); - retval = -ENOMEM; - WARN_ON(1); - } else { - ep_context->rsvd0 = cpu_to_le32(1); - ep_context->rsvd1 = 0; - - /* Configure the trb address and set the DCS bit. - * Both DCS bit and own bit in trb should be set. 
- */ - ep_context->trb_addr_lo = - cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE); - ep_context->trb_addr_hi = 0; - - /* Ensure that updates to the EP Context will - * occure before Ring Bell. - */ - wmb(); - - /* ring bell the ep */ - if (ep->ep_num == 0) - tmp = 0x1; - else - tmp = ep->ep_num * 2 - + ((direction == MV_U3D_EP_DIR_OUT) ? 0 : 1); - - iowrite32(tmp, &u3d->op_regs->doorbell); - } - return retval; -} - -static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req, - unsigned *length, dma_addr_t *dma) -{ - u32 temp; - unsigned int direction; - struct mv_u3d_trb *trb; - struct mv_u3d_trb_hw *trb_hw; - struct mv_u3d *u3d; - - /* how big will this transfer be? */ - *length = req->req.length - req->req.actual; - BUG_ON(*length > (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER); - - u3d = req->ep->u3d; - - trb = kzalloc(sizeof(*trb), GFP_ATOMIC); - if (!trb) - return NULL; - - /* - * Be careful that no _GFP_HIGHMEM is set, - * or we can not use dma_to_virt - * cannot use GFP_KERNEL in spin lock - */ - trb_hw = dma_pool_alloc(u3d->trb_pool, GFP_ATOMIC, dma); - if (!trb_hw) { - kfree(trb); - dev_err(u3d->dev, - "%s, dma_pool_alloc fail\n", __func__); - return NULL; - } - trb->trb_dma = *dma; - trb->trb_hw = trb_hw; - - /* initialize buffer page pointers */ - temp = (u32)(req->req.dma + req->req.actual); - - trb_hw->buf_addr_lo = cpu_to_le32(temp); - trb_hw->buf_addr_hi = 0; - trb_hw->trb_len = cpu_to_le32(*length); - trb_hw->ctrl.own = 1; - - if (req->ep->ep_num == 0) - trb_hw->ctrl.type = TYPE_DATA; - else - trb_hw->ctrl.type = TYPE_NORMAL; - - req->req.actual += *length; - - direction = mv_u3d_ep_dir(req->ep); - if (direction == MV_U3D_EP_DIR_IN) - trb_hw->ctrl.dir = 1; - else - trb_hw->ctrl.dir = 0; - - /* Enable interrupt for the last trb of a request */ - if (!req->req.no_interrupt) - trb_hw->ctrl.ioc = 1; - - trb_hw->ctrl.chain = 0; - - wmb(); - return trb; -} - -static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length, - struct mv_u3d_trb *trb, int *is_last) -{ - u32 temp; - unsigned int direction; - struct mv_u3d *u3d; - - /* how big will this transfer be? */ - *length = min(req->req.length - req->req.actual, - (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER); - - u3d = req->ep->u3d; - - trb->trb_dma = 0; - - /* initialize buffer page pointers */ - temp = (u32)(req->req.dma + req->req.actual); - - trb->trb_hw->buf_addr_lo = cpu_to_le32(temp); - trb->trb_hw->buf_addr_hi = 0; - trb->trb_hw->trb_len = cpu_to_le32(*length); - trb->trb_hw->ctrl.own = 1; - - if (req->ep->ep_num == 0) - trb->trb_hw->ctrl.type = TYPE_DATA; - else - trb->trb_hw->ctrl.type = TYPE_NORMAL; - - req->req.actual += *length; - - direction = mv_u3d_ep_dir(req->ep); - if (direction == MV_U3D_EP_DIR_IN) - trb->trb_hw->ctrl.dir = 1; - else - trb->trb_hw->ctrl.dir = 0; - - /* zlp is needed if req->req.zero is set */ - if (req->req.zero) { - if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) - *is_last = 1; - else - *is_last = 0; - } else if (req->req.length == req->req.actual) - *is_last = 1; - else - *is_last = 0; - - /* Enable interrupt for the last trb of a request */ - if (*is_last && !req->req.no_interrupt) - trb->trb_hw->ctrl.ioc = 1; - - if (*is_last) - trb->trb_hw->ctrl.chain = 0; - else { - trb->trb_hw->ctrl.chain = 1; - dev_dbg(u3d->dev, "chain trb\n"); - } - - wmb(); - - return 0; -} - -/* generate TRB linked list for a request - * usb controller only supports continous trb chain, - * that trb structure physical address should be continous. 
- */ -static int mv_u3d_req_to_trb(struct mv_u3d_req *req) -{ - unsigned count; - int is_last; - struct mv_u3d_trb *trb; - struct mv_u3d_trb_hw *trb_hw; - struct mv_u3d *u3d; - dma_addr_t dma; - unsigned length; - unsigned trb_num; - - u3d = req->ep->u3d; - - INIT_LIST_HEAD(&req->trb_list); - - length = req->req.length - req->req.actual; - /* normally the request transfer length is less than 16KB. - * we use buil_trb_one() to optimize it. - */ - if (length <= (unsigned)MV_U3D_EP_MAX_LENGTH_TRANSFER) { - trb = mv_u3d_build_trb_one(req, &count, &dma); - list_add_tail(&trb->trb_list, &req->trb_list); - req->trb_head = trb; - req->trb_count = 1; - req->chain = 0; - } else { - trb_num = length / MV_U3D_EP_MAX_LENGTH_TRANSFER; - if (length % MV_U3D_EP_MAX_LENGTH_TRANSFER) - trb_num++; - - trb = kcalloc(trb_num, sizeof(*trb), GFP_ATOMIC); - if (!trb) - return -ENOMEM; - - trb_hw = kcalloc(trb_num, sizeof(*trb_hw), GFP_ATOMIC); - if (!trb_hw) { - kfree(trb); - return -ENOMEM; - } - - do { - trb->trb_hw = trb_hw; - if (mv_u3d_build_trb_chain(req, &count, - trb, &is_last)) { - dev_err(u3d->dev, - "%s, mv_u3d_build_trb_chain fail\n", - __func__); - return -EIO; - } - - list_add_tail(&trb->trb_list, &req->trb_list); - req->trb_count++; - trb++; - trb_hw++; - } while (!is_last); - - req->trb_head = list_entry(req->trb_list.next, - struct mv_u3d_trb, trb_list); - req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent, - req->trb_head->trb_hw, - trb_num * sizeof(*trb_hw), - DMA_BIDIRECTIONAL); - if (dma_mapping_error(u3d->gadget.dev.parent, - req->trb_head->trb_dma)) { - kfree(req->trb_head->trb_hw); - kfree(req->trb_head); - return -EFAULT; - } - - req->chain = 1; - } - - return 0; -} - -static int -mv_u3d_start_queue(struct mv_u3d_ep *ep) -{ - struct mv_u3d *u3d = ep->u3d; - struct mv_u3d_req *req; - int ret; - - if (!list_empty(&ep->req_list) && !ep->processing) - req = list_entry(ep->req_list.next, struct mv_u3d_req, list); - else - return 0; - - ep->processing = 1; - - /* set up dma mapping */ - ret = usb_gadget_map_request(&u3d->gadget, &req->req, - mv_u3d_ep_dir(ep)); - if (ret) - goto break_processing; - - req->req.status = -EINPROGRESS; - req->req.actual = 0; - req->trb_count = 0; - - /* build trbs */ - ret = mv_u3d_req_to_trb(req); - if (ret) { - dev_err(u3d->dev, "%s, mv_u3d_req_to_trb fail\n", __func__); - goto break_processing; - } - - /* and push them to device queue */ - ret = mv_u3d_queue_trb(ep, req); - if (ret) - goto break_processing; - - /* irq handler advances the queue */ - list_add_tail(&req->queue, &ep->queue); - - return 0; - -break_processing: - ep->processing = 0; - return ret; -} - -static int mv_u3d_ep_enable(struct usb_ep *_ep, - const struct usb_endpoint_descriptor *desc) -{ - struct mv_u3d *u3d; - struct mv_u3d_ep *ep; - u16 max = 0; - unsigned maxburst = 0; - u32 epxcr, direction; - - if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) - return -EINVAL; - - ep = container_of(_ep, struct mv_u3d_ep, ep); - u3d = ep->u3d; - - if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - direction = mv_u3d_ep_dir(ep); - max = le16_to_cpu(desc->wMaxPacketSize); - - if (!_ep->maxburst) - _ep->maxburst = 1; - maxburst = _ep->maxburst; - - /* Set the max burst size */ - switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { - case USB_ENDPOINT_XFER_BULK: - if (maxburst > 16) { - dev_dbg(u3d->dev, - "max burst should not be greater " - "than 16 on bulk ep\n"); - maxburst = 1; - _ep->maxburst = maxburst; - } - dev_dbg(u3d->dev, - 
"maxburst: %d on bulk %s\n", maxburst, ep->name); - break; - case USB_ENDPOINT_XFER_CONTROL: - /* control transfer only supports maxburst as one */ - maxburst = 1; - _ep->maxburst = maxburst; - break; - case USB_ENDPOINT_XFER_INT: - if (maxburst != 1) { - dev_dbg(u3d->dev, - "max burst should be 1 on int ep " - "if transfer size is not 1024\n"); - maxburst = 1; - _ep->maxburst = maxburst; - } - break; - case USB_ENDPOINT_XFER_ISOC: - if (maxburst != 1) { - dev_dbg(u3d->dev, - "max burst should be 1 on isoc ep " - "if transfer size is not 1024\n"); - maxburst = 1; - _ep->maxburst = maxburst; - } - break; - default: - goto en_done; - } - - ep->ep.maxpacket = max; - ep->ep.desc = desc; - ep->enabled = 1; - - /* Enable the endpoint for Rx or Tx and set the endpoint type */ - if (direction == MV_U3D_EP_DIR_OUT) { - epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - epxcr |= MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - udelay(5); - epxcr &= ~MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - - epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) - | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) - | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) - | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)); - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); - } else { - epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - epxcr |= MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - udelay(5); - epxcr &= ~MV_U3D_EPXCR_EP_INIT; - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - - epxcr = ((max << MV_U3D_EPXCR_MAX_PACKET_SIZE_SHIFT) - | ((maxburst - 1) << MV_U3D_EPXCR_MAX_BURST_SIZE_SHIFT) - | (1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) - | (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)); - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1); - } - - return 0; -en_done: - return -EINVAL; -} - -static int mv_u3d_ep_disable(struct usb_ep *_ep) -{ - struct mv_u3d *u3d; - struct mv_u3d_ep *ep; - u32 epxcr, direction; - unsigned long flags; - - if (!_ep) - return -EINVAL; - - ep = container_of(_ep, struct mv_u3d_ep, ep); - if (!ep->ep.desc) - return -EINVAL; - - u3d = ep->u3d; - - direction = mv_u3d_ep_dir(ep); - - /* nuke all pending requests (does flush) */ - spin_lock_irqsave(&u3d->lock, flags); - mv_u3d_nuke(ep, -ESHUTDOWN); - spin_unlock_irqrestore(&u3d->lock, flags); - - /* Disable the endpoint for Rx or Tx and reset the endpoint type */ - if (direction == MV_U3D_EP_DIR_OUT) { - epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); - epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) - | USB_ENDPOINT_XFERTYPE_MASK); - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr1); - } else { - epxcr = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr1); - epxcr &= ~((1 << MV_U3D_EPXCR_EP_ENABLE_SHIFT) - | USB_ENDPOINT_XFERTYPE_MASK); - iowrite32(epxcr, &u3d->vuc_regs->epcr[ep->ep_num].epxincr1); - } - - ep->enabled = 0; - - ep->ep.desc = NULL; - return 0; -} - -static struct usb_request * -mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) -{ - struct mv_u3d_req *req; - - req = kzalloc(sizeof *req, gfp_flags); - if (!req) - return NULL; - - INIT_LIST_HEAD(&req->queue); - - return &req->req; -} - -static void mv_u3d_free_request(struct usb_ep *_ep, struct usb_request *_req) -{ - struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req); - - kfree(req); -} - -static void mv_u3d_ep_fifo_flush(struct usb_ep *_ep) -{ - struct 
mv_u3d *u3d; - u32 direction; - struct mv_u3d_ep *ep = container_of(_ep, struct mv_u3d_ep, ep); - unsigned int loops; - u32 tmp; - - /* if endpoint is not enabled, cannot flush endpoint */ - if (!ep->enabled) - return; - - u3d = ep->u3d; - direction = mv_u3d_ep_dir(ep); - - /* ep0 need clear bit after flushing fifo. */ - if (!ep->ep_num) { - if (direction == MV_U3D_EP_DIR_OUT) { - tmp = ioread32(&u3d->vuc_regs->epcr[0].epxoutcr0); - tmp |= MV_U3D_EPXCR_EP_FLUSH; - iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0); - udelay(10); - tmp &= ~MV_U3D_EPXCR_EP_FLUSH; - iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxoutcr0); - } else { - tmp = ioread32(&u3d->vuc_regs->epcr[0].epxincr0); - tmp |= MV_U3D_EPXCR_EP_FLUSH; - iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0); - udelay(10); - tmp &= ~MV_U3D_EPXCR_EP_FLUSH; - iowrite32(tmp, &u3d->vuc_regs->epcr[0].epxincr0); - } - return; - } - - if (direction == MV_U3D_EP_DIR_OUT) { - tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - tmp |= MV_U3D_EPXCR_EP_FLUSH; - iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - - /* Wait until flushing completed */ - loops = LOOPS(MV_U3D_FLUSH_TIMEOUT); - while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0) & - MV_U3D_EPXCR_EP_FLUSH) { - /* - * EP_FLUSH bit should be cleared to indicate this - * operation is complete - */ - if (loops == 0) { - dev_dbg(u3d->dev, - "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num, - direction ? "in" : "out"); - return; - } - loops--; - udelay(LOOPS_USEC); - } - } else { /* EP_DIR_IN */ - tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - tmp |= MV_U3D_EPXCR_EP_FLUSH; - iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - - /* Wait until flushing completed */ - loops = LOOPS(MV_U3D_FLUSH_TIMEOUT); - while (ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0) & - MV_U3D_EPXCR_EP_FLUSH) { - /* - * EP_FLUSH bit should be cleared to indicate this - * operation is complete - */ - if (loops == 0) { - dev_dbg(u3d->dev, - "EP FLUSH TIMEOUT for ep%d%s\n", ep->ep_num, - direction ? 
"in" : "out"); - return; - } - loops--; - udelay(LOOPS_USEC); - } - } -} - -/* queues (submits) an I/O request to an endpoint */ -static int -mv_u3d_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) -{ - struct mv_u3d_ep *ep; - struct mv_u3d_req *req; - struct mv_u3d *u3d; - unsigned long flags; - int is_first_req = 0; - - if (unlikely(!_ep || !_req)) - return -EINVAL; - - ep = container_of(_ep, struct mv_u3d_ep, ep); - u3d = ep->u3d; - - req = container_of(_req, struct mv_u3d_req, req); - - if (!ep->ep_num - && u3d->ep0_state == MV_U3D_STATUS_STAGE - && !_req->length) { - dev_dbg(u3d->dev, "ep0 status stage\n"); - u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP; - return 0; - } - - dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n", - __func__, _ep->name, req); - - /* catch various bogus parameters */ - if (!req->req.complete || !req->req.buf - || !list_empty(&req->queue)) { - dev_err(u3d->dev, - "%s, bad params, _req: 0x%p," - "req->req.complete: 0x%p, req->req.buf: 0x%p," - "list_empty: 0x%x\n", - __func__, _req, - req->req.complete, req->req.buf, - list_empty(&req->queue)); - return -EINVAL; - } - if (unlikely(!ep->ep.desc)) { - dev_err(u3d->dev, "%s, bad ep\n", __func__); - return -EINVAL; - } - if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { - if (req->req.length > ep->ep.maxpacket) - return -EMSGSIZE; - } - - if (!u3d->driver || u3d->gadget.speed == USB_SPEED_UNKNOWN) { - dev_err(u3d->dev, - "bad params of driver/speed\n"); - return -ESHUTDOWN; - } - - req->ep = ep; - - /* Software list handles usb request. */ - spin_lock_irqsave(&ep->req_lock, flags); - is_first_req = list_empty(&ep->req_list); - list_add_tail(&req->list, &ep->req_list); - spin_unlock_irqrestore(&ep->req_lock, flags); - if (!is_first_req) { - dev_dbg(u3d->dev, "list is not empty\n"); - return 0; - } - - dev_dbg(u3d->dev, "call mv_u3d_start_queue from usb_ep_queue\n"); - spin_lock_irqsave(&u3d->lock, flags); - mv_u3d_start_queue(ep); - spin_unlock_irqrestore(&u3d->lock, flags); - return 0; -} - -/* dequeues (cancels, unlinks) an I/O request from an endpoint */ -static int mv_u3d_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) -{ - struct mv_u3d_ep *ep; - struct mv_u3d_req *req = NULL, *iter; - struct mv_u3d *u3d; - struct mv_u3d_ep_context *ep_context; - struct mv_u3d_req *next_req; - - unsigned long flags; - int ret = 0; - - if (!_ep || !_req) - return -EINVAL; - - ep = container_of(_ep, struct mv_u3d_ep, ep); - u3d = ep->u3d; - - spin_lock_irqsave(&ep->u3d->lock, flags); - - /* make sure it's actually queued on this endpoint */ - list_for_each_entry(iter, &ep->queue, queue) { - if (&iter->req != _req) - continue; - req = iter; - break; - } - if (!req) { - ret = -EINVAL; - goto out; - } - - /* The request is in progress, or completed but not dequeued */ - if (ep->queue.next == &req->queue) { - _req->status = -ECONNRESET; - mv_u3d_ep_fifo_flush(_ep); - - /* The request isn't the last request in this ep queue */ - if (req->queue.next != &ep->queue) { - dev_dbg(u3d->dev, - "it is the last request in this ep queue\n"); - ep_context = ep->ep_context; - next_req = list_entry(req->queue.next, - struct mv_u3d_req, queue); - - /* Point first TRB of next request to the EP context. 
*/ - iowrite32((unsigned long) next_req->trb_head, - &ep_context->trb_addr_lo); - } else { - struct mv_u3d_ep_context *ep_context; - ep_context = ep->ep_context; - ep_context->trb_addr_lo = 0; - ep_context->trb_addr_hi = 0; - } - - } else - WARN_ON(1); - - mv_u3d_done(ep, req, -ECONNRESET); - - /* remove the req from the ep req list */ - if (!list_empty(&ep->req_list)) { - struct mv_u3d_req *curr_req; - curr_req = list_entry(ep->req_list.next, - struct mv_u3d_req, list); - if (curr_req == req) { - list_del_init(&req->list); - ep->processing = 0; - } - } - -out: - spin_unlock_irqrestore(&ep->u3d->lock, flags); - return ret; -} - -static void -mv_u3d_ep_set_stall(struct mv_u3d *u3d, u8 ep_num, u8 direction, int stall) -{ - u32 tmp; - struct mv_u3d_ep *ep = u3d->eps; - - dev_dbg(u3d->dev, "%s\n", __func__); - if (direction == MV_U3D_EP_DIR_OUT) { - tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - if (stall) - tmp |= MV_U3D_EPXCR_EP_HALT; - else - tmp &= ~MV_U3D_EPXCR_EP_HALT; - iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxoutcr0); - } else { - tmp = ioread32(&u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - if (stall) - tmp |= MV_U3D_EPXCR_EP_HALT; - else - tmp &= ~MV_U3D_EPXCR_EP_HALT; - iowrite32(tmp, &u3d->vuc_regs->epcr[ep->ep_num].epxincr0); - } -} - -static int mv_u3d_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) -{ - struct mv_u3d_ep *ep; - unsigned long flags; - int status = 0; - struct mv_u3d *u3d; - - ep = container_of(_ep, struct mv_u3d_ep, ep); - u3d = ep->u3d; - if (!ep->ep.desc) { - status = -EINVAL; - goto out; - } - - if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { - status = -EOPNOTSUPP; - goto out; - } - - /* - * Attempt to halt IN ep will fail if any transfer requests - * are still queue - */ - if (halt && (mv_u3d_ep_dir(ep) == MV_U3D_EP_DIR_IN) - && !list_empty(&ep->queue)) { - status = -EAGAIN; - goto out; - } - - spin_lock_irqsave(&ep->u3d->lock, flags); - mv_u3d_ep_set_stall(u3d, ep->ep_num, mv_u3d_ep_dir(ep), halt); - if (halt && wedge) - ep->wedge = 1; - else if (!halt) - ep->wedge = 0; - spin_unlock_irqrestore(&ep->u3d->lock, flags); - - if (ep->ep_num == 0) - u3d->ep0_dir = MV_U3D_EP_DIR_OUT; -out: - return status; -} - -static int mv_u3d_ep_set_halt(struct usb_ep *_ep, int halt) -{ - return mv_u3d_ep_set_halt_wedge(_ep, halt, 0); -} - -static int mv_u3d_ep_set_wedge(struct usb_ep *_ep) -{ - return mv_u3d_ep_set_halt_wedge(_ep, 1, 1); -} - -static const struct usb_ep_ops mv_u3d_ep_ops = { - .enable = mv_u3d_ep_enable, - .disable = mv_u3d_ep_disable, - - .alloc_request = mv_u3d_alloc_request, - .free_request = mv_u3d_free_request, - - .queue = mv_u3d_ep_queue, - .dequeue = mv_u3d_ep_dequeue, - - .set_wedge = mv_u3d_ep_set_wedge, - .set_halt = mv_u3d_ep_set_halt, - .fifo_flush = mv_u3d_ep_fifo_flush, -}; - -static void mv_u3d_controller_stop(struct mv_u3d *u3d) -{ - u32 tmp; - - if (!u3d->clock_gating && u3d->vbus_valid_detect) - iowrite32(MV_U3D_INTR_ENABLE_VBUS_VALID, - &u3d->vuc_regs->intrenable); - else - iowrite32(0, &u3d->vuc_regs->intrenable); - iowrite32(~0x0, &u3d->vuc_regs->endcomplete); - iowrite32(~0x0, &u3d->vuc_regs->trbunderrun); - iowrite32(~0x0, &u3d->vuc_regs->trbcomplete); - iowrite32(~0x0, &u3d->vuc_regs->linkchange); - iowrite32(0x1, &u3d->vuc_regs->setuplock); - - /* Reset the RUN bit in the command register to stop USB */ - tmp = ioread32(&u3d->op_regs->usbcmd); - tmp &= ~MV_U3D_CMD_RUN_STOP; - iowrite32(tmp, &u3d->op_regs->usbcmd); - dev_dbg(u3d->dev, "after u3d_stop, USBCMD 0x%x\n", - 
ioread32(&u3d->op_regs->usbcmd)); -} - -static void mv_u3d_controller_start(struct mv_u3d *u3d) -{ - u32 usbintr; - u32 temp; - - /* enable link LTSSM state machine */ - temp = ioread32(&u3d->vuc_regs->ltssm); - temp |= MV_U3D_LTSSM_PHY_INIT_DONE; - iowrite32(temp, &u3d->vuc_regs->ltssm); - - /* Enable interrupts */ - usbintr = MV_U3D_INTR_ENABLE_LINK_CHG | MV_U3D_INTR_ENABLE_TXDESC_ERR | - MV_U3D_INTR_ENABLE_RXDESC_ERR | MV_U3D_INTR_ENABLE_TX_COMPLETE | - MV_U3D_INTR_ENABLE_RX_COMPLETE | MV_U3D_INTR_ENABLE_SETUP | - (u3d->vbus_valid_detect ? MV_U3D_INTR_ENABLE_VBUS_VALID : 0); - iowrite32(usbintr, &u3d->vuc_regs->intrenable); - - /* Enable ctrl ep */ - iowrite32(0x1, &u3d->vuc_regs->ctrlepenable); - - /* Set the Run bit in the command register */ - iowrite32(MV_U3D_CMD_RUN_STOP, &u3d->op_regs->usbcmd); - dev_dbg(u3d->dev, "after u3d_start, USBCMD 0x%x\n", - ioread32(&u3d->op_regs->usbcmd)); -} - -static int mv_u3d_controller_reset(struct mv_u3d *u3d) -{ - unsigned int loops; - u32 tmp; - - /* Stop the controller */ - tmp = ioread32(&u3d->op_regs->usbcmd); - tmp &= ~MV_U3D_CMD_RUN_STOP; - iowrite32(tmp, &u3d->op_regs->usbcmd); - - /* Reset the controller to get default values */ - iowrite32(MV_U3D_CMD_CTRL_RESET, &u3d->op_regs->usbcmd); - - /* wait for reset to complete */ - loops = LOOPS(MV_U3D_RESET_TIMEOUT); - while (ioread32(&u3d->op_regs->usbcmd) & MV_U3D_CMD_CTRL_RESET) { - if (loops == 0) { - dev_err(u3d->dev, - "Wait for RESET completed TIMEOUT\n"); - return -ETIMEDOUT; - } - loops--; - udelay(LOOPS_USEC); - } - - /* Configure the Endpoint Context Address */ - iowrite32(u3d->ep_context_dma, &u3d->op_regs->dcbaapl); - iowrite32(0, &u3d->op_regs->dcbaaph); - - return 0; -} - -static int mv_u3d_enable(struct mv_u3d *u3d) -{ - struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); - int retval; - - if (u3d->active) - return 0; - - if (!u3d->clock_gating) { - u3d->active = 1; - return 0; - } - - dev_dbg(u3d->dev, "enable u3d\n"); - clk_enable(u3d->clk); - if (pdata->phy_init) { - retval = pdata->phy_init(u3d->phy_regs); - if (retval) { - dev_err(u3d->dev, - "init phy error %d\n", retval); - clk_disable(u3d->clk); - return retval; - } - } - u3d->active = 1; - - return 0; -} - -static void mv_u3d_disable(struct mv_u3d *u3d) -{ - struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); - if (u3d->clock_gating && u3d->active) { - dev_dbg(u3d->dev, "disable u3d\n"); - if (pdata->phy_deinit) - pdata->phy_deinit(u3d->phy_regs); - clk_disable(u3d->clk); - u3d->active = 0; - } -} - -static int mv_u3d_vbus_session(struct usb_gadget *gadget, int is_active) -{ - struct mv_u3d *u3d; - unsigned long flags; - int retval = 0; - - u3d = container_of(gadget, struct mv_u3d, gadget); - - spin_lock_irqsave(&u3d->lock, flags); - - u3d->vbus_active = (is_active != 0); - dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n", - __func__, u3d->softconnect, u3d->vbus_active); - /* - * 1. external VBUS detect: we can disable/enable clock on demand. - * 2. UDC VBUS detect: we have to enable clock all the time. - * 3. No VBUS detect: we have to enable clock all the time. - */ - if (u3d->driver && u3d->softconnect && u3d->vbus_active) { - retval = mv_u3d_enable(u3d); - if (retval == 0) { - /* - * after clock is disabled, we lost all the register - * context. 
We have to re-init registers - */ - mv_u3d_controller_reset(u3d); - mv_u3d_ep0_reset(u3d); - mv_u3d_controller_start(u3d); - } - } else if (u3d->driver && u3d->softconnect) { - if (!u3d->active) - goto out; - - /* stop all the transfer in queue*/ - mv_u3d_stop_activity(u3d, u3d->driver); - mv_u3d_controller_stop(u3d); - mv_u3d_disable(u3d); - } - -out: - spin_unlock_irqrestore(&u3d->lock, flags); - return retval; -} - -/* constrain controller's VBUS power usage - * This call is used by gadget drivers during SET_CONFIGURATION calls, - * reporting how much power the device may consume. For example, this - * could affect how quickly batteries are recharged. - * - * Returns zero on success, else negative errno. - */ -static int mv_u3d_vbus_draw(struct usb_gadget *gadget, unsigned mA) -{ - struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget); - - u3d->power = mA; - - return 0; -} - -static int mv_u3d_pullup(struct usb_gadget *gadget, int is_on) -{ - struct mv_u3d *u3d = container_of(gadget, struct mv_u3d, gadget); - unsigned long flags; - int retval = 0; - - spin_lock_irqsave(&u3d->lock, flags); - - dev_dbg(u3d->dev, "%s: softconnect %d, vbus_active %d\n", - __func__, u3d->softconnect, u3d->vbus_active); - u3d->softconnect = (is_on != 0); - if (u3d->driver && u3d->softconnect && u3d->vbus_active) { - retval = mv_u3d_enable(u3d); - if (retval == 0) { - /* - * after clock is disabled, we lost all the register - * context. We have to re-init registers - */ - mv_u3d_controller_reset(u3d); - mv_u3d_ep0_reset(u3d); - mv_u3d_controller_start(u3d); - } - } else if (u3d->driver && u3d->vbus_active) { - /* stop all the transfer in queue*/ - mv_u3d_stop_activity(u3d, u3d->driver); - mv_u3d_controller_stop(u3d); - mv_u3d_disable(u3d); - } - - spin_unlock_irqrestore(&u3d->lock, flags); - - return retval; -} - -static int mv_u3d_start(struct usb_gadget *g, - struct usb_gadget_driver *driver) -{ - struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget); - struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); - unsigned long flags; - - if (u3d->driver) - return -EBUSY; - - spin_lock_irqsave(&u3d->lock, flags); - - if (!u3d->clock_gating) { - clk_enable(u3d->clk); - if (pdata->phy_init) - pdata->phy_init(u3d->phy_regs); - } - - /* hook up the driver ... 
*/ - u3d->driver = driver; - - u3d->ep0_dir = USB_DIR_OUT; - - spin_unlock_irqrestore(&u3d->lock, flags); - - u3d->vbus_valid_detect = 1; - - return 0; -} - -static int mv_u3d_stop(struct usb_gadget *g) -{ - struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget); - struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev); - unsigned long flags; - - u3d->vbus_valid_detect = 0; - spin_lock_irqsave(&u3d->lock, flags); - - /* enable clock to access controller register */ - clk_enable(u3d->clk); - if (pdata->phy_init) - pdata->phy_init(u3d->phy_regs); - - mv_u3d_controller_stop(u3d); - /* stop all usb activities */ - u3d->gadget.speed = USB_SPEED_UNKNOWN; - mv_u3d_stop_activity(u3d, NULL); - mv_u3d_disable(u3d); - - if (pdata->phy_deinit) - pdata->phy_deinit(u3d->phy_regs); - clk_disable(u3d->clk); - - spin_unlock_irqrestore(&u3d->lock, flags); - - u3d->driver = NULL; - - return 0; -} - -/* device controller usb_gadget_ops structure */ -static const struct usb_gadget_ops mv_u3d_ops = { - /* notify controller that VBUS is powered or not */ - .vbus_session = mv_u3d_vbus_session, - - /* constrain controller's VBUS power usage */ - .vbus_draw = mv_u3d_vbus_draw, - - .pullup = mv_u3d_pullup, - .udc_start = mv_u3d_start, - .udc_stop = mv_u3d_stop, -}; - -static int mv_u3d_eps_init(struct mv_u3d *u3d) -{ - struct mv_u3d_ep *ep; - char name[14]; - int i; - - /* initialize ep0, ep0 in/out use eps[1] */ - ep = &u3d->eps[1]; - ep->u3d = u3d; - strscpy(ep->name, "ep0"); - ep->ep.name = ep->name; - ep->ep.ops = &mv_u3d_ep_ops; - ep->wedge = 0; - usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE); - ep->ep.caps.type_control = true; - ep->ep.caps.dir_in = true; - ep->ep.caps.dir_out = true; - ep->ep_num = 0; - ep->ep.desc = &mv_u3d_ep0_desc; - INIT_LIST_HEAD(&ep->queue); - INIT_LIST_HEAD(&ep->req_list); - ep->ep_type = USB_ENDPOINT_XFER_CONTROL; - - /* add ep0 ep_context */ - ep->ep_context = &u3d->ep_context[1]; - - /* initialize other endpoints */ - for (i = 2; i < u3d->max_eps * 2; i++) { - ep = &u3d->eps[i]; - if (i & 1) { - snprintf(name, sizeof(name), "ep%din", i >> 1); - ep->direction = MV_U3D_EP_DIR_IN; - ep->ep.caps.dir_in = true; - } else { - snprintf(name, sizeof(name), "ep%dout", i >> 1); - ep->direction = MV_U3D_EP_DIR_OUT; - ep->ep.caps.dir_out = true; - } - ep->u3d = u3d; - strscpy(ep->name, name); - ep->ep.name = ep->name; - - ep->ep.caps.type_iso = true; - ep->ep.caps.type_bulk = true; - ep->ep.caps.type_int = true; - - ep->ep.ops = &mv_u3d_ep_ops; - usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); - ep->ep_num = i / 2; - - INIT_LIST_HEAD(&ep->queue); - list_add_tail(&ep->ep.ep_list, &u3d->gadget.ep_list); - - INIT_LIST_HEAD(&ep->req_list); - spin_lock_init(&ep->req_lock); - ep->ep_context = &u3d->ep_context[i]; - } - - return 0; -} - -/* delete all endpoint requests, called with spinlock held */ -static void mv_u3d_nuke(struct mv_u3d_ep *ep, int status) -{ - /* endpoint fifo flush */ - mv_u3d_ep_fifo_flush(&ep->ep); - - while (!list_empty(&ep->queue)) { - struct mv_u3d_req *req = NULL; - req = list_entry(ep->queue.next, struct mv_u3d_req, queue); - mv_u3d_done(ep, req, status); - } -} - -/* stop all USB activities */ -static -void mv_u3d_stop_activity(struct mv_u3d *u3d, struct usb_gadget_driver *driver) -{ - struct mv_u3d_ep *ep; - - mv_u3d_nuke(&u3d->eps[1], -ESHUTDOWN); - - list_for_each_entry(ep, &u3d->gadget.ep_list, ep.ep_list) { - mv_u3d_nuke(ep, -ESHUTDOWN); - } - - /* report disconnect; the driver is already quiesced */ - if (driver) { - 
spin_unlock(&u3d->lock); - driver->disconnect(&u3d->gadget); - spin_lock(&u3d->lock); - } -} - -static void mv_u3d_irq_process_error(struct mv_u3d *u3d) -{ - /* Increment the error count */ - u3d->errors++; - dev_err(u3d->dev, "%s\n", __func__); -} - -static void mv_u3d_irq_process_link_change(struct mv_u3d *u3d) -{ - u32 linkchange; - - linkchange = ioread32(&u3d->vuc_regs->linkchange); - iowrite32(linkchange, &u3d->vuc_regs->linkchange); - - dev_dbg(u3d->dev, "linkchange: 0x%x\n", linkchange); - - if (linkchange & MV_U3D_LINK_CHANGE_LINK_UP) { - dev_dbg(u3d->dev, "link up: ltssm state: 0x%x\n", - ioread32(&u3d->vuc_regs->ltssmstate)); - - u3d->usb_state = USB_STATE_DEFAULT; - u3d->ep0_dir = MV_U3D_EP_DIR_OUT; - u3d->ep0_state = MV_U3D_WAIT_FOR_SETUP; - - /* set speed */ - u3d->gadget.speed = USB_SPEED_SUPER; - } - - if (linkchange & MV_U3D_LINK_CHANGE_SUSPEND) { - dev_dbg(u3d->dev, "link suspend\n"); - u3d->resume_state = u3d->usb_state; - u3d->usb_state = USB_STATE_SUSPENDED; - } - - if (linkchange & MV_U3D_LINK_CHANGE_RESUME) { - dev_dbg(u3d->dev, "link resume\n"); - u3d->usb_state = u3d->resume_state; - u3d->resume_state = 0; - } - - if (linkchange & MV_U3D_LINK_CHANGE_WRESET) { - dev_dbg(u3d->dev, "warm reset\n"); - u3d->usb_state = USB_STATE_POWERED; - } - - if (linkchange & MV_U3D_LINK_CHANGE_HRESET) { - dev_dbg(u3d->dev, "hot reset\n"); - u3d->usb_state = USB_STATE_DEFAULT; - } - - if (linkchange & MV_U3D_LINK_CHANGE_INACT) - dev_dbg(u3d->dev, "inactive\n"); - - if (linkchange & MV_U3D_LINK_CHANGE_DISABLE_AFTER_U0) - dev_dbg(u3d->dev, "ss.disabled\n"); - - if (linkchange & MV_U3D_LINK_CHANGE_VBUS_INVALID) { - dev_dbg(u3d->dev, "vbus invalid\n"); - u3d->usb_state = USB_STATE_ATTACHED; - u3d->vbus_valid_detect = 1; - /* if external vbus detect is not supported, - * we handle it here. 
- */ - if (!u3d->vbus) { - spin_unlock(&u3d->lock); - mv_u3d_vbus_session(&u3d->gadget, 0); - spin_lock(&u3d->lock); - } - } -} - -static void mv_u3d_ch9setaddress(struct mv_u3d *u3d, - struct usb_ctrlrequest *setup) -{ - u32 tmp; - - if (u3d->usb_state != USB_STATE_DEFAULT) { - dev_err(u3d->dev, - "%s, cannot setaddr in this state (%d)\n", - __func__, u3d->usb_state); - goto err; - } - - u3d->dev_addr = (u8)setup->wValue; - - dev_dbg(u3d->dev, "%s: 0x%x\n", __func__, u3d->dev_addr); - - if (u3d->dev_addr > 127) { - dev_err(u3d->dev, - "%s, u3d address is wrong (out of range)\n", __func__); - u3d->dev_addr = 0; - goto err; - } - - /* update usb state */ - u3d->usb_state = USB_STATE_ADDRESS; - - /* set the new address */ - tmp = ioread32(&u3d->vuc_regs->devaddrtiebrkr); - tmp &= ~0x7F; - tmp |= (u32)u3d->dev_addr; - iowrite32(tmp, &u3d->vuc_regs->devaddrtiebrkr); - - return; -err: - mv_u3d_ep0_stall(u3d); -} - -static int mv_u3d_is_set_configuration(struct usb_ctrlrequest *setup) -{ - if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) - if (setup->bRequest == USB_REQ_SET_CONFIGURATION) - return 1; - - return 0; -} - -static void mv_u3d_handle_setup_packet(struct mv_u3d *u3d, u8 ep_num, - struct usb_ctrlrequest *setup) - __releases(&u3c->lock) - __acquires(&u3c->lock) -{ - bool delegate = false; - - mv_u3d_nuke(&u3d->eps[ep_num * 2 + MV_U3D_EP_DIR_IN], -ESHUTDOWN); - - dev_dbg(u3d->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", - setup->bRequestType, setup->bRequest, - setup->wValue, setup->wIndex, setup->wLength); - - /* We process some stardard setup requests here */ - if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { - switch (setup->bRequest) { - case USB_REQ_GET_STATUS: - delegate = true; - break; - - case USB_REQ_SET_ADDRESS: - mv_u3d_ch9setaddress(u3d, setup); - break; - - case USB_REQ_CLEAR_FEATURE: - delegate = true; - break; - - case USB_REQ_SET_FEATURE: - delegate = true; - break; - - default: - delegate = true; - } - } else - delegate = true; - - /* delegate USB standard requests to the gadget driver */ - if (delegate) { - /* USB requests handled by gadget */ - if (setup->wLength) { - /* DATA phase from gadget, STATUS phase from u3d */ - u3d->ep0_dir = (setup->bRequestType & USB_DIR_IN) - ? 
MV_U3D_EP_DIR_IN : MV_U3D_EP_DIR_OUT; - spin_unlock(&u3d->lock); - if (u3d->driver->setup(&u3d->gadget, - &u3d->local_setup_buff) < 0) { - dev_err(u3d->dev, "setup error!\n"); - mv_u3d_ep0_stall(u3d); - } - spin_lock(&u3d->lock); - } else { - /* no DATA phase, STATUS phase from gadget */ - u3d->ep0_dir = MV_U3D_EP_DIR_IN; - u3d->ep0_state = MV_U3D_STATUS_STAGE; - spin_unlock(&u3d->lock); - if (u3d->driver->setup(&u3d->gadget, - &u3d->local_setup_buff) < 0) - mv_u3d_ep0_stall(u3d); - spin_lock(&u3d->lock); - } - - if (mv_u3d_is_set_configuration(setup)) { - dev_dbg(u3d->dev, "u3d configured\n"); - u3d->usb_state = USB_STATE_CONFIGURED; - } - } -} - -static void mv_u3d_get_setup_data(struct mv_u3d *u3d, u8 ep_num, u8 *buffer_ptr) -{ - struct mv_u3d_ep_context *epcontext; - - epcontext = &u3d->ep_context[ep_num * 2 + MV_U3D_EP_DIR_IN]; - - /* Copy the setup packet to local buffer */ - memcpy(buffer_ptr, (u8 *) &epcontext->setup_buffer, 8); -} - -static void mv_u3d_irq_process_setup(struct mv_u3d *u3d) -{ - u32 tmp, i; - /* Process all Setup packet received interrupts */ - tmp = ioread32(&u3d->vuc_regs->setuplock); - if (tmp) { - for (i = 0; i < u3d->max_eps; i++) { - if (tmp & (1 << i)) { - mv_u3d_get_setup_data(u3d, i, - (u8 *)(&u3d->local_setup_buff)); - mv_u3d_handle_setup_packet(u3d, i, - &u3d->local_setup_buff); - } - } - } - - iowrite32(tmp, &u3d->vuc_regs->setuplock); -} - -static void mv_u3d_irq_process_tr_complete(struct mv_u3d *u3d) -{ - u32 tmp, bit_pos; - int i, ep_num = 0, direction = 0; - struct mv_u3d_ep *curr_ep; - struct mv_u3d_req *curr_req, *temp_req; - int status; - - tmp = ioread32(&u3d->vuc_regs->endcomplete); - - dev_dbg(u3d->dev, "tr_complete: ep: 0x%x\n", tmp); - if (!tmp) - return; - iowrite32(tmp, &u3d->vuc_regs->endcomplete); - - for (i = 0; i < u3d->max_eps * 2; i++) { - ep_num = i >> 1; - direction = i % 2; - - bit_pos = 1 << (ep_num + 16 * direction); - - if (!(bit_pos & tmp)) - continue; - - if (i == 0) - curr_ep = &u3d->eps[1]; - else - curr_ep = &u3d->eps[i]; - - /* remove req out of ep request list after completion */ - dev_dbg(u3d->dev, "tr comp: check req_list\n"); - spin_lock(&curr_ep->req_lock); - if (!list_empty(&curr_ep->req_list)) { - struct mv_u3d_req *req; - req = list_entry(curr_ep->req_list.next, - struct mv_u3d_req, list); - list_del_init(&req->list); - curr_ep->processing = 0; - } - spin_unlock(&curr_ep->req_lock); - - /* process the req queue until an uncomplete request */ - list_for_each_entry_safe(curr_req, temp_req, - &curr_ep->queue, queue) { - status = mv_u3d_process_ep_req(u3d, i, curr_req); - if (status) - break; - /* write back status to req */ - curr_req->req.status = status; - - /* ep0 request completion */ - if (ep_num == 0) { - mv_u3d_done(curr_ep, curr_req, 0); - break; - } else { - mv_u3d_done(curr_ep, curr_req, status); - } - } - - dev_dbg(u3d->dev, "call mv_u3d_start_queue from ep complete\n"); - mv_u3d_start_queue(curr_ep); - } -} - -static irqreturn_t mv_u3d_irq(int irq, void *dev) -{ - struct mv_u3d *u3d = (struct mv_u3d *)dev; - u32 status, intr; - u32 bridgesetting; - u32 trbunderrun; - - spin_lock(&u3d->lock); - - status = ioread32(&u3d->vuc_regs->intrcause); - intr = ioread32(&u3d->vuc_regs->intrenable); - status &= intr; - - if (status == 0) { - spin_unlock(&u3d->lock); - dev_err(u3d->dev, "irq error!\n"); - return IRQ_NONE; - } - - if (status & MV_U3D_USBINT_VBUS_VALID) { - bridgesetting = ioread32(&u3d->vuc_regs->bridgesetting); - if (bridgesetting & MV_U3D_BRIDGE_SETTING_VBUS_VALID) { - /* write vbus valid bit of 
bridge setting to clear */ - bridgesetting = MV_U3D_BRIDGE_SETTING_VBUS_VALID; - iowrite32(bridgesetting, &u3d->vuc_regs->bridgesetting); - dev_dbg(u3d->dev, "vbus valid\n"); - - u3d->usb_state = USB_STATE_POWERED; - u3d->vbus_valid_detect = 0; - /* if external vbus detect is not supported, - * we handle it here. - */ - if (!u3d->vbus) { - spin_unlock(&u3d->lock); - mv_u3d_vbus_session(&u3d->gadget, 1); - spin_lock(&u3d->lock); - } - } else - dev_err(u3d->dev, "vbus bit is not set\n"); - } - - /* RX data is already in the 16KB FIFO.*/ - if (status & MV_U3D_USBINT_UNDER_RUN) { - trbunderrun = ioread32(&u3d->vuc_regs->trbunderrun); - dev_err(u3d->dev, "under run, ep%d\n", trbunderrun); - iowrite32(trbunderrun, &u3d->vuc_regs->trbunderrun); - mv_u3d_irq_process_error(u3d); - } - - if (status & (MV_U3D_USBINT_RXDESC_ERR | MV_U3D_USBINT_TXDESC_ERR)) { - /* write one to clear */ - iowrite32(status & (MV_U3D_USBINT_RXDESC_ERR - | MV_U3D_USBINT_TXDESC_ERR), - &u3d->vuc_regs->intrcause); - dev_err(u3d->dev, "desc err 0x%x\n", status); - mv_u3d_irq_process_error(u3d); - } - - if (status & MV_U3D_USBINT_LINK_CHG) - mv_u3d_irq_process_link_change(u3d); - - if (status & MV_U3D_USBINT_TX_COMPLETE) - mv_u3d_irq_process_tr_complete(u3d); - - if (status & MV_U3D_USBINT_RX_COMPLETE) - mv_u3d_irq_process_tr_complete(u3d); - - if (status & MV_U3D_USBINT_SETUP) - mv_u3d_irq_process_setup(u3d); - - spin_unlock(&u3d->lock); - return IRQ_HANDLED; -} - -static void mv_u3d_remove(struct platform_device *dev) -{ - struct mv_u3d *u3d = platform_get_drvdata(dev); - - BUG_ON(u3d == NULL); - - usb_del_gadget_udc(&u3d->gadget); - - /* free memory allocated in probe */ - dma_pool_destroy(u3d->trb_pool); - - if (u3d->ep_context) - dma_free_coherent(&dev->dev, u3d->ep_context_size, - u3d->ep_context, u3d->ep_context_dma); - - kfree(u3d->eps); - - if (u3d->irq) - free_irq(u3d->irq, u3d); - - if (u3d->cap_regs) - iounmap(u3d->cap_regs); - u3d->cap_regs = NULL; - - kfree(u3d->status_req); - - clk_put(u3d->clk); - - kfree(u3d); -} - -static int mv_u3d_probe(struct platform_device *dev) -{ - struct mv_u3d *u3d; - struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev); - int retval = 0; - struct resource *r; - size_t size; - - if (!dev_get_platdata(&dev->dev)) { - dev_err(&dev->dev, "missing platform_data\n"); - retval = -ENODEV; - goto err_pdata; - } - - u3d = kzalloc(sizeof(*u3d), GFP_KERNEL); - if (!u3d) { - retval = -ENOMEM; - goto err_alloc_private; - } - - spin_lock_init(&u3d->lock); - - platform_set_drvdata(dev, u3d); - - u3d->dev = &dev->dev; - u3d->vbus = pdata->vbus; - - u3d->clk = clk_get(&dev->dev, NULL); - if (IS_ERR(u3d->clk)) { - retval = PTR_ERR(u3d->clk); - goto err_get_clk; - } - - r = platform_get_resource_byname(dev, IORESOURCE_MEM, "capregs"); - if (!r) { - dev_err(&dev->dev, "no I/O memory resource defined\n"); - retval = -ENODEV; - goto err_get_cap_regs; - } - - u3d->cap_regs = (struct mv_u3d_cap_regs __iomem *) - ioremap(r->start, resource_size(r)); - if (!u3d->cap_regs) { - dev_err(&dev->dev, "failed to map I/O memory\n"); - retval = -EBUSY; - goto err_map_cap_regs; - } else { - dev_dbg(&dev->dev, "cap_regs address: 0x%lx/0x%lx\n", - (unsigned long) r->start, - (unsigned long) u3d->cap_regs); - } - - /* we will access controller register, so enable the u3d controller */ - retval = clk_enable(u3d->clk); - if (retval) { - dev_err(&dev->dev, "clk_enable error %d\n", retval); - goto err_u3d_enable; - } - - if (pdata->phy_init) { - retval = pdata->phy_init(u3d->phy_regs); - if (retval) { - 
dev_err(&dev->dev, "init phy error %d\n", retval); - clk_disable(u3d->clk); - goto err_phy_init; - } - } - - u3d->op_regs = (struct mv_u3d_op_regs __iomem *)(u3d->cap_regs - + MV_U3D_USB3_OP_REGS_OFFSET); - - u3d->vuc_regs = (struct mv_u3d_vuc_regs __iomem *)(u3d->cap_regs - + ioread32(&u3d->cap_regs->vuoff)); - - u3d->max_eps = 16; - - /* - * some platform will use usb to download image, it may not disconnect - * usb gadget before loading kernel. So first stop u3d here. - */ - mv_u3d_controller_stop(u3d); - iowrite32(0xFFFFFFFF, &u3d->vuc_regs->intrcause); - - if (pdata->phy_deinit) - pdata->phy_deinit(u3d->phy_regs); - clk_disable(u3d->clk); - - size = u3d->max_eps * sizeof(struct mv_u3d_ep_context) * 2; - size = (size + MV_U3D_EP_CONTEXT_ALIGNMENT - 1) - & ~(MV_U3D_EP_CONTEXT_ALIGNMENT - 1); - u3d->ep_context = dma_alloc_coherent(&dev->dev, size, - &u3d->ep_context_dma, GFP_KERNEL); - if (!u3d->ep_context) { - dev_err(&dev->dev, "allocate ep context memory failed\n"); - retval = -ENOMEM; - goto err_alloc_ep_context; - } - u3d->ep_context_size = size; - - /* create TRB dma_pool resource */ - u3d->trb_pool = dma_pool_create("u3d_trb", - &dev->dev, - sizeof(struct mv_u3d_trb_hw), - MV_U3D_TRB_ALIGNMENT, - MV_U3D_DMA_BOUNDARY); - - if (!u3d->trb_pool) { - retval = -ENOMEM; - goto err_alloc_trb_pool; - } - - size = u3d->max_eps * sizeof(struct mv_u3d_ep) * 2; - u3d->eps = kzalloc(size, GFP_KERNEL); - if (!u3d->eps) { - retval = -ENOMEM; - goto err_alloc_eps; - } - - /* initialize ep0 status request structure */ - u3d->status_req = kzalloc(sizeof(struct mv_u3d_req) + 8, GFP_KERNEL); - if (!u3d->status_req) { - retval = -ENOMEM; - goto err_alloc_status_req; - } - INIT_LIST_HEAD(&u3d->status_req->queue); - - /* allocate a small amount of memory to get valid address */ - u3d->status_req->req.buf = (char *)u3d->status_req - + sizeof(struct mv_u3d_req); - u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf); - - u3d->resume_state = USB_STATE_NOTATTACHED; - u3d->usb_state = USB_STATE_ATTACHED; - u3d->ep0_dir = MV_U3D_EP_DIR_OUT; - u3d->remote_wakeup = 0; - - r = platform_get_resource(dev, IORESOURCE_IRQ, 0); - if (!r) { - dev_err(&dev->dev, "no IRQ resource defined\n"); - retval = -ENODEV; - goto err_get_irq; - } - u3d->irq = r->start; - - /* initialize gadget structure */ - u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */ - u3d->gadget.ep0 = &u3d->eps[1].ep; /* gadget ep0 */ - INIT_LIST_HEAD(&u3d->gadget.ep_list); /* ep_list */ - u3d->gadget.speed = USB_SPEED_UNKNOWN; /* speed */ - - /* the "gadget" abstracts/virtualizes the controller */ - u3d->gadget.name = driver_name; /* gadget name */ - - mv_u3d_eps_init(u3d); - - if (request_irq(u3d->irq, mv_u3d_irq, - IRQF_SHARED, driver_name, u3d)) { - u3d->irq = 0; - dev_err(&dev->dev, "Request irq %d for u3d failed\n", - u3d->irq); - retval = -ENODEV; - goto err_request_irq; - } - - /* external vbus detection */ - if (u3d->vbus) { - u3d->clock_gating = 1; - dev_err(&dev->dev, "external vbus detection\n"); - } - - if (!u3d->clock_gating) - u3d->vbus_active = 1; - - /* enable usb3 controller vbus detection */ - u3d->vbus_valid_detect = 1; - - retval = usb_add_gadget_udc(&dev->dev, &u3d->gadget); - if (retval) - goto err_unregister; - - dev_dbg(&dev->dev, "successful probe usb3 device %s clock gating.\n", - u3d->clock_gating ? 
"with" : "without"); - - return 0; - -err_unregister: - free_irq(u3d->irq, u3d); -err_get_irq: -err_request_irq: - kfree(u3d->status_req); -err_alloc_status_req: - kfree(u3d->eps); -err_alloc_eps: - dma_pool_destroy(u3d->trb_pool); -err_alloc_trb_pool: - dma_free_coherent(&dev->dev, u3d->ep_context_size, - u3d->ep_context, u3d->ep_context_dma); -err_alloc_ep_context: -err_phy_init: -err_u3d_enable: - iounmap(u3d->cap_regs); -err_map_cap_regs: -err_get_cap_regs: - clk_put(u3d->clk); -err_get_clk: - kfree(u3d); -err_alloc_private: -err_pdata: - return retval; -} - -#ifdef CONFIG_PM_SLEEP -static int mv_u3d_suspend(struct device *dev) -{ - struct mv_u3d *u3d = dev_get_drvdata(dev); - - /* - * only cable is unplugged, usb can suspend. - * So do not care about clock_gating == 1, it is handled by - * vbus session. - */ - if (!u3d->clock_gating) { - mv_u3d_controller_stop(u3d); - - spin_lock_irq(&u3d->lock); - /* stop all usb activities */ - mv_u3d_stop_activity(u3d, u3d->driver); - spin_unlock_irq(&u3d->lock); - - mv_u3d_disable(u3d); - } - - return 0; -} - -static int mv_u3d_resume(struct device *dev) -{ - struct mv_u3d *u3d = dev_get_drvdata(dev); - int retval; - - if (!u3d->clock_gating) { - retval = mv_u3d_enable(u3d); - if (retval) - return retval; - - if (u3d->driver && u3d->softconnect) { - mv_u3d_controller_reset(u3d); - mv_u3d_ep0_reset(u3d); - mv_u3d_controller_start(u3d); - } - } - - return 0; -} -#endif - -static SIMPLE_DEV_PM_OPS(mv_u3d_pm_ops, mv_u3d_suspend, mv_u3d_resume); - -static void mv_u3d_shutdown(struct platform_device *dev) -{ - struct mv_u3d *u3d = platform_get_drvdata(dev); - u32 tmp; - - tmp = ioread32(&u3d->op_regs->usbcmd); - tmp &= ~MV_U3D_CMD_RUN_STOP; - iowrite32(tmp, &u3d->op_regs->usbcmd); -} - -static struct platform_driver mv_u3d_driver = { - .probe = mv_u3d_probe, - .remove = mv_u3d_remove, - .shutdown = mv_u3d_shutdown, - .driver = { - .name = "mv-u3d", - .pm = &mv_u3d_pm_ops, - }, -}; - -module_platform_driver(mv_u3d_driver); -MODULE_ALIAS("platform:mv-u3d"); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR("Yu Xu <yuxu@marvell.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/udc/mv_udc.h b/drivers/usb/gadget/udc/mv_udc.h deleted file mode 100644 index b3f759c0962c..000000000000 --- a/drivers/usb/gadget/udc/mv_udc.h +++ /dev/null @@ -1,309 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2011 Marvell International Ltd. All rights reserved. 
- */ - -#ifndef __MV_UDC_H -#define __MV_UDC_H - -#define VUSBHS_MAX_PORTS 8 - -#define DQH_ALIGNMENT 2048 -#define DTD_ALIGNMENT 64 -#define DMA_BOUNDARY 4096 - -#define EP_DIR_IN 1 -#define EP_DIR_OUT 0 - -#define DMA_ADDR_INVALID (~(dma_addr_t)0) - -#define EP0_MAX_PKT_SIZE 64 -/* ep0 transfer state */ -#define WAIT_FOR_SETUP 0 -#define DATA_STATE_XMIT 1 -#define DATA_STATE_NEED_ZLP 2 -#define WAIT_FOR_OUT_STATUS 3 -#define DATA_STATE_RECV 4 - -#define CAPLENGTH_MASK (0xff) -#define DCCPARAMS_DEN_MASK (0x1f) - -#define HCSPARAMS_PPC (0x10) - -/* Frame Index Register Bit Masks */ -#define USB_FRINDEX_MASKS 0x3fff - -/* Command Register Bit Masks */ -#define USBCMD_RUN_STOP (0x00000001) -#define USBCMD_CTRL_RESET (0x00000002) -#define USBCMD_SETUP_TRIPWIRE_SET (0x00002000) -#define USBCMD_SETUP_TRIPWIRE_CLEAR (~USBCMD_SETUP_TRIPWIRE_SET) - -#define USBCMD_ATDTW_TRIPWIRE_SET (0x00004000) -#define USBCMD_ATDTW_TRIPWIRE_CLEAR (~USBCMD_ATDTW_TRIPWIRE_SET) - -/* bit 15,3,2 are for frame list size */ -#define USBCMD_FRAME_SIZE_1024 (0x00000000) /* 000 */ -#define USBCMD_FRAME_SIZE_512 (0x00000004) /* 001 */ -#define USBCMD_FRAME_SIZE_256 (0x00000008) /* 010 */ -#define USBCMD_FRAME_SIZE_128 (0x0000000C) /* 011 */ -#define USBCMD_FRAME_SIZE_64 (0x00008000) /* 100 */ -#define USBCMD_FRAME_SIZE_32 (0x00008004) /* 101 */ -#define USBCMD_FRAME_SIZE_16 (0x00008008) /* 110 */ -#define USBCMD_FRAME_SIZE_8 (0x0000800C) /* 111 */ - -#define EPCTRL_TX_ALL_MASK (0xFFFF0000) -#define EPCTRL_RX_ALL_MASK (0x0000FFFF) - -#define EPCTRL_TX_DATA_TOGGLE_RST (0x00400000) -#define EPCTRL_TX_EP_STALL (0x00010000) -#define EPCTRL_RX_EP_STALL (0x00000001) -#define EPCTRL_RX_DATA_TOGGLE_RST (0x00000040) -#define EPCTRL_RX_ENABLE (0x00000080) -#define EPCTRL_TX_ENABLE (0x00800000) -#define EPCTRL_CONTROL (0x00000000) -#define EPCTRL_ISOCHRONOUS (0x00040000) -#define EPCTRL_BULK (0x00080000) -#define EPCTRL_INT (0x000C0000) -#define EPCTRL_TX_TYPE (0x000C0000) -#define EPCTRL_RX_TYPE (0x0000000C) -#define EPCTRL_DATA_TOGGLE_INHIBIT (0x00000020) -#define EPCTRL_TX_EP_TYPE_SHIFT (18) -#define EPCTRL_RX_EP_TYPE_SHIFT (2) - -#define EPCOMPLETE_MAX_ENDPOINTS (16) - -/* endpoint list address bit masks */ -#define USB_EP_LIST_ADDRESS_MASK 0xfffff800 - -#define PORTSCX_W1C_BITS 0x2a -#define PORTSCX_PORT_RESET 0x00000100 -#define PORTSCX_PORT_POWER 0x00001000 -#define PORTSCX_FORCE_FULL_SPEED_CONNECT 0x01000000 -#define PORTSCX_PAR_XCVR_SELECT 0xC0000000 -#define PORTSCX_PORT_FORCE_RESUME 0x00000040 -#define PORTSCX_PORT_SUSPEND 0x00000080 -#define PORTSCX_PORT_SPEED_FULL 0x00000000 -#define PORTSCX_PORT_SPEED_LOW 0x04000000 -#define PORTSCX_PORT_SPEED_HIGH 0x08000000 -#define PORTSCX_PORT_SPEED_MASK 0x0C000000 - -/* USB MODE Register Bit Masks */ -#define USBMODE_CTRL_MODE_IDLE 0x00000000 -#define USBMODE_CTRL_MODE_DEVICE 0x00000002 -#define USBMODE_CTRL_MODE_HOST 0x00000003 -#define USBMODE_CTRL_MODE_RSV 0x00000001 -#define USBMODE_SETUP_LOCK_OFF 0x00000008 -#define USBMODE_STREAM_DISABLE 0x00000010 - -/* USB STS Register Bit Masks */ -#define USBSTS_INT 0x00000001 -#define USBSTS_ERR 0x00000002 -#define USBSTS_PORT_CHANGE 0x00000004 -#define USBSTS_FRM_LST_ROLL 0x00000008 -#define USBSTS_SYS_ERR 0x00000010 -#define USBSTS_IAA 0x00000020 -#define USBSTS_RESET 0x00000040 -#define USBSTS_SOF 0x00000080 -#define USBSTS_SUSPEND 0x00000100 -#define USBSTS_HC_HALTED 0x00001000 -#define USBSTS_RCL 0x00002000 -#define USBSTS_PERIODIC_SCHEDULE 0x00004000 -#define USBSTS_ASYNC_SCHEDULE 0x00008000 - - -/* Interrupt Enable Register Bit 
Masks */ -#define USBINTR_INT_EN (0x00000001) -#define USBINTR_ERR_INT_EN (0x00000002) -#define USBINTR_PORT_CHANGE_DETECT_EN (0x00000004) - -#define USBINTR_ASYNC_ADV_AAE (0x00000020) -#define USBINTR_ASYNC_ADV_AAE_ENABLE (0x00000020) -#define USBINTR_ASYNC_ADV_AAE_DISABLE (0xFFFFFFDF) - -#define USBINTR_RESET_EN (0x00000040) -#define USBINTR_SOF_UFRAME_EN (0x00000080) -#define USBINTR_DEVICE_SUSPEND (0x00000100) - -#define USB_DEVICE_ADDRESS_MASK (0xfe000000) -#define USB_DEVICE_ADDRESS_BIT_SHIFT (25) - -struct mv_cap_regs { - u32 caplength_hciversion; - u32 hcsparams; /* HC structural parameters */ - u32 hccparams; /* HC Capability Parameters*/ - u32 reserved[5]; - u32 dciversion; /* DC version number and reserved 16 bits */ - u32 dccparams; /* DC Capability Parameters */ -}; - -struct mv_op_regs { - u32 usbcmd; /* Command register */ - u32 usbsts; /* Status register */ - u32 usbintr; /* Interrupt enable */ - u32 frindex; /* Frame index */ - u32 reserved1[1]; - u32 deviceaddr; /* Device Address */ - u32 eplistaddr; /* Endpoint List Address */ - u32 ttctrl; /* HOST TT status and control */ - u32 burstsize; /* Programmable Burst Size */ - u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */ - u32 reserved[4]; - u32 epnak; /* Endpoint NAK */ - u32 epnaken; /* Endpoint NAK Enable */ - u32 configflag; /* Configured Flag register */ - u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */ - u32 otgsc; - u32 usbmode; /* USB Host/Device mode */ - u32 epsetupstat; /* Endpoint Setup Status */ - u32 epprime; /* Endpoint Initialize */ - u32 epflush; /* Endpoint De-initialize */ - u32 epstatus; /* Endpoint Status */ - u32 epcomplete; /* Endpoint Interrupt On Complete */ - u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */ - u32 mcr; /* Mux Control */ - u32 isr; /* Interrupt Status */ - u32 ier; /* Interrupt Enable */ -}; - -struct mv_udc { - struct usb_gadget gadget; - struct usb_gadget_driver *driver; - spinlock_t lock; - struct completion *done; - struct platform_device *dev; - int irq; - - struct mv_cap_regs __iomem *cap_regs; - struct mv_op_regs __iomem *op_regs; - void __iomem *phy_regs; - unsigned int max_eps; - struct mv_dqh *ep_dqh; - size_t ep_dqh_size; - dma_addr_t ep_dqh_dma; - - struct dma_pool *dtd_pool; - struct mv_ep *eps; - - struct mv_dtd *dtd_head; - struct mv_dtd *dtd_tail; - unsigned int dtd_entries; - - struct mv_req *status_req; - struct usb_ctrlrequest local_setup_buff; - - unsigned int resume_state; /* USB state to resume */ - unsigned int usb_state; /* USB current state */ - unsigned int ep0_state; /* Endpoint zero state */ - unsigned int ep0_dir; - - unsigned int dev_addr; - unsigned int test_mode; - - int errors; - unsigned softconnect:1, - vbus_active:1, - remote_wakeup:1, - softconnected:1, - force_fs:1, - clock_gating:1, - active:1, - stopped:1; /* stop bit is setted */ - - struct work_struct vbus_work; - struct workqueue_struct *qwork; - - struct usb_phy *transceiver; - - struct mv_usb_platform_data *pdata; - - /* some SOC has mutiple clock sources for USB*/ - struct clk *clk; -}; - -/* endpoint data structure */ -struct mv_ep { - struct usb_ep ep; - struct mv_udc *udc; - struct list_head queue; - struct mv_dqh *dqh; - u32 direction; - char name[14]; - unsigned stopped:1, - wedge:1, - ep_type:2, - ep_num:8; -}; - -/* request data structure */ -struct mv_req { - struct usb_request req; - struct mv_dtd *dtd, *head, *tail; - struct mv_ep *ep; - struct list_head queue; - unsigned int test_mode; - unsigned dtd_count; - unsigned mapped:1; -}; - 
-#define EP_QUEUE_HEAD_MULT_POS 30 -#define EP_QUEUE_HEAD_ZLT_SEL 0x20000000 -#define EP_QUEUE_HEAD_MAX_PKT_LEN_POS 16 -#define EP_QUEUE_HEAD_MAX_PKT_LEN(ep_info) (((ep_info)>>16)&0x07ff) -#define EP_QUEUE_HEAD_IOS 0x00008000 -#define EP_QUEUE_HEAD_NEXT_TERMINATE 0x00000001 -#define EP_QUEUE_HEAD_IOC 0x00008000 -#define EP_QUEUE_HEAD_MULTO 0x00000C00 -#define EP_QUEUE_HEAD_STATUS_HALT 0x00000040 -#define EP_QUEUE_HEAD_STATUS_ACTIVE 0x00000080 -#define EP_QUEUE_CURRENT_OFFSET_MASK 0x00000FFF -#define EP_QUEUE_HEAD_NEXT_POINTER_MASK 0xFFFFFFE0 -#define EP_QUEUE_FRINDEX_MASK 0x000007FF -#define EP_MAX_LENGTH_TRANSFER 0x4000 - -struct mv_dqh { - /* Bits 16..26 Bit 15 is Interrupt On Setup */ - u32 max_packet_length; - u32 curr_dtd_ptr; /* Current dTD Pointer */ - u32 next_dtd_ptr; /* Next dTD Pointer */ - /* Total bytes (16..30), IOC (15), INT (8), STS (0-7) */ - u32 size_ioc_int_sts; - u32 buff_ptr0; /* Buffer pointer Page 0 (12-31) */ - u32 buff_ptr1; /* Buffer pointer Page 1 (12-31) */ - u32 buff_ptr2; /* Buffer pointer Page 2 (12-31) */ - u32 buff_ptr3; /* Buffer pointer Page 3 (12-31) */ - u32 buff_ptr4; /* Buffer pointer Page 4 (12-31) */ - u32 reserved1; - /* 8 bytes of setup data that follows the Setup PID */ - u8 setup_buffer[8]; - u32 reserved2[4]; -}; - - -#define DTD_NEXT_TERMINATE (0x00000001) -#define DTD_IOC (0x00008000) -#define DTD_STATUS_ACTIVE (0x00000080) -#define DTD_STATUS_HALTED (0x00000040) -#define DTD_STATUS_DATA_BUFF_ERR (0x00000020) -#define DTD_STATUS_TRANSACTION_ERR (0x00000008) -#define DTD_RESERVED_FIELDS (0x00007F00) -#define DTD_ERROR_MASK (0x68) -#define DTD_ADDR_MASK (0xFFFFFFE0) -#define DTD_PACKET_SIZE 0x7FFF0000 -#define DTD_LENGTH_BIT_POS (16) - -struct mv_dtd { - u32 dtd_next; - u32 size_ioc_sts; - u32 buff_ptr0; /* Buffer pointer Page 0 */ - u32 buff_ptr1; /* Buffer pointer Page 1 */ - u32 buff_ptr2; /* Buffer pointer Page 2 */ - u32 buff_ptr3; /* Buffer pointer Page 3 */ - u32 buff_ptr4; /* Buffer pointer Page 4 */ - u32 scratch_ptr; - /* 32 bytes */ - dma_addr_t td_dma; /* dma address for this td */ - struct mv_dtd *next_dtd_virt; -}; - -#endif diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c deleted file mode 100644 index ff103e6b3048..000000000000 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ /dev/null @@ -1,2426 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2011 Marvell International Ltd. All rights reserved. - * Author: Chao Xie <chao.xie@marvell.com> - * Neil Zhang <zhangwm@marvell.com> - */ - -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/dmapool.h> -#include <linux/kernel.h> -#include <linux/delay.h> -#include <linux/ioport.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/timer.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/moduleparam.h> -#include <linux/device.h> -#include <linux/usb/ch9.h> -#include <linux/usb/gadget.h> -#include <linux/usb/otg.h> -#include <linux/pm.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/platform_device.h> -#include <linux/clk.h> -#include <linux/platform_data/mv_usb.h> -#include <linux/unaligned.h> - -#include "mv_udc.h" - -#define DRIVER_DESC "Marvell PXA USB Device Controller driver" - -#define ep_dir(ep) (((ep)->ep_num == 0) ? 
\ - ((ep)->udc->ep0_dir) : ((ep)->direction)) - -/* timeout value -- usec */ -#define RESET_TIMEOUT 10000 -#define FLUSH_TIMEOUT 10000 -#define EPSTATUS_TIMEOUT 10000 -#define PRIME_TIMEOUT 10000 -#define READSAFE_TIMEOUT 1000 - -#define LOOPS_USEC_SHIFT 1 -#define LOOPS_USEC (1 << LOOPS_USEC_SHIFT) -#define LOOPS(timeout) ((timeout) >> LOOPS_USEC_SHIFT) - -static DECLARE_COMPLETION(release_done); - -static const char driver_name[] = "mv_udc"; - -static void nuke(struct mv_ep *ep, int status); -static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver); - -/* for endpoint 0 operations */ -static const struct usb_endpoint_descriptor mv_ep0_desc = { - .bLength = USB_DT_ENDPOINT_SIZE, - .bDescriptorType = USB_DT_ENDPOINT, - .bEndpointAddress = 0, - .bmAttributes = USB_ENDPOINT_XFER_CONTROL, - .wMaxPacketSize = EP0_MAX_PKT_SIZE, -}; - -static void ep0_reset(struct mv_udc *udc) -{ - struct mv_ep *ep; - u32 epctrlx; - int i = 0; - - /* ep0 in and out */ - for (i = 0; i < 2; i++) { - ep = &udc->eps[i]; - ep->udc = udc; - - /* ep0 dQH */ - ep->dqh = &udc->ep_dqh[i]; - - /* configure ep0 endpoint capabilities in dQH */ - ep->dqh->max_packet_length = - (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) - | EP_QUEUE_HEAD_IOS; - - ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; - - epctrlx = readl(&udc->op_regs->epctrlx[0]); - if (i) { /* TX */ - epctrlx |= EPCTRL_TX_ENABLE - | (USB_ENDPOINT_XFER_CONTROL - << EPCTRL_TX_EP_TYPE_SHIFT); - - } else { /* RX */ - epctrlx |= EPCTRL_RX_ENABLE - | (USB_ENDPOINT_XFER_CONTROL - << EPCTRL_RX_EP_TYPE_SHIFT); - } - - writel(epctrlx, &udc->op_regs->epctrlx[0]); - } -} - -/* protocol ep0 stall, will automatically be cleared on new transaction */ -static void ep0_stall(struct mv_udc *udc) -{ - u32 epctrlx; - - /* set TX and RX to stall */ - epctrlx = readl(&udc->op_regs->epctrlx[0]); - epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL; - writel(epctrlx, &udc->op_regs->epctrlx[0]); - - /* update ep0 state */ - udc->ep0_state = WAIT_FOR_SETUP; - udc->ep0_dir = EP_DIR_OUT; -} - -static int process_ep_req(struct mv_udc *udc, int index, - struct mv_req *curr_req) -{ - struct mv_dtd *curr_dtd; - struct mv_dqh *curr_dqh; - int actual, remaining_length; - int i, direction; - int retval = 0; - u32 errors; - u32 bit_pos; - - curr_dqh = &udc->ep_dqh[index]; - direction = index % 2; - - curr_dtd = curr_req->head; - actual = curr_req->req.length; - - for (i = 0; i < curr_req->dtd_count; i++) { - if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) { - dev_dbg(&udc->dev->dev, "%s, dTD not completed\n", - udc->eps[index].name); - return 1; - } - - errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK; - if (!errors) { - remaining_length = - (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE) - >> DTD_LENGTH_BIT_POS; - actual -= remaining_length; - - if (remaining_length) { - if (direction) { - dev_dbg(&udc->dev->dev, - "TX dTD remains data\n"); - retval = -EPROTO; - break; - } else - break; - } - } else { - dev_info(&udc->dev->dev, - "complete_tr error: ep=%d %s: error = 0x%x\n", - index >> 1, direction ? 
"SEND" : "RECV", - errors); - if (errors & DTD_STATUS_HALTED) { - /* Clear the errors and Halt condition */ - curr_dqh->size_ioc_int_sts &= ~errors; - retval = -EPIPE; - } else if (errors & DTD_STATUS_DATA_BUFF_ERR) { - retval = -EPROTO; - } else if (errors & DTD_STATUS_TRANSACTION_ERR) { - retval = -EILSEQ; - } - } - if (i != curr_req->dtd_count - 1) - curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt; - } - if (retval) - return retval; - - if (direction == EP_DIR_OUT) - bit_pos = 1 << curr_req->ep->ep_num; - else - bit_pos = 1 << (16 + curr_req->ep->ep_num); - - while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) { - if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) { - while (readl(&udc->op_regs->epstatus) & bit_pos) - udelay(1); - break; - } - udelay(1); - } - - curr_req->req.actual = actual; - - return 0; -} - -/* - * done() - retire a request; caller blocked irqs - * @status : request status to be set, only works when - * request is still in progress. - */ -static void done(struct mv_ep *ep, struct mv_req *req, int status) - __releases(&ep->udc->lock) - __acquires(&ep->udc->lock) -{ - struct mv_udc *udc = NULL; - unsigned char stopped = ep->stopped; - struct mv_dtd *curr_td, *next_td; - int j; - - udc = (struct mv_udc *)ep->udc; - /* Removed the req from fsl_ep->queue */ - list_del_init(&req->queue); - - /* req.status should be set as -EINPROGRESS in ep_queue() */ - if (req->req.status == -EINPROGRESS) - req->req.status = status; - else - status = req->req.status; - - /* Free dtd for the request */ - next_td = req->head; - for (j = 0; j < req->dtd_count; j++) { - curr_td = next_td; - if (j != req->dtd_count - 1) - next_td = curr_td->next_dtd_virt; - dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma); - } - - usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); - - if (status && (status != -ESHUTDOWN)) - dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u", - ep->ep.name, &req->req, status, - req->req.actual, req->req.length); - - ep->stopped = 1; - - spin_unlock(&ep->udc->lock); - - usb_gadget_giveback_request(&ep->ep, &req->req); - - spin_lock(&ep->udc->lock); - ep->stopped = stopped; -} - -static int queue_dtd(struct mv_ep *ep, struct mv_req *req) -{ - struct mv_udc *udc; - struct mv_dqh *dqh; - u32 bit_pos, direction; - u32 usbcmd, epstatus; - unsigned int loops; - int retval = 0; - - udc = ep->udc; - direction = ep_dir(ep); - dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]); - bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num); - - /* check if the pipe is empty */ - if (!(list_empty(&ep->queue))) { - struct mv_req *lastreq; - lastreq = list_entry(ep->queue.prev, struct mv_req, queue); - lastreq->tail->dtd_next = - req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; - - wmb(); - - if (readl(&udc->op_regs->epprime) & bit_pos) - goto done; - - loops = LOOPS(READSAFE_TIMEOUT); - while (1) { - /* start with setting the semaphores */ - usbcmd = readl(&udc->op_regs->usbcmd); - usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET; - writel(usbcmd, &udc->op_regs->usbcmd); - - /* read the endpoint status */ - epstatus = readl(&udc->op_regs->epstatus) & bit_pos; - - /* - * Reread the ATDTW semaphore bit to check if it is - * cleared. When hardware see a hazard, it will clear - * the bit or else we remain set to 1 and we can - * proceed with priming of endpoint if not already - * primed. 
- */ - if (readl(&udc->op_regs->usbcmd) - & USBCMD_ATDTW_TRIPWIRE_SET) - break; - - loops--; - if (loops == 0) { - dev_err(&udc->dev->dev, - "Timeout for ATDTW_TRIPWIRE...\n"); - retval = -ETIME; - goto done; - } - udelay(LOOPS_USEC); - } - - /* Clear the semaphore */ - usbcmd = readl(&udc->op_regs->usbcmd); - usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR; - writel(usbcmd, &udc->op_regs->usbcmd); - - if (epstatus) - goto done; - } - - /* Write dQH next pointer and terminate bit to 0 */ - dqh->next_dtd_ptr = req->head->td_dma - & EP_QUEUE_HEAD_NEXT_POINTER_MASK; - - /* clear active and halt bit, in case set from a previous error */ - dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); - - /* Ensure that updates to the QH will occur before priming. */ - wmb(); - - /* Prime the Endpoint */ - writel(bit_pos, &udc->op_regs->epprime); - -done: - return retval; -} - -static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, - dma_addr_t *dma, int *is_last) -{ - struct mv_dtd *dtd; - struct mv_udc *udc; - struct mv_dqh *dqh; - u32 temp, mult = 0; - - /* how big will this transfer be? */ - if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) { - dqh = req->ep->dqh; - mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS) - & 0x3; - *length = min(req->req.length - req->req.actual, - (unsigned)(mult * req->ep->ep.maxpacket)); - } else - *length = min(req->req.length - req->req.actual, - (unsigned)EP_MAX_LENGTH_TRANSFER); - - udc = req->ep->udc; - - /* - * Be careful that no _GFP_HIGHMEM is set, - * or we can not use dma_to_virt - */ - dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma); - if (dtd == NULL) - return dtd; - - dtd->td_dma = *dma; - /* initialize buffer page pointers */ - temp = (u32)(req->req.dma + req->req.actual); - dtd->buff_ptr0 = cpu_to_le32(temp); - temp &= ~0xFFF; - dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000); - dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000); - dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000); - dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000); - - req->req.actual += *length; - - /* zlp is needed if req->req.zero is set */ - if (req->req.zero) { - if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) - *is_last = 1; - else - *is_last = 0; - } else if (req->req.length == req->req.actual) - *is_last = 1; - else - *is_last = 0; - - /* Fill in the transfer size; set active bit */ - temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE); - - /* Enable interrupt for the last dtd of a request */ - if (*is_last && !req->req.no_interrupt) - temp |= DTD_IOC; - - temp |= mult << 10; - - dtd->size_ioc_sts = temp; - - mb(); - - return dtd; -} - -/* generate dTD linked list for a request */ -static int req_to_dtd(struct mv_req *req) -{ - unsigned count; - int is_last, is_first = 1; - struct mv_dtd *dtd, *last_dtd = NULL; - dma_addr_t dma; - - do { - dtd = build_dtd(req, &count, &dma, &is_last); - if (dtd == NULL) - return -ENOMEM; - - if (is_first) { - is_first = 0; - req->head = dtd; - } else { - last_dtd->dtd_next = dma; - last_dtd->next_dtd_virt = dtd; - } - last_dtd = dtd; - req->dtd_count++; - } while (!is_last); - - /* set terminate bit to 1 for the last dTD */ - dtd->dtd_next = DTD_NEXT_TERMINATE; - - req->tail = dtd; - - return 0; -} - -static int mv_ep_enable(struct usb_ep *_ep, - const struct usb_endpoint_descriptor *desc) -{ - struct mv_udc *udc; - struct mv_ep *ep; - struct mv_dqh *dqh; - u16 max = 0; - u32 bit_pos, epctrlx, direction; - const unsigned char zlt = 1; - unsigned char ios, mult; - unsigned long flags; - - ep = container_of(_ep, struct 
mv_ep, ep); - udc = ep->udc; - - if (!_ep || !desc - || desc->bDescriptorType != USB_DT_ENDPOINT) - return -EINVAL; - - if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - direction = ep_dir(ep); - max = usb_endpoint_maxp(desc); - - /* - * disable HW zero length termination select - * driver handles zero length packet through req->req.zero - */ - bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num); - - /* Check if the Endpoint is Primed */ - if ((readl(&udc->op_regs->epprime) & bit_pos) - || (readl(&udc->op_regs->epstatus) & bit_pos)) { - dev_info(&udc->dev->dev, - "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x," - " ENDPTSTATUS=0x%x, bit_pos=0x%x\n", - (unsigned)ep->ep_num, direction ? "SEND" : "RECV", - (unsigned)readl(&udc->op_regs->epprime), - (unsigned)readl(&udc->op_regs->epstatus), - (unsigned)bit_pos); - goto en_done; - } - - /* Set the max packet length, interrupt on Setup and Mult fields */ - ios = 0; - mult = 0; - switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) { - case USB_ENDPOINT_XFER_BULK: - case USB_ENDPOINT_XFER_INT: - break; - case USB_ENDPOINT_XFER_CONTROL: - ios = 1; - break; - case USB_ENDPOINT_XFER_ISOC: - /* Calculate transactions needed for high bandwidth iso */ - mult = usb_endpoint_maxp_mult(desc); - /* 3 transactions at most */ - if (mult > 3) - goto en_done; - break; - default: - goto en_done; - } - - spin_lock_irqsave(&udc->lock, flags); - /* Get the endpoint queue head address */ - dqh = ep->dqh; - dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) - | (mult << EP_QUEUE_HEAD_MULT_POS) - | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0) - | (ios ? EP_QUEUE_HEAD_IOS : 0); - dqh->next_dtd_ptr = 1; - dqh->size_ioc_int_sts = 0; - - ep->ep.maxpacket = max; - ep->ep.desc = desc; - ep->stopped = 0; - - /* Enable the endpoint for Rx or Tx and set the endpoint type */ - epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); - if (direction == EP_DIR_IN) { - epctrlx &= ~EPCTRL_TX_ALL_MASK; - epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST - | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - << EPCTRL_TX_EP_TYPE_SHIFT); - } else { - epctrlx &= ~EPCTRL_RX_ALL_MASK; - epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST - | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - << EPCTRL_RX_EP_TYPE_SHIFT); - } - writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); - - /* - * Implement Guideline (GL# USB-7) The unused endpoint type must - * be programmed to bulk. 
- */ - epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); - if ((epctrlx & EPCTRL_RX_ENABLE) == 0) { - epctrlx |= (USB_ENDPOINT_XFER_BULK - << EPCTRL_RX_EP_TYPE_SHIFT); - writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); - } - - epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); - if ((epctrlx & EPCTRL_TX_ENABLE) == 0) { - epctrlx |= (USB_ENDPOINT_XFER_BULK - << EPCTRL_TX_EP_TYPE_SHIFT); - writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); - } - - spin_unlock_irqrestore(&udc->lock, flags); - - return 0; -en_done: - return -EINVAL; -} - -static int mv_ep_disable(struct usb_ep *_ep) -{ - struct mv_udc *udc; - struct mv_ep *ep; - struct mv_dqh *dqh; - u32 epctrlx, direction; - unsigned long flags; - - ep = container_of(_ep, struct mv_ep, ep); - if ((_ep == NULL) || !ep->ep.desc) - return -EINVAL; - - udc = ep->udc; - - /* Get the endpoint queue head address */ - dqh = ep->dqh; - - spin_lock_irqsave(&udc->lock, flags); - - direction = ep_dir(ep); - - /* Reset the max packet length and the interrupt on Setup */ - dqh->max_packet_length = 0; - - /* Disable the endpoint for Rx or Tx and reset the endpoint type */ - epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); - epctrlx &= ~((direction == EP_DIR_IN) - ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE) - : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE)); - writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); - - /* nuke all pending requests (does flush) */ - nuke(ep, -ESHUTDOWN); - - ep->ep.desc = NULL; - ep->stopped = 1; - - spin_unlock_irqrestore(&udc->lock, flags); - - return 0; -} - -static struct usb_request * -mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) -{ - struct mv_req *req; - - req = kzalloc(sizeof *req, gfp_flags); - if (!req) - return NULL; - - req->req.dma = DMA_ADDR_INVALID; - INIT_LIST_HEAD(&req->queue); - - return &req->req; -} - -static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req) -{ - struct mv_req *req = NULL; - - req = container_of(_req, struct mv_req, req); - - if (_req) - kfree(req); -} - -static void mv_ep_fifo_flush(struct usb_ep *_ep) -{ - struct mv_udc *udc; - u32 bit_pos, direction; - struct mv_ep *ep; - unsigned int loops; - - if (!_ep) - return; - - ep = container_of(_ep, struct mv_ep, ep); - if (!ep->ep.desc) - return; - - udc = ep->udc; - direction = ep_dir(ep); - - if (ep->ep_num == 0) - bit_pos = (1 << 16) | 1; - else if (direction == EP_DIR_OUT) - bit_pos = 1 << ep->ep_num; - else - bit_pos = 1 << (16 + ep->ep_num); - - loops = LOOPS(EPSTATUS_TIMEOUT); - do { - unsigned int inter_loops; - - if (loops == 0) { - dev_err(&udc->dev->dev, - "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n", - (unsigned)readl(&udc->op_regs->epstatus), - (unsigned)bit_pos); - return; - } - /* Write 1 to the Flush register */ - writel(bit_pos, &udc->op_regs->epflush); - - /* Wait until flushing completed */ - inter_loops = LOOPS(FLUSH_TIMEOUT); - while (readl(&udc->op_regs->epflush)) { - /* - * ENDPTFLUSH bit should be cleared to indicate this - * operation is complete - */ - if (inter_loops == 0) { - dev_err(&udc->dev->dev, - "TIMEOUT for ENDPTFLUSH=0x%x," - "bit_pos=0x%x\n", - (unsigned)readl(&udc->op_regs->epflush), - (unsigned)bit_pos); - return; - } - inter_loops--; - udelay(LOOPS_USEC); - } - loops--; - } while (readl(&udc->op_regs->epstatus) & bit_pos); -} - -/* queues (submits) an I/O request to an endpoint */ -static int -mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) -{ - struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); - struct mv_req *req = container_of(_req, struct 
mv_req, req); - struct mv_udc *udc = ep->udc; - unsigned long flags; - int retval; - - /* catch various bogus parameters */ - if (!_req || !req->req.complete || !req->req.buf - || !list_empty(&req->queue)) { - dev_err(&udc->dev->dev, "%s, bad params", __func__); - return -EINVAL; - } - if (unlikely(!_ep || !ep->ep.desc)) { - dev_err(&udc->dev->dev, "%s, bad ep", __func__); - return -EINVAL; - } - - udc = ep->udc; - if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - req->ep = ep; - - /* map virtual address to hardware */ - retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep)); - if (retval) - return retval; - - req->req.status = -EINPROGRESS; - req->req.actual = 0; - req->dtd_count = 0; - - spin_lock_irqsave(&udc->lock, flags); - - /* build dtds and push them to device queue */ - if (!req_to_dtd(req)) { - retval = queue_dtd(ep, req); - if (retval) { - spin_unlock_irqrestore(&udc->lock, flags); - dev_err(&udc->dev->dev, "Failed to queue dtd\n"); - goto err_unmap_dma; - } - } else { - spin_unlock_irqrestore(&udc->lock, flags); - dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n"); - retval = -ENOMEM; - goto err_unmap_dma; - } - - /* Update ep0 state */ - if (ep->ep_num == 0) - udc->ep0_state = DATA_STATE_XMIT; - - /* irq handler advances the queue */ - list_add_tail(&req->queue, &ep->queue); - spin_unlock_irqrestore(&udc->lock, flags); - - return 0; - -err_unmap_dma: - usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep)); - - return retval; -} - -static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req) -{ - struct mv_dqh *dqh = ep->dqh; - u32 bit_pos; - - /* Write dQH next pointer and terminate bit to 0 */ - dqh->next_dtd_ptr = req->head->td_dma - & EP_QUEUE_HEAD_NEXT_POINTER_MASK; - - /* clear active and halt bit, in case set from a previous error */ - dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED); - - /* Ensure that updates to the QH will occure before priming. */ - wmb(); - - bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 
0 : 16) + ep->ep_num); - - /* Prime the Endpoint */ - writel(bit_pos, &ep->udc->op_regs->epprime); -} - -/* dequeues (cancels, unlinks) an I/O request from an endpoint */ -static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) -{ - struct mv_ep *ep = container_of(_ep, struct mv_ep, ep); - struct mv_req *req = NULL, *iter; - struct mv_udc *udc = ep->udc; - unsigned long flags; - int stopped, ret = 0; - u32 epctrlx; - - if (!_ep || !_req) - return -EINVAL; - - spin_lock_irqsave(&ep->udc->lock, flags); - stopped = ep->stopped; - - /* Stop the ep before we deal with the queue */ - ep->stopped = 1; - epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); - if (ep_dir(ep) == EP_DIR_IN) - epctrlx &= ~EPCTRL_TX_ENABLE; - else - epctrlx &= ~EPCTRL_RX_ENABLE; - writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); - - /* make sure it's actually queued on this endpoint */ - list_for_each_entry(iter, &ep->queue, queue) { - if (&iter->req != _req) - continue; - req = iter; - break; - } - if (!req) { - ret = -EINVAL; - goto out; - } - - /* The request is in progress, or completed but not dequeued */ - if (ep->queue.next == &req->queue) { - _req->status = -ECONNRESET; - mv_ep_fifo_flush(_ep); /* flush current transfer */ - - /* The request isn't the last request in this ep queue */ - if (req->queue.next != &ep->queue) { - struct mv_req *next_req; - - next_req = list_entry(req->queue.next, - struct mv_req, queue); - - /* Point the QH to the first TD of next request */ - mv_prime_ep(ep, next_req); - } else { - struct mv_dqh *qh; - - qh = ep->dqh; - qh->next_dtd_ptr = 1; - qh->size_ioc_int_sts = 0; - } - - /* The request hasn't been processed, patch up the TD chain */ - } else { - struct mv_req *prev_req; - - prev_req = list_entry(req->queue.prev, struct mv_req, queue); - writel(readl(&req->tail->dtd_next), - &prev_req->tail->dtd_next); - - } - - done(ep, req, -ECONNRESET); - - /* Enable EP */ -out: - epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]); - if (ep_dir(ep) == EP_DIR_IN) - epctrlx |= EPCTRL_TX_ENABLE; - else - epctrlx |= EPCTRL_RX_ENABLE; - writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]); - ep->stopped = stopped; - - spin_unlock_irqrestore(&ep->udc->lock, flags); - return ret; -} - -static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall) -{ - u32 epctrlx; - - epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); - - if (stall) { - if (direction == EP_DIR_IN) - epctrlx |= EPCTRL_TX_EP_STALL; - else - epctrlx |= EPCTRL_RX_EP_STALL; - } else { - if (direction == EP_DIR_IN) { - epctrlx &= ~EPCTRL_TX_EP_STALL; - epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST; - } else { - epctrlx &= ~EPCTRL_RX_EP_STALL; - epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST; - } - } - writel(epctrlx, &udc->op_regs->epctrlx[ep_num]); -} - -static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction) -{ - u32 epctrlx; - - epctrlx = readl(&udc->op_regs->epctrlx[ep_num]); - - if (direction == EP_DIR_OUT) - return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0; - else - return (epctrlx & EPCTRL_TX_EP_STALL) ? 
1 : 0; -} - -static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge) -{ - struct mv_ep *ep; - unsigned long flags; - int status = 0; - struct mv_udc *udc; - - ep = container_of(_ep, struct mv_ep, ep); - udc = ep->udc; - if (!_ep || !ep->ep.desc) { - status = -EINVAL; - goto out; - } - - if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { - status = -EOPNOTSUPP; - goto out; - } - - /* - * Attempt to halt IN ep will fail if any transfer requests - * are still queue - */ - if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) { - status = -EAGAIN; - goto out; - } - - spin_lock_irqsave(&ep->udc->lock, flags); - ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt); - if (halt && wedge) - ep->wedge = 1; - else if (!halt) - ep->wedge = 0; - spin_unlock_irqrestore(&ep->udc->lock, flags); - - if (ep->ep_num == 0) { - udc->ep0_state = WAIT_FOR_SETUP; - udc->ep0_dir = EP_DIR_OUT; - } -out: - return status; -} - -static int mv_ep_set_halt(struct usb_ep *_ep, int halt) -{ - return mv_ep_set_halt_wedge(_ep, halt, 0); -} - -static int mv_ep_set_wedge(struct usb_ep *_ep) -{ - return mv_ep_set_halt_wedge(_ep, 1, 1); -} - -static const struct usb_ep_ops mv_ep_ops = { - .enable = mv_ep_enable, - .disable = mv_ep_disable, - - .alloc_request = mv_alloc_request, - .free_request = mv_free_request, - - .queue = mv_ep_queue, - .dequeue = mv_ep_dequeue, - - .set_wedge = mv_ep_set_wedge, - .set_halt = mv_ep_set_halt, - .fifo_flush = mv_ep_fifo_flush, /* flush fifo */ -}; - -static int udc_clock_enable(struct mv_udc *udc) -{ - return clk_prepare_enable(udc->clk); -} - -static void udc_clock_disable(struct mv_udc *udc) -{ - clk_disable_unprepare(udc->clk); -} - -static void udc_stop(struct mv_udc *udc) -{ - u32 tmp; - - /* Disable interrupts */ - tmp = readl(&udc->op_regs->usbintr); - tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN | - USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN); - writel(tmp, &udc->op_regs->usbintr); - - udc->stopped = 1; - - /* Reset the Run the bit in the command register to stop VUSB */ - tmp = readl(&udc->op_regs->usbcmd); - tmp &= ~USBCMD_RUN_STOP; - writel(tmp, &udc->op_regs->usbcmd); -} - -static void udc_start(struct mv_udc *udc) -{ - u32 usbintr; - - usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN - | USBINTR_PORT_CHANGE_DETECT_EN - | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND; - /* Enable interrupts */ - writel(usbintr, &udc->op_regs->usbintr); - - udc->stopped = 0; - - /* Set the Run bit in the command register */ - writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd); -} - -static int udc_reset(struct mv_udc *udc) -{ - unsigned int loops; - u32 tmp, portsc; - - /* Stop the controller */ - tmp = readl(&udc->op_regs->usbcmd); - tmp &= ~USBCMD_RUN_STOP; - writel(tmp, &udc->op_regs->usbcmd); - - /* Reset the controller to get default values */ - writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd); - - /* wait for reset to complete */ - loops = LOOPS(RESET_TIMEOUT); - while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) { - if (loops == 0) { - dev_err(&udc->dev->dev, - "Wait for RESET completed TIMEOUT\n"); - return -ETIMEDOUT; - } - loops--; - udelay(LOOPS_USEC); - } - - /* set controller to device mode */ - tmp = readl(&udc->op_regs->usbmode); - tmp |= USBMODE_CTRL_MODE_DEVICE; - - /* turn setup lockout off, require setup tripwire in usbcmd */ - tmp |= USBMODE_SETUP_LOCK_OFF; - - writel(tmp, &udc->op_regs->usbmode); - - writel(0x0, &udc->op_regs->epsetupstat); - - /* Configure the Endpoint List Address */ - writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK, - 
&udc->op_regs->eplistaddr); - - portsc = readl(&udc->op_regs->portsc[0]); - if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC) - portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER); - - if (udc->force_fs) - portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT; - else - portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT); - - writel(portsc, &udc->op_regs->portsc[0]); - - tmp = readl(&udc->op_regs->epctrlx[0]); - tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL); - writel(tmp, &udc->op_regs->epctrlx[0]); - - return 0; -} - -static int mv_udc_enable_internal(struct mv_udc *udc) -{ - int retval; - - if (udc->active) - return 0; - - dev_dbg(&udc->dev->dev, "enable udc\n"); - retval = udc_clock_enable(udc); - if (retval) - return retval; - - if (udc->pdata->phy_init) { - retval = udc->pdata->phy_init(udc->phy_regs); - if (retval) { - dev_err(&udc->dev->dev, - "init phy error %d\n", retval); - udc_clock_disable(udc); - return retval; - } - } - udc->active = 1; - - return 0; -} - -static int mv_udc_enable(struct mv_udc *udc) -{ - if (udc->clock_gating) - return mv_udc_enable_internal(udc); - - return 0; -} - -static void mv_udc_disable_internal(struct mv_udc *udc) -{ - if (udc->active) { - dev_dbg(&udc->dev->dev, "disable udc\n"); - if (udc->pdata->phy_deinit) - udc->pdata->phy_deinit(udc->phy_regs); - udc_clock_disable(udc); - udc->active = 0; - } -} - -static void mv_udc_disable(struct mv_udc *udc) -{ - if (udc->clock_gating) - mv_udc_disable_internal(udc); -} - -static int mv_udc_get_frame(struct usb_gadget *gadget) -{ - struct mv_udc *udc; - u16 retval; - - if (!gadget) - return -ENODEV; - - udc = container_of(gadget, struct mv_udc, gadget); - - retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS; - - return retval; -} - -/* Tries to wake up the host connected to this gadget */ -static int mv_udc_wakeup(struct usb_gadget *gadget) -{ - struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget); - u32 portsc; - - /* Remote wakeup feature not enabled by host */ - if (!udc->remote_wakeup) - return -ENOTSUPP; - - portsc = readl(&udc->op_regs->portsc); - /* not suspended? 
*/ - if (!(portsc & PORTSCX_PORT_SUSPEND)) - return 0; - /* trigger force resume */ - portsc |= PORTSCX_PORT_FORCE_RESUME; - writel(portsc, &udc->op_regs->portsc[0]); - return 0; -} - -static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active) -{ - struct mv_udc *udc; - unsigned long flags; - int retval = 0; - - udc = container_of(gadget, struct mv_udc, gadget); - spin_lock_irqsave(&udc->lock, flags); - - udc->vbus_active = (is_active != 0); - - dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", - __func__, udc->softconnect, udc->vbus_active); - - if (udc->driver && udc->softconnect && udc->vbus_active) { - retval = mv_udc_enable(udc); - if (retval == 0) { - /* Clock is disabled, need re-init registers */ - udc_reset(udc); - ep0_reset(udc); - udc_start(udc); - } - } else if (udc->driver && udc->softconnect) { - if (!udc->active) - goto out; - - /* stop all the transfer in queue*/ - stop_activity(udc, udc->driver); - udc_stop(udc); - mv_udc_disable(udc); - } - -out: - spin_unlock_irqrestore(&udc->lock, flags); - return retval; -} - -static int mv_udc_pullup(struct usb_gadget *gadget, int is_on) -{ - struct mv_udc *udc; - unsigned long flags; - int retval = 0; - - udc = container_of(gadget, struct mv_udc, gadget); - spin_lock_irqsave(&udc->lock, flags); - - udc->softconnect = (is_on != 0); - - dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n", - __func__, udc->softconnect, udc->vbus_active); - - if (udc->driver && udc->softconnect && udc->vbus_active) { - retval = mv_udc_enable(udc); - if (retval == 0) { - /* Clock is disabled, need re-init registers */ - udc_reset(udc); - ep0_reset(udc); - udc_start(udc); - } - } else if (udc->driver && udc->vbus_active) { - /* stop all the transfer in queue*/ - stop_activity(udc, udc->driver); - udc_stop(udc); - mv_udc_disable(udc); - } - - spin_unlock_irqrestore(&udc->lock, flags); - return retval; -} - -static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *); -static int mv_udc_stop(struct usb_gadget *); -/* device controller usb_gadget_ops structure */ -static const struct usb_gadget_ops mv_ops = { - - /* returns the current frame number */ - .get_frame = mv_udc_get_frame, - - /* tries to wake up the host connected to this gadget */ - .wakeup = mv_udc_wakeup, - - /* notify controller that VBUS is powered or not */ - .vbus_session = mv_udc_vbus_session, - - /* D+ pullup, software-controlled connect/disconnect to USB host */ - .pullup = mv_udc_pullup, - .udc_start = mv_udc_start, - .udc_stop = mv_udc_stop, -}; - -static int eps_init(struct mv_udc *udc) -{ - struct mv_ep *ep; - char name[14]; - int i; - - /* initialize ep0 */ - ep = &udc->eps[0]; - ep->udc = udc; - strncpy(ep->name, "ep0", sizeof(ep->name)); - ep->ep.name = ep->name; - ep->ep.ops = &mv_ep_ops; - ep->wedge = 0; - ep->stopped = 0; - usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE); - ep->ep.caps.type_control = true; - ep->ep.caps.dir_in = true; - ep->ep.caps.dir_out = true; - ep->ep_num = 0; - ep->ep.desc = &mv_ep0_desc; - INIT_LIST_HEAD(&ep->queue); - - ep->ep_type = USB_ENDPOINT_XFER_CONTROL; - - /* initialize other endpoints */ - for (i = 2; i < udc->max_eps * 2; i++) { - ep = &udc->eps[i]; - if (i % 2) { - snprintf(name, sizeof(name), "ep%din", i / 2); - ep->direction = EP_DIR_IN; - ep->ep.caps.dir_in = true; - } else { - snprintf(name, sizeof(name), "ep%dout", i / 2); - ep->direction = EP_DIR_OUT; - ep->ep.caps.dir_out = true; - } - ep->udc = udc; - strncpy(ep->name, name, sizeof(ep->name)); - ep->ep.name = ep->name; - - 
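The removed eps_init() loop above relies on a fixed indexing convention for the flat endpoint array: slot i describes endpoint number i / 2, even slots are OUT and odd slots are IN, and the same index doubles as the dQH slot (queue_dtd() computes it as ep_num * 2 + direction). A minimal user-space sketch of that mapping, assuming the driver's usual direction encoding (0 = OUT, 1 = IN) and an arbitrary four-endpoint controller:

#include <stdio.h>

/*
 * Illustrative model only, not driver code: print the slot layout that the
 * removed mv_udc code assumes for its eps[] and ep_dqh[] arrays.  max_eps
 * is an example value; the real controller reports it via DCCPARAMS.
 */
int main(void)
{
	unsigned int max_eps = 4;
	unsigned int i;

	printf("eps[0] -> ep0 (control, both directions; eps[1] is unused)\n");
	for (i = 2; i < max_eps * 2; i++) {
		unsigned int ep_num = i / 2;
		const char *dir = (i % 2) ? "in" : "out";

		printf("eps[%u] -> ep%u%s (dQH slot %u)\n", i, ep_num, dir, i);
	}
	return 0;
}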
ep->ep.caps.type_iso = true; - ep->ep.caps.type_bulk = true; - ep->ep.caps.type_int = true; - - ep->ep.ops = &mv_ep_ops; - ep->stopped = 0; - usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0); - ep->ep_num = i / 2; - - INIT_LIST_HEAD(&ep->queue); - list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); - - ep->dqh = &udc->ep_dqh[i]; - } - - return 0; -} - -/* delete all endpoint requests, called with spinlock held */ -static void nuke(struct mv_ep *ep, int status) -{ - /* called with spinlock held */ - ep->stopped = 1; - - /* endpoint fifo flush */ - mv_ep_fifo_flush(&ep->ep); - - while (!list_empty(&ep->queue)) { - struct mv_req *req = NULL; - req = list_entry(ep->queue.next, struct mv_req, queue); - done(ep, req, status); - } -} - -static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver) -{ - struct mv_ep *ep; - - nuke(&udc->eps[0], -ESHUTDOWN); - - list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { - nuke(ep, -ESHUTDOWN); - } - - /* report reset; the driver is already quiesced */ - if (driver) { - spin_unlock(&udc->lock); - usb_gadget_udc_reset(&udc->gadget, driver); - spin_lock(&udc->lock); - } -} -/* stop all USB activities */ -static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver) -{ - struct mv_ep *ep; - - nuke(&udc->eps[0], -ESHUTDOWN); - - list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { - nuke(ep, -ESHUTDOWN); - } - - /* report disconnect; the driver is already quiesced */ - if (driver) { - spin_unlock(&udc->lock); - driver->disconnect(&udc->gadget); - spin_lock(&udc->lock); - } -} - -static int mv_udc_start(struct usb_gadget *gadget, - struct usb_gadget_driver *driver) -{ - struct mv_udc *udc; - int retval = 0; - unsigned long flags; - - udc = container_of(gadget, struct mv_udc, gadget); - - if (udc->driver) - return -EBUSY; - - spin_lock_irqsave(&udc->lock, flags); - - /* hook up the driver ... 
*/ - udc->driver = driver; - - udc->usb_state = USB_STATE_ATTACHED; - udc->ep0_state = WAIT_FOR_SETUP; - udc->ep0_dir = EP_DIR_OUT; - - spin_unlock_irqrestore(&udc->lock, flags); - - if (udc->transceiver) { - retval = otg_set_peripheral(udc->transceiver->otg, - &udc->gadget); - if (retval) { - dev_err(&udc->dev->dev, - "unable to register peripheral to otg\n"); - udc->driver = NULL; - return retval; - } - } - - /* When boot with cable attached, there will be no vbus irq occurred */ - if (udc->qwork) - queue_work(udc->qwork, &udc->vbus_work); - - return 0; -} - -static int mv_udc_stop(struct usb_gadget *gadget) -{ - struct mv_udc *udc; - unsigned long flags; - - udc = container_of(gadget, struct mv_udc, gadget); - - spin_lock_irqsave(&udc->lock, flags); - - mv_udc_enable(udc); - udc_stop(udc); - - /* stop all usb activities */ - udc->gadget.speed = USB_SPEED_UNKNOWN; - stop_activity(udc, NULL); - mv_udc_disable(udc); - - spin_unlock_irqrestore(&udc->lock, flags); - - /* unbind gadget driver */ - udc->driver = NULL; - - return 0; -} - -static void mv_set_ptc(struct mv_udc *udc, u32 mode) -{ - u32 portsc; - - portsc = readl(&udc->op_regs->portsc[0]); - portsc |= mode << 16; - writel(portsc, &udc->op_regs->portsc[0]); -} - -static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req) -{ - struct mv_ep *mvep = container_of(ep, struct mv_ep, ep); - struct mv_req *req = container_of(_req, struct mv_req, req); - struct mv_udc *udc; - unsigned long flags; - - udc = mvep->udc; - - dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode); - - spin_lock_irqsave(&udc->lock, flags); - if (req->test_mode) { - mv_set_ptc(udc, req->test_mode); - req->test_mode = 0; - } - spin_unlock_irqrestore(&udc->lock, flags); -} - -static int -udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty) -{ - int retval = 0; - struct mv_req *req; - struct mv_ep *ep; - - ep = &udc->eps[0]; - udc->ep0_dir = direction; - udc->ep0_state = WAIT_FOR_OUT_STATUS; - - req = udc->status_req; - - /* fill in the request structure */ - if (empty == false) { - *((u16 *) req->req.buf) = cpu_to_le16(status); - req->req.length = 2; - } else - req->req.length = 0; - - req->ep = ep; - req->req.status = -EINPROGRESS; - req->req.actual = 0; - if (udc->test_mode) { - req->req.complete = prime_status_complete; - req->test_mode = udc->test_mode; - udc->test_mode = 0; - } else - req->req.complete = NULL; - req->dtd_count = 0; - - if (req->req.dma == DMA_ADDR_INVALID) { - req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, - req->req.buf, req->req.length, - ep_dir(ep) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); - req->mapped = 1; - } - - /* prime the data phase */ - if (!req_to_dtd(req)) { - retval = queue_dtd(ep, req); - if (retval) { - dev_err(&udc->dev->dev, - "Failed to queue dtd when prime status\n"); - goto out; - } - } else{ /* no mem */ - retval = -ENOMEM; - dev_err(&udc->dev->dev, - "Failed to dma_pool_alloc when prime status\n"); - goto out; - } - - list_add_tail(&req->queue, &ep->queue); - - return 0; -out: - usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); - - return retval; -} - -static void mv_udc_testmode(struct mv_udc *udc, u16 index) -{ - if (index <= USB_TEST_FORCE_ENABLE) { - udc->test_mode = index; - if (udc_prime_status(udc, EP_DIR_IN, 0, true)) - ep0_stall(udc); - } else - dev_err(&udc->dev->dev, - "This test mode(%d) is not supported\n", index); -} - -static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup) -{ - udc->dev_addr = (u8)setup->wValue; - - /* update usb state */ - udc->usb_state = USB_STATE_ADDRESS; - - if (udc_prime_status(udc, EP_DIR_IN, 0, true)) - ep0_stall(udc); -} - -static void ch9getstatus(struct mv_udc *udc, u8 ep_num, - struct usb_ctrlrequest *setup) -{ - u16 status = 0; - int retval; - - if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK)) - != (USB_DIR_IN | USB_TYPE_STANDARD)) - return; - - if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) { - status = 1 << USB_DEVICE_SELF_POWERED; - status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP; - } else if ((setup->bRequestType & USB_RECIP_MASK) - == USB_RECIP_INTERFACE) { - /* get interface status */ - status = 0; - } else if ((setup->bRequestType & USB_RECIP_MASK) - == USB_RECIP_ENDPOINT) { - u8 ep_num, direction; - - ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; - direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) - ? EP_DIR_IN : EP_DIR_OUT; - status = ep_is_stall(udc, ep_num, direction) - << USB_ENDPOINT_HALT; - } - - retval = udc_prime_status(udc, EP_DIR_IN, status, false); - if (retval) - ep0_stall(udc); - else - udc->ep0_state = DATA_STATE_XMIT; -} - -static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) -{ - u8 ep_num; - u8 direction; - struct mv_ep *ep; - - if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) - == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { - switch (setup->wValue) { - case USB_DEVICE_REMOTE_WAKEUP: - udc->remote_wakeup = 0; - break; - default: - goto out; - } - } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) - == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { - switch (setup->wValue) { - case USB_ENDPOINT_HALT: - ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; - direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) - ? 
EP_DIR_IN : EP_DIR_OUT; - if (setup->wValue != 0 || setup->wLength != 0 - || ep_num > udc->max_eps) - goto out; - ep = &udc->eps[ep_num * 2 + direction]; - if (ep->wedge == 1) - break; - spin_unlock(&udc->lock); - ep_set_stall(udc, ep_num, direction, 0); - spin_lock(&udc->lock); - break; - default: - goto out; - } - } else - goto out; - - if (udc_prime_status(udc, EP_DIR_IN, 0, true)) - ep0_stall(udc); -out: - return; -} - -static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup) -{ - u8 ep_num; - u8 direction; - - if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) - == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) { - switch (setup->wValue) { - case USB_DEVICE_REMOTE_WAKEUP: - udc->remote_wakeup = 1; - break; - case USB_DEVICE_TEST_MODE: - if (setup->wIndex & 0xFF - || udc->gadget.speed != USB_SPEED_HIGH) - ep0_stall(udc); - - if (udc->usb_state != USB_STATE_CONFIGURED - && udc->usb_state != USB_STATE_ADDRESS - && udc->usb_state != USB_STATE_DEFAULT) - ep0_stall(udc); - - mv_udc_testmode(udc, (setup->wIndex >> 8)); - goto out; - default: - goto out; - } - } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK)) - == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) { - switch (setup->wValue) { - case USB_ENDPOINT_HALT: - ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK; - direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK) - ? EP_DIR_IN : EP_DIR_OUT; - if (setup->wValue != 0 || setup->wLength != 0 - || ep_num > udc->max_eps) - goto out; - spin_unlock(&udc->lock); - ep_set_stall(udc, ep_num, direction, 1); - spin_lock(&udc->lock); - break; - default: - goto out; - } - } else - goto out; - - if (udc_prime_status(udc, EP_DIR_IN, 0, true)) - ep0_stall(udc); -out: - return; -} - -static void handle_setup_packet(struct mv_udc *udc, u8 ep_num, - struct usb_ctrlrequest *setup) - __releases(&ep->udc->lock) - __acquires(&ep->udc->lock) -{ - bool delegate = false; - - nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN); - - dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", - setup->bRequestType, setup->bRequest, - setup->wValue, setup->wIndex, setup->wLength); - /* We process some standard setup requests here */ - if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { - switch (setup->bRequest) { - case USB_REQ_GET_STATUS: - ch9getstatus(udc, ep_num, setup); - break; - - case USB_REQ_SET_ADDRESS: - ch9setaddress(udc, setup); - break; - - case USB_REQ_CLEAR_FEATURE: - ch9clearfeature(udc, setup); - break; - - case USB_REQ_SET_FEATURE: - ch9setfeature(udc, setup); - break; - - default: - delegate = true; - } - } else - delegate = true; - - /* delegate USB standard requests to the gadget driver */ - if (delegate == true) { - /* USB requests handled by gadget */ - if (setup->wLength) { - /* DATA phase from gadget, STATUS phase from udc */ - udc->ep0_dir = (setup->bRequestType & USB_DIR_IN) - ? EP_DIR_IN : EP_DIR_OUT; - spin_unlock(&udc->lock); - if (udc->driver->setup(&udc->gadget, - &udc->local_setup_buff) < 0) - ep0_stall(udc); - spin_lock(&udc->lock); - udc->ep0_state = (setup->bRequestType & USB_DIR_IN) - ? 
DATA_STATE_XMIT : DATA_STATE_RECV; - } else { - /* no DATA phase, IN STATUS phase from gadget */ - udc->ep0_dir = EP_DIR_IN; - spin_unlock(&udc->lock); - if (udc->driver->setup(&udc->gadget, - &udc->local_setup_buff) < 0) - ep0_stall(udc); - spin_lock(&udc->lock); - udc->ep0_state = WAIT_FOR_OUT_STATUS; - } - } -} - -/* complete DATA or STATUS phase of ep0 prime status phase if needed */ -static void ep0_req_complete(struct mv_udc *udc, - struct mv_ep *ep0, struct mv_req *req) -{ - u32 new_addr; - - if (udc->usb_state == USB_STATE_ADDRESS) { - /* set the new address */ - new_addr = (u32)udc->dev_addr; - writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT, - &udc->op_regs->deviceaddr); - } - - done(ep0, req, 0); - - switch (udc->ep0_state) { - case DATA_STATE_XMIT: - /* receive status phase */ - if (udc_prime_status(udc, EP_DIR_OUT, 0, true)) - ep0_stall(udc); - break; - case DATA_STATE_RECV: - /* send status phase */ - if (udc_prime_status(udc, EP_DIR_IN, 0 , true)) - ep0_stall(udc); - break; - case WAIT_FOR_OUT_STATUS: - udc->ep0_state = WAIT_FOR_SETUP; - break; - case WAIT_FOR_SETUP: - dev_err(&udc->dev->dev, "unexpect ep0 packets\n"); - break; - default: - ep0_stall(udc); - break; - } -} - -static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr) -{ - u32 temp; - struct mv_dqh *dqh; - - dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT]; - - /* Clear bit in ENDPTSETUPSTAT */ - writel((1 << ep_num), &udc->op_regs->epsetupstat); - - /* while a hazard exists when setup package arrives */ - do { - /* Set Setup Tripwire */ - temp = readl(&udc->op_regs->usbcmd); - writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); - - /* Copy the setup packet to local buffer */ - memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8); - } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET)); - - /* Clear Setup Tripwire */ - temp = readl(&udc->op_regs->usbcmd); - writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd); -} - -static void irq_process_tr_complete(struct mv_udc *udc) -{ - u32 tmp, bit_pos; - int i, ep_num = 0, direction = 0; - struct mv_ep *curr_ep; - struct mv_req *curr_req, *temp_req; - int status; - - /* - * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE - * because the setup packets are to be read ASAP - */ - - /* Process all Setup packet received interrupts */ - tmp = readl(&udc->op_regs->epsetupstat); - - if (tmp) { - for (i = 0; i < udc->max_eps; i++) { - if (tmp & (1 << i)) { - get_setup_data(udc, i, - (u8 *)(&udc->local_setup_buff)); - handle_setup_packet(udc, i, - &udc->local_setup_buff); - } - } - } - - /* Don't clear the endpoint setup status register here. 
- * It is cleared as a setup packet is read out of the buffer - */ - - /* Process non-setup transaction complete interrupts */ - tmp = readl(&udc->op_regs->epcomplete); - - if (!tmp) - return; - - writel(tmp, &udc->op_regs->epcomplete); - - for (i = 0; i < udc->max_eps * 2; i++) { - ep_num = i >> 1; - direction = i % 2; - - bit_pos = 1 << (ep_num + 16 * direction); - - if (!(bit_pos & tmp)) - continue; - - if (i == 1) - curr_ep = &udc->eps[0]; - else - curr_ep = &udc->eps[i]; - /* process the req queue until an uncomplete request */ - list_for_each_entry_safe(curr_req, temp_req, - &curr_ep->queue, queue) { - status = process_ep_req(udc, i, curr_req); - if (status) - break; - - /* write back status to req */ - curr_req->req.status = status; - - /* ep0 request completion */ - if (ep_num == 0) { - ep0_req_complete(udc, curr_ep, curr_req); - break; - } else { - done(curr_ep, curr_req, status); - } - } - } -} - -static void irq_process_reset(struct mv_udc *udc) -{ - u32 tmp; - unsigned int loops; - - udc->ep0_dir = EP_DIR_OUT; - udc->ep0_state = WAIT_FOR_SETUP; - udc->remote_wakeup = 0; /* default to 0 on reset */ - - /* The address bits are past bit 25-31. Set the address */ - tmp = readl(&udc->op_regs->deviceaddr); - tmp &= ~(USB_DEVICE_ADDRESS_MASK); - writel(tmp, &udc->op_regs->deviceaddr); - - /* Clear all the setup token semaphores */ - tmp = readl(&udc->op_regs->epsetupstat); - writel(tmp, &udc->op_regs->epsetupstat); - - /* Clear all the endpoint complete status bits */ - tmp = readl(&udc->op_regs->epcomplete); - writel(tmp, &udc->op_regs->epcomplete); - - /* wait until all endptprime bits cleared */ - loops = LOOPS(PRIME_TIMEOUT); - while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) { - if (loops == 0) { - dev_err(&udc->dev->dev, - "Timeout for ENDPTPRIME = 0x%x\n", - readl(&udc->op_regs->epprime)); - break; - } - loops--; - udelay(LOOPS_USEC); - } - - /* Write 1s to the Flush register */ - writel((u32)~0, &udc->op_regs->epflush); - - if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) { - dev_info(&udc->dev->dev, "usb bus reset\n"); - udc->usb_state = USB_STATE_DEFAULT; - /* reset all the queues, stop all USB activities */ - gadget_reset(udc, udc->driver); - } else { - dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n", - readl(&udc->op_regs->portsc)); - - /* - * re-initialize - * controller reset - */ - udc_reset(udc); - - /* reset all the queues, stop all USB activities */ - stop_activity(udc, udc->driver); - - /* reset ep0 dQH and endptctrl */ - ep0_reset(udc); - - /* enable interrupt and set controller to run state */ - udc_start(udc); - - udc->usb_state = USB_STATE_ATTACHED; - } -} - -static void handle_bus_resume(struct mv_udc *udc) -{ - udc->usb_state = udc->resume_state; - udc->resume_state = 0; - - /* report resume to the driver */ - if (udc->driver) { - if (udc->driver->resume) { - spin_unlock(&udc->lock); - udc->driver->resume(&udc->gadget); - spin_lock(&udc->lock); - } - } -} - -static void irq_process_suspend(struct mv_udc *udc) -{ - udc->resume_state = udc->usb_state; - udc->usb_state = USB_STATE_SUSPENDED; - - if (udc->driver->suspend) { - spin_unlock(&udc->lock); - udc->driver->suspend(&udc->gadget); - spin_lock(&udc->lock); - } -} - -static void irq_process_port_change(struct mv_udc *udc) -{ - u32 portsc; - - portsc = readl(&udc->op_regs->portsc[0]); - if (!(portsc & PORTSCX_PORT_RESET)) { - /* Get the speed */ - u32 speed = portsc & PORTSCX_PORT_SPEED_MASK; - switch (speed) { - case PORTSCX_PORT_SPEED_HIGH: - udc->gadget.speed = USB_SPEED_HIGH; - break; - case 
PORTSCX_PORT_SPEED_FULL: - udc->gadget.speed = USB_SPEED_FULL; - break; - case PORTSCX_PORT_SPEED_LOW: - udc->gadget.speed = USB_SPEED_LOW; - break; - default: - udc->gadget.speed = USB_SPEED_UNKNOWN; - break; - } - } - - if (portsc & PORTSCX_PORT_SUSPEND) { - udc->resume_state = udc->usb_state; - udc->usb_state = USB_STATE_SUSPENDED; - if (udc->driver->suspend) { - spin_unlock(&udc->lock); - udc->driver->suspend(&udc->gadget); - spin_lock(&udc->lock); - } - } - - if (!(portsc & PORTSCX_PORT_SUSPEND) - && udc->usb_state == USB_STATE_SUSPENDED) { - handle_bus_resume(udc); - } - - if (!udc->resume_state) - udc->usb_state = USB_STATE_DEFAULT; -} - -static void irq_process_error(struct mv_udc *udc) -{ - /* Increment the error count */ - udc->errors++; -} - -static irqreturn_t mv_udc_irq(int irq, void *dev) -{ - struct mv_udc *udc = (struct mv_udc *)dev; - u32 status, intr; - - /* Disable ISR when stopped bit is set */ - if (udc->stopped) - return IRQ_NONE; - - spin_lock(&udc->lock); - - status = readl(&udc->op_regs->usbsts); - intr = readl(&udc->op_regs->usbintr); - status &= intr; - - if (status == 0) { - spin_unlock(&udc->lock); - return IRQ_NONE; - } - - /* Clear all the interrupts occurred */ - writel(status, &udc->op_regs->usbsts); - - if (status & USBSTS_ERR) - irq_process_error(udc); - - if (status & USBSTS_RESET) - irq_process_reset(udc); - - if (status & USBSTS_PORT_CHANGE) - irq_process_port_change(udc); - - if (status & USBSTS_INT) - irq_process_tr_complete(udc); - - if (status & USBSTS_SUSPEND) - irq_process_suspend(udc); - - spin_unlock(&udc->lock); - - return IRQ_HANDLED; -} - -static irqreturn_t mv_udc_vbus_irq(int irq, void *dev) -{ - struct mv_udc *udc = (struct mv_udc *)dev; - - /* polling VBUS and init phy may cause too much time*/ - if (udc->qwork) - queue_work(udc->qwork, &udc->vbus_work); - - return IRQ_HANDLED; -} - -static void mv_udc_vbus_work(struct work_struct *work) -{ - struct mv_udc *udc; - unsigned int vbus; - - udc = container_of(work, struct mv_udc, vbus_work); - if (!udc->pdata->vbus) - return; - - vbus = udc->pdata->vbus->poll(); - dev_info(&udc->dev->dev, "vbus is %d\n", vbus); - - if (vbus == VBUS_HIGH) - mv_udc_vbus_session(&udc->gadget, 1); - else if (vbus == VBUS_LOW) - mv_udc_vbus_session(&udc->gadget, 0); -} - -/* release device structure */ -static void gadget_release(struct device *_dev) -{ - struct mv_udc *udc; - - udc = dev_get_drvdata(_dev); - - complete(udc->done); -} - -static void mv_udc_remove(struct platform_device *pdev) -{ - struct mv_udc *udc; - - udc = platform_get_drvdata(pdev); - - usb_del_gadget_udc(&udc->gadget); - - if (udc->qwork) - destroy_workqueue(udc->qwork); - - /* free memory allocated in probe */ - dma_pool_destroy(udc->dtd_pool); - - if (udc->ep_dqh) - dma_free_coherent(&pdev->dev, udc->ep_dqh_size, - udc->ep_dqh, udc->ep_dqh_dma); - - mv_udc_disable(udc); - - /* free dev, wait for the release() finished */ - wait_for_completion(udc->done); -} - -static int mv_udc_probe(struct platform_device *pdev) -{ - struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct mv_udc *udc; - int retval = 0; - struct resource *r; - size_t size; - - if (pdata == NULL) { - dev_err(&pdev->dev, "missing platform_data\n"); - return -ENODEV; - } - - udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL); - if (udc == NULL) - return -ENOMEM; - - udc->done = &release_done; - udc->pdata = dev_get_platdata(&pdev->dev); - spin_lock_init(&udc->lock); - - udc->dev = pdev; - - if (pdata->mode == MV_USB_MODE_OTG) { - udc->transceiver = 
devm_usb_get_phy(&pdev->dev, - USB_PHY_TYPE_USB2); - if (IS_ERR(udc->transceiver)) { - retval = PTR_ERR(udc->transceiver); - - if (retval == -ENXIO) - return retval; - - udc->transceiver = NULL; - return -EPROBE_DEFER; - } - } - - /* udc only have one sysclk. */ - udc->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(udc->clk)) - return PTR_ERR(udc->clk); - - r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs"); - if (r == NULL) { - dev_err(&pdev->dev, "no I/O memory resource defined\n"); - return -ENODEV; - } - - udc->cap_regs = (struct mv_cap_regs __iomem *) - devm_ioremap(&pdev->dev, r->start, resource_size(r)); - if (udc->cap_regs == NULL) { - dev_err(&pdev->dev, "failed to map I/O memory\n"); - return -EBUSY; - } - - r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs"); - if (r == NULL) { - dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); - return -ENODEV; - } - - udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); - if (udc->phy_regs == NULL) { - dev_err(&pdev->dev, "failed to map phy I/O memory\n"); - return -EBUSY; - } - - /* we will acces controller register, so enable the clk */ - retval = mv_udc_enable_internal(udc); - if (retval) - return retval; - - udc->op_regs = - (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs - + (readl(&udc->cap_regs->caplength_hciversion) - & CAPLENGTH_MASK)); - udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK; - - /* - * some platform will use usb to download image, it may not disconnect - * usb gadget before loading kernel. So first stop udc here. - */ - udc_stop(udc); - writel(0xFFFFFFFF, &udc->op_regs->usbsts); - - size = udc->max_eps * sizeof(struct mv_dqh) *2; - size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1); - udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size, - &udc->ep_dqh_dma, GFP_KERNEL); - - if (udc->ep_dqh == NULL) { - dev_err(&pdev->dev, "allocate dQH memory failed\n"); - retval = -ENOMEM; - goto err_disable_clock; - } - udc->ep_dqh_size = size; - - /* create dTD dma_pool resource */ - udc->dtd_pool = dma_pool_create("mv_dtd", - &pdev->dev, - sizeof(struct mv_dtd), - DTD_ALIGNMENT, - DMA_BOUNDARY); - - if (!udc->dtd_pool) { - retval = -ENOMEM; - goto err_free_dma; - } - - size = udc->max_eps * sizeof(struct mv_ep) *2; - udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (udc->eps == NULL) { - retval = -ENOMEM; - goto err_destroy_dma; - } - - /* initialize ep0 status request structure */ - udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req), - GFP_KERNEL); - if (!udc->status_req) { - retval = -ENOMEM; - goto err_destroy_dma; - } - INIT_LIST_HEAD(&udc->status_req->queue); - - /* allocate a small amount of memory to get valid address */ - udc->status_req->req.buf = devm_kzalloc(&pdev->dev, 8, GFP_KERNEL); - if (!udc->status_req->req.buf) { - retval = -ENOMEM; - goto err_destroy_dma; - } - udc->status_req->req.dma = DMA_ADDR_INVALID; - - udc->resume_state = USB_STATE_NOTATTACHED; - udc->usb_state = USB_STATE_POWERED; - udc->ep0_dir = EP_DIR_OUT; - udc->remote_wakeup = 0; - - r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no IRQ resource defined\n"); - retval = -ENODEV; - goto err_destroy_dma; - } - udc->irq = r->start; - if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq, - IRQF_SHARED, driver_name, udc)) { - dev_err(&pdev->dev, "Request irq %d for UDC failed\n", - udc->irq); - retval = -ENODEV; - goto err_destroy_dma; - } - - /* initialize gadget structure */ - 
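One step in the removed probe path above that is easy to misread is the dQH buffer sizing: the raw size, max_eps * sizeof(struct mv_dqh) * 2, is rounded up with the standard power-of-two align-up idiom before dma_alloc_coherent(). A small stand-alone sketch of that idiom; DQH_ALIGNMENT and the per-dQH size below are assumed example values, not the driver's real constants:

#include <stdio.h>
#include <stddef.h>

#define DQH_ALIGNMENT	2048u	/* assumed example alignment (power of two) */

static size_t align_up(size_t size, size_t align)
{
	/* valid for any power-of-two alignment */
	return (size + align - 1) & ~(align - 1);
}

int main(void)
{
	/* assumed example: 6 endpoints, 64-byte queue heads, IN + OUT each */
	size_t raw = 6 * 64 * 2;

	printf("raw %zu bytes -> aligned %zu bytes\n",
	       raw, align_up(raw, DQH_ALIGNMENT));
	return 0;
}

The gadget structure initialization that the probe performs next (ops, ep0, ep_list, speed, name) then hands the controller to the UDC core via usb_add_gadget_udc_release().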
udc->gadget.ops = &mv_ops; /* usb_gadget_ops */ - udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */ - INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */ - udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */ - udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */ - - /* the "gadget" abstracts/virtualizes the controller */ - udc->gadget.name = driver_name; /* gadget name */ - - eps_init(udc); - - /* VBUS detect: we can disable/enable clock on demand.*/ - if (udc->transceiver) - udc->clock_gating = 1; - else if (pdata->vbus) { - udc->clock_gating = 1; - retval = devm_request_threaded_irq(&pdev->dev, - pdata->vbus->irq, NULL, - mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc); - if (retval) { - dev_info(&pdev->dev, - "Can not request irq for VBUS, " - "disable clock gating\n"); - udc->clock_gating = 0; - } - - udc->qwork = create_singlethread_workqueue("mv_udc_queue"); - if (!udc->qwork) { - dev_err(&pdev->dev, "cannot create workqueue\n"); - retval = -ENOMEM; - goto err_destroy_dma; - } - - INIT_WORK(&udc->vbus_work, mv_udc_vbus_work); - } - - /* - * When clock gating is supported, we can disable clk and phy. - * If not, it means that VBUS detection is not supported, we - * have to enable vbus active all the time to let controller work. - */ - if (udc->clock_gating) - mv_udc_disable_internal(udc); - else - udc->vbus_active = 1; - - retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget, - gadget_release); - if (retval) - goto err_create_workqueue; - - platform_set_drvdata(pdev, udc); - dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n", - udc->clock_gating ? "with" : "without"); - - return 0; - -err_create_workqueue: - if (udc->qwork) - destroy_workqueue(udc->qwork); -err_destroy_dma: - dma_pool_destroy(udc->dtd_pool); -err_free_dma: - dma_free_coherent(&pdev->dev, udc->ep_dqh_size, - udc->ep_dqh, udc->ep_dqh_dma); -err_disable_clock: - mv_udc_disable_internal(udc); - - return retval; -} - -#ifdef CONFIG_PM -static int mv_udc_suspend(struct device *dev) -{ - struct mv_udc *udc; - - udc = dev_get_drvdata(dev); - - /* if OTG is enabled, the following will be done in OTG driver*/ - if (udc->transceiver) - return 0; - - if (udc->pdata->vbus && udc->pdata->vbus->poll) - if (udc->pdata->vbus->poll() == VBUS_HIGH) { - dev_info(&udc->dev->dev, "USB cable is connected!\n"); - return -EAGAIN; - } - - /* - * only cable is unplugged, udc can suspend. - * So do not care about clock_gating == 1. 
- */ - if (!udc->clock_gating) { - udc_stop(udc); - - spin_lock_irq(&udc->lock); - /* stop all usb activities */ - stop_activity(udc, udc->driver); - spin_unlock_irq(&udc->lock); - - mv_udc_disable_internal(udc); - } - - return 0; -} - -static int mv_udc_resume(struct device *dev) -{ - struct mv_udc *udc; - int retval; - - udc = dev_get_drvdata(dev); - - /* if OTG is enabled, the following will be done in OTG driver*/ - if (udc->transceiver) - return 0; - - if (!udc->clock_gating) { - retval = mv_udc_enable_internal(udc); - if (retval) - return retval; - - if (udc->driver && udc->softconnect) { - udc_reset(udc); - ep0_reset(udc); - udc_start(udc); - } - } - - return 0; -} - -static const struct dev_pm_ops mv_udc_pm_ops = { - .suspend = mv_udc_suspend, - .resume = mv_udc_resume, -}; -#endif - -static void mv_udc_shutdown(struct platform_device *pdev) -{ - struct mv_udc *udc; - u32 mode; - - udc = platform_get_drvdata(pdev); - /* reset controller mode to IDLE */ - mv_udc_enable(udc); - mode = readl(&udc->op_regs->usbmode); - mode &= ~3; - writel(mode, &udc->op_regs->usbmode); - mv_udc_disable(udc); -} - -static struct platform_driver udc_driver = { - .probe = mv_udc_probe, - .remove = mv_udc_remove, - .shutdown = mv_udc_shutdown, - .driver = { - .name = "mv-udc", -#ifdef CONFIG_PM - .pm = &mv_udc_pm_ops, -#endif - }, -}; - -module_platform_driver(udc_driver); -MODULE_ALIAS("platform:mv-udc"); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c deleted file mode 100644 index 7ecddbf5c90d..000000000000 --- a/drivers/usb/gadget/udc/net2272.c +++ /dev/null @@ -1,2723 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Driver for PLX NET2272 USB device controller - * - * Copyright (C) 2005-2006 PLX Technology, Inc. - * Copyright (C) 2006-2011 Analog Devices, Inc. - */ - -#include <linux/delay.h> -#include <linux/device.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/ioport.h> -#include <linux/kernel.h> -#include <linux/list.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/pci.h> -#include <linux/platform_device.h> -#include <linux/prefetch.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/timer.h> -#include <linux/usb.h> -#include <linux/usb/ch9.h> -#include <linux/usb/gadget.h> - -#include <asm/byteorder.h> -#include <linux/unaligned.h> - -#include "net2272.h" - -#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller" - -static const char driver_name[] = "net2272"; -static const char driver_vers[] = "2006 October 17/mainline"; -static const char driver_desc[] = DRIVER_DESC; - -static const char ep0name[] = "ep0"; -static const char * const ep_name[] = { - ep0name, - "ep-a", "ep-b", "ep-c", -}; - -#ifdef CONFIG_USB_NET2272_DMA -/* - * use_dma: the NET2272 can use an external DMA controller. - * Note that since there is no generic DMA api, some functions, - * notably request_dma, start_dma, and cancel_dma will need to be - * modified for your platform's particular dma controller. - * - * If use_dma is disabled, pio will be used instead. - */ -static bool use_dma = false; -module_param(use_dma, bool, 0644); - -/* - * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b) - * The NET2272 can only use dma for a single endpoint at a time. 
- * At some point this could be modified to allow either endpoint - * to take control of dma as it becomes available. - * - * Note that DMA should not be used on OUT endpoints unless it can - * be guaranteed that no short packets will arrive on an IN endpoint - * while the DMA operation is pending. Otherwise the OUT DMA will - * terminate prematurely (See NET2272 Errata 630-0213-0101) - */ -static ushort dma_ep = 1; -module_param(dma_ep, ushort, 0644); - -/* - * dma_mode: net2272 dma mode setting (see LOCCTL1 definition): - * mode 0 == Slow DREQ mode - * mode 1 == Fast DREQ mode - * mode 2 == Burst mode - */ -static ushort dma_mode = 2; -module_param(dma_mode, ushort, 0644); -#else -#define use_dma 0 -#define dma_ep 1 -#define dma_mode 2 -#endif - -/* - * fifo_mode: net2272 buffer configuration: - * mode 0 == ep-{a,b,c} 512db each - * mode 1 == ep-a 1k, ep-{b,c} 512db - * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db - * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db - */ -static ushort fifo_mode; -module_param(fifo_mode, ushort, 0644); - -/* - * enable_suspend: When enabled, the driver will respond to - * USB suspend requests by powering down the NET2272. Otherwise, - * USB suspend requests will be ignored. This is acceptable for - * self-powered devices. For bus powered devices set this to 1. - */ -static ushort enable_suspend; -module_param(enable_suspend, ushort, 0644); - -static void assert_out_naking(struct net2272_ep *ep, const char *where) -{ - u8 tmp; - -#ifndef DEBUG - return; -#endif - - tmp = net2272_ep_read(ep, EP_STAT0); - if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) { - dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n", - ep->ep.name, where, tmp); - net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); - } -} -#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__) - -static void stop_out_naking(struct net2272_ep *ep) -{ - u8 tmp = net2272_ep_read(ep, EP_STAT0); - - if ((tmp & (1 << NAK_OUT_PACKETS)) != 0) - net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); -} - -#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? 
"in" : "out") - -static char *type_string(u8 bmAttributes) -{ - switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) { - case USB_ENDPOINT_XFER_BULK: return "bulk"; - case USB_ENDPOINT_XFER_ISOC: return "iso"; - case USB_ENDPOINT_XFER_INT: return "intr"; - default: return "control"; - } -} - -static char *buf_state_string(unsigned state) -{ - switch (state) { - case BUFF_FREE: return "free"; - case BUFF_VALID: return "valid"; - case BUFF_LCL: return "local"; - case BUFF_USB: return "usb"; - default: return "unknown"; - } -} - -static char *dma_mode_string(void) -{ - if (!use_dma) - return "PIO"; - switch (dma_mode) { - case 0: return "SLOW DREQ"; - case 1: return "FAST DREQ"; - case 2: return "BURST"; - default: return "invalid"; - } -} - -static void net2272_dequeue_all(struct net2272_ep *); -static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *); -static int net2272_fifo_status(struct usb_ep *); - -static const struct usb_ep_ops net2272_ep_ops; - -/*---------------------------------------------------------------------------*/ - -static int -net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) -{ - struct net2272 *dev; - struct net2272_ep *ep; - u32 max; - u8 tmp; - unsigned long flags; - - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || !desc || ep->desc || _ep->name == ep0name - || desc->bDescriptorType != USB_DT_ENDPOINT) - return -EINVAL; - dev = ep->dev; - if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - max = usb_endpoint_maxp(desc); - - spin_lock_irqsave(&dev->lock, flags); - _ep->maxpacket = max; - ep->desc = desc; - - /* net2272_ep_reset() has already been called */ - ep->stopped = 0; - ep->wedged = 0; - - /* set speed-dependent max packet */ - net2272_ep_write(ep, EP_MAXPKT0, max & 0xff); - net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8); - - /* set type, direction, address; reset fifo counters */ - net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); - tmp = usb_endpoint_type(desc); - if (usb_endpoint_xfer_bulk(desc)) { - /* catch some particularly blatant driver bugs */ - if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) || - (dev->gadget.speed == USB_SPEED_FULL && max > 64)) { - spin_unlock_irqrestore(&dev->lock, flags); - return -ERANGE; - } - } - ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 
1 : 0; - tmp <<= ENDPOINT_TYPE; - tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER); - tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION; - tmp |= (1 << ENDPOINT_ENABLE); - - /* for OUT transfers, block the rx fifo until a read is posted */ - ep->is_in = usb_endpoint_dir_in(desc); - if (!ep->is_in) - net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); - - net2272_ep_write(ep, EP_CFG, tmp); - - /* enable irqs */ - tmp = (1 << ep->num) | net2272_read(dev, IRQENB0); - net2272_write(dev, IRQENB0, tmp); - - tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) - | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) - | net2272_ep_read(ep, EP_IRQENB); - net2272_ep_write(ep, EP_IRQENB, tmp); - - tmp = desc->bEndpointAddress; - dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n", - _ep->name, tmp & 0x0f, PIPEDIR(tmp), - type_string(desc->bmAttributes), max, - net2272_ep_read(ep, EP_CFG)); - - spin_unlock_irqrestore(&dev->lock, flags); - return 0; -} - -static void net2272_ep_reset(struct net2272_ep *ep) -{ - u8 tmp; - - ep->desc = NULL; - INIT_LIST_HEAD(&ep->queue); - - usb_ep_set_maxpacket_limit(&ep->ep, ~0); - ep->ep.ops = &net2272_ep_ops; - - /* disable irqs, endpoint */ - net2272_ep_write(ep, EP_IRQENB, 0); - - /* init to our chosen defaults, notably so that we NAK OUT - * packets until the driver queues a read. - */ - tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS); - net2272_ep_write(ep, EP_RSPSET, tmp); - - tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE); - if (ep->num != 0) - tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT); - - net2272_ep_write(ep, EP_RSPCLR, tmp); - - /* scrub most status bits, and flush any fifo state */ - net2272_ep_write(ep, EP_STAT0, - (1 << DATA_IN_TOKEN_INTERRUPT) - | (1 << DATA_OUT_TOKEN_INTERRUPT) - | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) - | (1 << DATA_PACKET_RECEIVED_INTERRUPT) - | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); - - net2272_ep_write(ep, EP_STAT1, - (1 << TIMEOUT) - | (1 << USB_OUT_ACK_SENT) - | (1 << USB_OUT_NAK_SENT) - | (1 << USB_IN_ACK_RCVD) - | (1 << USB_IN_NAK_SENT) - | (1 << USB_STALL_SENT) - | (1 << LOCAL_OUT_ZLP) - | (1 << BUFFER_FLUSH)); - - /* fifo size is handled separately */ -} - -static int net2272_disable(struct usb_ep *_ep) -{ - struct net2272_ep *ep; - unsigned long flags; - - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || !ep->desc || _ep->name == ep0name) - return -EINVAL; - - spin_lock_irqsave(&ep->dev->lock, flags); - net2272_dequeue_all(ep); - net2272_ep_reset(ep); - - dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name); - - spin_unlock_irqrestore(&ep->dev->lock, flags); - return 0; -} - -/*---------------------------------------------------------------------------*/ - -static struct usb_request * -net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) -{ - struct net2272_request *req; - - if (!_ep) - return NULL; - - req = kzalloc(sizeof(*req), gfp_flags); - if (!req) - return NULL; - - INIT_LIST_HEAD(&req->queue); - - return &req->req; -} - -static void -net2272_free_request(struct usb_ep *_ep, struct usb_request *_req) -{ - struct net2272_request *req; - - if (!_ep || !_req) - return; - - req = container_of(_req, struct net2272_request, req); - WARN_ON(!list_empty(&req->queue)); - kfree(req); -} - -static void -net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) -{ - struct net2272 *dev; - unsigned stopped = ep->stopped; - - if (ep->num == 0) { - if (ep->dev->protocol_stall) { - ep->stopped = 1; - set_halt(ep); - } - 
allow_status(ep); - } - - list_del_init(&req->queue); - - if (req->req.status == -EINPROGRESS) - req->req.status = status; - else - status = req->req.status; - - dev = ep->dev; - if (use_dma && ep->dma) - usb_gadget_unmap_request(&dev->gadget, &req->req, - ep->is_in); - - if (status && status != -ESHUTDOWN) - dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n", - ep->ep.name, &req->req, status, - req->req.actual, req->req.length, req->req.buf); - - /* don't modify queue heads during completion callback */ - ep->stopped = 1; - spin_unlock(&dev->lock); - usb_gadget_giveback_request(&ep->ep, &req->req); - spin_lock(&dev->lock); - ep->stopped = stopped; -} - -static int -net2272_write_packet(struct net2272_ep *ep, u8 *buf, - struct net2272_request *req, unsigned max) -{ - u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); - u16 *bufp; - unsigned length, count; - u8 tmp; - - length = min(req->req.length - req->req.actual, max); - req->req.actual += length; - - dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", - ep->ep.name, req, max, length, - (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); - - count = length; - bufp = (u16 *)buf; - - while (likely(count >= 2)) { - /* no byte-swap required; chip endian set during init */ - writew(*bufp++, ep_data); - count -= 2; - } - buf = (u8 *)bufp; - - /* write final byte by placing the NET2272 into 8-bit mode */ - if (unlikely(count)) { - tmp = net2272_read(ep->dev, LOCCTL); - net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH)); - writeb(*buf, ep_data); - net2272_write(ep->dev, LOCCTL, tmp); - } - return length; -} - -/* returns: 0: still running, 1: completed, negative: errno */ -static int -net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) -{ - u8 *buf; - unsigned count, max; - int status; - - dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n", - ep->ep.name, req->req.actual, req->req.length); - - /* - * Keep loading the endpoint until the final packet is loaded, - * or the endpoint buffer is full. 
- */ - top: - /* - * Clear interrupt status - * - Packet Transmitted interrupt will become set again when the - * host successfully takes another packet - */ - net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); - while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) { - buf = req->req.buf + req->req.actual; - prefetch(buf); - - /* force pagesel */ - net2272_ep_read(ep, EP_STAT0); - - max = (net2272_ep_read(ep, EP_AVAIL1) << 8) | - (net2272_ep_read(ep, EP_AVAIL0)); - - if (max < ep->ep.maxpacket) - max = (net2272_ep_read(ep, EP_AVAIL1) << 8) - | (net2272_ep_read(ep, EP_AVAIL0)); - - count = net2272_write_packet(ep, buf, req, max); - /* see if we are done */ - if (req->req.length == req->req.actual) { - /* validate short or zlp packet */ - if (count < ep->ep.maxpacket) - set_fifo_bytecount(ep, 0); - net2272_done(ep, req, 0); - - if (!list_empty(&ep->queue)) { - req = list_entry(ep->queue.next, - struct net2272_request, - queue); - status = net2272_kick_dma(ep, req); - - if (status < 0) - if ((net2272_ep_read(ep, EP_STAT0) - & (1 << BUFFER_EMPTY))) - goto top; - } - return 1; - } - net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)); - } - return 0; -} - -static void -net2272_out_flush(struct net2272_ep *ep) -{ - ASSERT_OUT_NAKING(ep); - - net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT) - | (1 << DATA_PACKET_RECEIVED_INTERRUPT)); - net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); -} - -static int -net2272_read_packet(struct net2272_ep *ep, u8 *buf, - struct net2272_request *req, unsigned avail) -{ - u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA); - unsigned is_short; - u16 *bufp; - - req->req.actual += avail; - - dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", - ep->ep.name, req, avail, - (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0)); - - is_short = (avail < ep->ep.maxpacket); - - if (unlikely(avail == 0)) { - /* remove any zlp from the buffer */ - (void)readw(ep_data); - return is_short; - } - - /* Ensure we get the final byte */ - if (unlikely(avail % 2)) - avail++; - bufp = (u16 *)buf; - - do { - *bufp++ = readw(ep_data); - avail -= 2; - } while (avail); - - /* - * To avoid false endpoint available race condition must read - * ep stat0 twice in the case of a short transfer - */ - if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) - net2272_ep_read(ep, EP_STAT0); - - return is_short; -} - -static int -net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) -{ - u8 *buf; - unsigned is_short; - int count; - int tmp; - int cleanup = 0; - - dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n", - ep->ep.name, req->req.actual, req->req.length); - - top: - do { - buf = req->req.buf + req->req.actual; - prefetchw(buf); - - count = (net2272_ep_read(ep, EP_AVAIL1) << 8) - | net2272_ep_read(ep, EP_AVAIL0); - - net2272_ep_write(ep, EP_STAT0, - (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) | - (1 << DATA_PACKET_RECEIVED_INTERRUPT)); - - tmp = req->req.length - req->req.actual; - - if (count > tmp) { - if ((tmp % ep->ep.maxpacket) != 0) { - dev_err(ep->dev->dev, - "%s out fifo %d bytes, expected %d\n", - ep->ep.name, count, tmp); - cleanup = 1; - } - count = (tmp > 0) ? 
tmp : 0; - } - - is_short = net2272_read_packet(ep, buf, req, count); - - /* completion */ - if (unlikely(cleanup || is_short || - req->req.actual == req->req.length)) { - - if (cleanup) { - net2272_out_flush(ep); - net2272_done(ep, req, -EOVERFLOW); - } else - net2272_done(ep, req, 0); - - /* re-initialize endpoint transfer registers - * otherwise they may result in erroneous pre-validation - * for subsequent control reads - */ - if (unlikely(ep->num == 0)) { - net2272_ep_write(ep, EP_TRANSFER2, 0); - net2272_ep_write(ep, EP_TRANSFER1, 0); - net2272_ep_write(ep, EP_TRANSFER0, 0); - } - - if (!list_empty(&ep->queue)) { - int status; - - req = list_entry(ep->queue.next, - struct net2272_request, queue); - status = net2272_kick_dma(ep, req); - if ((status < 0) && - !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))) - goto top; - } - return 1; - } - } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY))); - - return 0; -} - -static void -net2272_pio_advance(struct net2272_ep *ep) -{ - struct net2272_request *req; - - if (unlikely(list_empty(&ep->queue))) - return; - - req = list_entry(ep->queue.next, struct net2272_request, queue); - (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); -} - -/* returns 0 on success, else negative errno */ -static int -net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf, - unsigned len, unsigned dir) -{ - dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n", - ep, buf, len, dir); - - /* The NET2272 only supports a single dma channel */ - if (dev->dma_busy) - return -EBUSY; - /* - * EP_TRANSFER (used to determine the number of bytes received - * in an OUT transfer) is 24 bits wide; don't ask for more than that. - */ - if ((dir == 1) && (len > 0x1000000)) - return -EINVAL; - - dev->dma_busy = 1; - - /* initialize platform's dma */ -#ifdef CONFIG_USB_PCI - /* NET2272 addr, buffer addr, length, etc. 
*/ - switch (dev->dev_id) { - case PCI_DEVICE_ID_RDK1: - /* Setup PLX 9054 DMA mode */ - writel((1 << LOCAL_BUS_WIDTH) | - (1 << TA_READY_INPUT_ENABLE) | - (0 << LOCAL_BURST_ENABLE) | - (1 << DONE_INTERRUPT_ENABLE) | - (1 << LOCAL_ADDRESSING_MODE) | - (1 << DEMAND_MODE) | - (1 << DMA_EOT_ENABLE) | - (1 << FAST_SLOW_TERMINATE_MODE_SELECT) | - (1 << DMA_CHANNEL_INTERRUPT_SELECT), - dev->rdk1.plx9054_base_addr + DMAMODE0); - - writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0); - writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0); - writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0); - writel((dir << DIRECTION_OF_TRANSFER) | - (1 << INTERRUPT_AFTER_TERMINAL_COUNT), - dev->rdk1.plx9054_base_addr + DMADPR0); - writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) | - readl(dev->rdk1.plx9054_base_addr + INTCSR), - dev->rdk1.plx9054_base_addr + INTCSR); - - break; - } -#endif - - net2272_write(dev, DMAREQ, - (0 << DMA_BUFFER_VALID) | - (1 << DMA_REQUEST_ENABLE) | - (1 << DMA_CONTROL_DACK) | - (dev->dma_eot_polarity << EOT_POLARITY) | - (dev->dma_dack_polarity << DACK_POLARITY) | - (dev->dma_dreq_polarity << DREQ_POLARITY) | - ((ep >> 1) << DMA_ENDPOINT_SELECT)); - - (void) net2272_read(dev, SCRATCH); - - return 0; -} - -static void -net2272_start_dma(struct net2272 *dev) -{ - /* start platform's dma controller */ -#ifdef CONFIG_USB_PCI - switch (dev->dev_id) { - case PCI_DEVICE_ID_RDK1: - writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START), - dev->rdk1.plx9054_base_addr + DMACSR0); - break; - } -#endif -} - -/* returns 0 on success, else negative errno */ -static int -net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) -{ - unsigned size; - u8 tmp; - - if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma) - return -EINVAL; - - /* don't use dma for odd-length transfers - * otherwise, we'd need to deal with the last byte with pio - */ - if (req->req.length & 1) - return -EINVAL; - - dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n", - ep->ep.name, req, (unsigned long long) req->req.dma); - - net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS); - - /* The NET2272 can only use DMA on one endpoint at a time */ - if (ep->dev->dma_busy) - return -EBUSY; - - /* Make sure we only DMA an even number of bytes (we'll use - * pio to complete the transfer) - */ - size = req->req.length; - size &= ~1; - - /* device-to-host transfer */ - if (ep->is_in) { - /* initialize platform's dma controller */ - if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) - /* unable to obtain DMA channel; return error and use pio mode */ - return -EBUSY; - req->req.actual += size; - - /* host-to-device transfer */ - } else { - tmp = net2272_ep_read(ep, EP_STAT0); - - /* initialize platform's dma controller */ - if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) - /* unable to obtain DMA channel; return error and use pio mode */ - return -EBUSY; - - if (!(tmp & (1 << BUFFER_EMPTY))) - ep->not_empty = 1; - else - ep->not_empty = 0; - - - /* allow the endpoint's buffer to fill */ - net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); - - /* this transfer completed and data's already in the fifo - * return error so pio gets used. 
- */ - if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) { - - /* deassert dreq */ - net2272_write(ep->dev, DMAREQ, - (0 << DMA_BUFFER_VALID) | - (0 << DMA_REQUEST_ENABLE) | - (1 << DMA_CONTROL_DACK) | - (ep->dev->dma_eot_polarity << EOT_POLARITY) | - (ep->dev->dma_dack_polarity << DACK_POLARITY) | - (ep->dev->dma_dreq_polarity << DREQ_POLARITY) | - ((ep->num >> 1) << DMA_ENDPOINT_SELECT)); - - return -EBUSY; - } - } - - /* Don't use per-packet interrupts: use dma interrupts only */ - net2272_ep_write(ep, EP_IRQENB, 0); - - net2272_start_dma(ep->dev); - - return 0; -} - -static void net2272_cancel_dma(struct net2272 *dev) -{ -#ifdef CONFIG_USB_PCI - switch (dev->dev_id) { - case PCI_DEVICE_ID_RDK1: - writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0); - writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0); - while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) & - (1 << CHANNEL_DONE))) - continue; /* wait for dma to stabalize */ - - /* dma abort generates an interrupt */ - writeb(1 << CHANNEL_CLEAR_INTERRUPT, - dev->rdk1.plx9054_base_addr + DMACSR0); - break; - } -#endif - - dev->dma_busy = 0; -} - -/*---------------------------------------------------------------------------*/ - -static int -net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) -{ - struct net2272_request *req; - struct net2272_ep *ep; - struct net2272 *dev; - unsigned long flags; - int status = -1; - u8 s; - - req = container_of(_req, struct net2272_request, req); - if (!_req || !_req->complete || !_req->buf - || !list_empty(&req->queue)) - return -EINVAL; - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || (!ep->desc && ep->num != 0)) - return -EINVAL; - dev = ep->dev; - if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - /* set up dma mapping in case the caller didn't */ - if (use_dma && ep->dma) { - status = usb_gadget_map_request(&dev->gadget, _req, - ep->is_in); - if (status) - return status; - } - - dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n", - _ep->name, _req, _req->length, _req->buf, - (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero"); - - spin_lock_irqsave(&dev->lock, flags); - - _req->status = -EINPROGRESS; - _req->actual = 0; - - /* kickstart this i/o queue? */ - if (list_empty(&ep->queue) && !ep->stopped) { - /* maybe there's no control data, just status ack */ - if (ep->num == 0 && _req->length == 0) { - net2272_done(ep, req, 0); - dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name); - goto done; - } - - /* Return zlp, don't let it block subsequent packets */ - s = net2272_ep_read(ep, EP_STAT0); - if (s & (1 << BUFFER_EMPTY)) { - /* Buffer is empty check for a blocking zlp, handle it */ - if ((s & (1 << NAK_OUT_PACKETS)) && - net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) { - dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n"); - /* - * Request is going to terminate with a short packet ... - * hope the client is ready for it! 
- */ - status = net2272_read_fifo(ep, req); - /* clear short packet naking */ - net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS)); - goto done; - } - } - - /* try dma first */ - status = net2272_kick_dma(ep, req); - - if (status < 0) { - /* dma failed (most likely in use by another endpoint) - * fallback to pio - */ - status = 0; - - if (ep->is_in) - status = net2272_write_fifo(ep, req); - else { - s = net2272_ep_read(ep, EP_STAT0); - if ((s & (1 << BUFFER_EMPTY)) == 0) - status = net2272_read_fifo(ep, req); - } - - if (unlikely(status != 0)) { - if (status > 0) - status = 0; - req = NULL; - } - } - } - if (likely(req)) - list_add_tail(&req->queue, &ep->queue); - - if (likely(!list_empty(&ep->queue))) - net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS); - done: - spin_unlock_irqrestore(&dev->lock, flags); - - return 0; -} - -/* dequeue ALL requests */ -static void -net2272_dequeue_all(struct net2272_ep *ep) -{ - struct net2272_request *req; - - /* called with spinlock held */ - ep->stopped = 1; - - while (!list_empty(&ep->queue)) { - req = list_entry(ep->queue.next, - struct net2272_request, - queue); - net2272_done(ep, req, -ESHUTDOWN); - } -} - -/* dequeue JUST ONE request */ -static int -net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req) -{ - struct net2272_ep *ep; - struct net2272_request *req = NULL, *iter; - unsigned long flags; - int stopped; - - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || (!ep->desc && ep->num != 0) || !_req) - return -EINVAL; - - spin_lock_irqsave(&ep->dev->lock, flags); - stopped = ep->stopped; - ep->stopped = 1; - - /* make sure it's still queued on this endpoint */ - list_for_each_entry(iter, &ep->queue, queue) { - if (&iter->req != _req) - continue; - req = iter; - break; - } - if (!req) { - ep->stopped = stopped; - spin_unlock_irqrestore(&ep->dev->lock, flags); - return -EINVAL; - } - - /* queue head may be partially complete */ - if (ep->queue.next == &req->queue) { - dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name); - net2272_done(ep, req, -ECONNRESET); - } - ep->stopped = stopped; - - spin_unlock_irqrestore(&ep->dev->lock, flags); - return 0; -} - -/*---------------------------------------------------------------------------*/ - -static int -net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged) -{ - struct net2272_ep *ep; - unsigned long flags; - int ret = 0; - - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || (!ep->desc && ep->num != 0)) - return -EINVAL; - if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc)) - return -EINVAL; - - spin_lock_irqsave(&ep->dev->lock, flags); - if (!list_empty(&ep->queue)) - ret = -EAGAIN; - else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) - ret = -EAGAIN; - else { - dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name, - value ? "set" : "clear", - wedged ? 
"wedge" : "halt"); - /* set/clear */ - if (value) { - if (ep->num == 0) - ep->dev->protocol_stall = 1; - else - set_halt(ep); - if (wedged) - ep->wedged = 1; - } else { - clear_halt(ep); - ep->wedged = 0; - } - } - spin_unlock_irqrestore(&ep->dev->lock, flags); - - return ret; -} - -static int -net2272_set_halt(struct usb_ep *_ep, int value) -{ - return net2272_set_halt_and_wedge(_ep, value, 0); -} - -static int -net2272_set_wedge(struct usb_ep *_ep) -{ - if (!_ep || _ep->name == ep0name) - return -EINVAL; - return net2272_set_halt_and_wedge(_ep, 1, 1); -} - -static int -net2272_fifo_status(struct usb_ep *_ep) -{ - struct net2272_ep *ep; - u16 avail; - - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || (!ep->desc && ep->num != 0)) - return -ENODEV; - if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) - return -ESHUTDOWN; - - avail = net2272_ep_read(ep, EP_AVAIL1) << 8; - avail |= net2272_ep_read(ep, EP_AVAIL0); - if (avail > ep->fifo_size) - return -EOVERFLOW; - if (ep->is_in) - avail = ep->fifo_size - avail; - return avail; -} - -static void -net2272_fifo_flush(struct usb_ep *_ep) -{ - struct net2272_ep *ep; - - ep = container_of(_ep, struct net2272_ep, ep); - if (!_ep || (!ep->desc && ep->num != 0)) - return; - if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) - return; - - net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); -} - -static const struct usb_ep_ops net2272_ep_ops = { - .enable = net2272_enable, - .disable = net2272_disable, - - .alloc_request = net2272_alloc_request, - .free_request = net2272_free_request, - - .queue = net2272_queue, - .dequeue = net2272_dequeue, - - .set_halt = net2272_set_halt, - .set_wedge = net2272_set_wedge, - .fifo_status = net2272_fifo_status, - .fifo_flush = net2272_fifo_flush, -}; - -/*---------------------------------------------------------------------------*/ - -static int -net2272_get_frame(struct usb_gadget *_gadget) -{ - struct net2272 *dev; - unsigned long flags; - u16 ret; - - if (!_gadget) - return -ENODEV; - dev = container_of(_gadget, struct net2272, gadget); - spin_lock_irqsave(&dev->lock, flags); - - ret = net2272_read(dev, FRAME1) << 8; - ret |= net2272_read(dev, FRAME0); - - spin_unlock_irqrestore(&dev->lock, flags); - return ret; -} - -static int -net2272_wakeup(struct usb_gadget *_gadget) -{ - struct net2272 *dev; - u8 tmp; - unsigned long flags; - - if (!_gadget) - return 0; - dev = container_of(_gadget, struct net2272, gadget); - - spin_lock_irqsave(&dev->lock, flags); - tmp = net2272_read(dev, USBCTL0); - if (tmp & (1 << IO_WAKEUP_ENABLE)) - net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME)); - - spin_unlock_irqrestore(&dev->lock, flags); - - return 0; -} - -static int -net2272_set_selfpowered(struct usb_gadget *_gadget, int value) -{ - if (!_gadget) - return -ENODEV; - - _gadget->is_selfpowered = (value != 0); - - return 0; -} - -static int -net2272_pullup(struct usb_gadget *_gadget, int is_on) -{ - struct net2272 *dev; - u8 tmp; - unsigned long flags; - - if (!_gadget) - return -ENODEV; - dev = container_of(_gadget, struct net2272, gadget); - - spin_lock_irqsave(&dev->lock, flags); - tmp = net2272_read(dev, USBCTL0); - dev->softconnect = (is_on != 0); - if (is_on) - tmp |= (1 << USB_DETECT_ENABLE); - else - tmp &= ~(1 << USB_DETECT_ENABLE); - net2272_write(dev, USBCTL0, tmp); - spin_unlock_irqrestore(&dev->lock, flags); - - return 0; -} - -static int net2272_start(struct usb_gadget *_gadget, - struct usb_gadget_driver *driver); -static int net2272_stop(struct usb_gadget *_gadget); 
-static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable); - -static const struct usb_gadget_ops net2272_ops = { - .get_frame = net2272_get_frame, - .wakeup = net2272_wakeup, - .set_selfpowered = net2272_set_selfpowered, - .pullup = net2272_pullup, - .udc_start = net2272_start, - .udc_stop = net2272_stop, - .udc_async_callbacks = net2272_async_callbacks, -}; - -/*---------------------------------------------------------------------------*/ - -static ssize_t -registers_show(struct device *_dev, struct device_attribute *attr, char *buf) -{ - struct net2272 *dev; - char *next; - unsigned size, t; - unsigned long flags; - u8 t1, t2; - int i; - const char *s; - - dev = dev_get_drvdata(_dev); - next = buf; - size = PAGE_SIZE; - spin_lock_irqsave(&dev->lock, flags); - - /* Main Control Registers */ - t = scnprintf(next, size, "%s version %s," - "chiprev %02x, locctl %02x\n" - "irqenb0 %02x irqenb1 %02x " - "irqstat0 %02x irqstat1 %02x\n", - driver_name, driver_vers, dev->chiprev, - net2272_read(dev, LOCCTL), - net2272_read(dev, IRQENB0), - net2272_read(dev, IRQENB1), - net2272_read(dev, IRQSTAT0), - net2272_read(dev, IRQSTAT1)); - size -= t; - next += t; - - /* DMA */ - t1 = net2272_read(dev, DMAREQ); - t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n", - t1, ep_name[(t1 & 0x01) + 1], - t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "", - t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "", - t1 & (1 << DMA_REQUEST) ? "req " : "", - t1 & (1 << DMA_BUFFER_VALID) ? "valid " : ""); - size -= t; - next += t; - - /* USB Control Registers */ - t1 = net2272_read(dev, USBCTL1); - if (t1 & (1 << VBUS_PIN)) { - if (t1 & (1 << USB_HIGH_SPEED)) - s = "high speed"; - else if (dev->gadget.speed == USB_SPEED_UNKNOWN) - s = "powered"; - else - s = "full speed"; - } else - s = "not attached"; - t = scnprintf(next, size, - "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n", - net2272_read(dev, USBCTL0), t1, - net2272_read(dev, OURADDR), s); - size -= t; - next += t; - - /* Endpoint Registers */ - for (i = 0; i < 4; ++i) { - struct net2272_ep *ep; - - ep = &dev->ep[i]; - if (i && !ep->desc) - continue; - - t1 = net2272_ep_read(ep, EP_CFG); - t2 = net2272_ep_read(ep, EP_RSPSET); - t = scnprintf(next, size, - "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s" - "irqenb %02x\n", - ep->ep.name, t1, t2, - (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "", - (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "", - (t2 & (1 << AUTOVALIDATE)) ? "auto " : "", - (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "", - (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "", - (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "", - (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ", - (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "", - net2272_ep_read(ep, EP_IRQENB)); - size -= t; - next += t; - - t = scnprintf(next, size, - "\tstat0 %02x stat1 %02x avail %04x " - "(ep%d%s-%s)%s\n", - net2272_ep_read(ep, EP_STAT0), - net2272_ep_read(ep, EP_STAT1), - (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0), - t1 & 0x0f, - ep->is_in ? "in" : "out", - type_string(t1 >> 5), - ep->stopped ? 
"*" : ""); - size -= t; - next += t; - - t = scnprintf(next, size, - "\tep_transfer %06x\n", - ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) | - ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) | - ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff))); - size -= t; - next += t; - - t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03; - t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03; - t = scnprintf(next, size, - "\tbuf-a %s buf-b %s\n", - buf_state_string(t1), - buf_state_string(t2)); - size -= t; - next += t; - } - - spin_unlock_irqrestore(&dev->lock, flags); - - return PAGE_SIZE - size; -} -static DEVICE_ATTR_RO(registers); - -/*---------------------------------------------------------------------------*/ - -static void -net2272_set_fifo_mode(struct net2272 *dev, int mode) -{ - u8 tmp; - - tmp = net2272_read(dev, LOCCTL) & 0x3f; - tmp |= (mode << 6); - net2272_write(dev, LOCCTL, tmp); - - INIT_LIST_HEAD(&dev->gadget.ep_list); - - /* always ep-a, ep-c ... maybe not ep-b */ - list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list); - - switch (mode) { - case 0: - list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); - dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512; - break; - case 1: - list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); - dev->ep[1].fifo_size = 1024; - dev->ep[2].fifo_size = 512; - break; - case 2: - list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list); - dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024; - break; - case 3: - dev->ep[1].fifo_size = 1024; - break; - } - - /* ep-c is always 2 512 byte buffers */ - list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list); - dev->ep[3].fifo_size = 512; -} - -/*---------------------------------------------------------------------------*/ - -static void -net2272_usb_reset(struct net2272 *dev) -{ - dev->gadget.speed = USB_SPEED_UNKNOWN; - - net2272_cancel_dma(dev); - - net2272_write(dev, IRQENB0, 0); - net2272_write(dev, IRQENB1, 0); - - /* clear irq state */ - net2272_write(dev, IRQSTAT0, 0xff); - net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT)); - - net2272_write(dev, DMAREQ, - (0 << DMA_BUFFER_VALID) | - (0 << DMA_REQUEST_ENABLE) | - (1 << DMA_CONTROL_DACK) | - (dev->dma_eot_polarity << EOT_POLARITY) | - (dev->dma_dack_polarity << DACK_POLARITY) | - (dev->dma_dreq_polarity << DREQ_POLARITY) | - ((dma_ep >> 1) << DMA_ENDPOINT_SELECT)); - - net2272_cancel_dma(dev); - net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0); - - /* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping - * note that the higher level gadget drivers are expected to convert data to little endian. 
- * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here - */ - net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH)); - net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE)); -} - -static void -net2272_usb_reinit(struct net2272 *dev) -{ - int i; - - /* basic endpoint init */ - for (i = 0; i < 4; ++i) { - struct net2272_ep *ep = &dev->ep[i]; - - ep->ep.name = ep_name[i]; - ep->dev = dev; - ep->num = i; - ep->not_empty = 0; - - if (use_dma && ep->num == dma_ep) - ep->dma = 1; - - if (i > 0 && i <= 3) - ep->fifo_size = 512; - else - ep->fifo_size = 64; - net2272_ep_reset(ep); - - if (i == 0) { - ep->ep.caps.type_control = true; - } else { - ep->ep.caps.type_iso = true; - ep->ep.caps.type_bulk = true; - ep->ep.caps.type_int = true; - } - - ep->ep.caps.dir_in = true; - ep->ep.caps.dir_out = true; - } - usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64); - - dev->gadget.ep0 = &dev->ep[0].ep; - dev->ep[0].stopped = 0; - INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); -} - -static void -net2272_ep0_start(struct net2272 *dev) -{ - struct net2272_ep *ep0 = &dev->ep[0]; - - net2272_ep_write(ep0, EP_RSPSET, - (1 << NAK_OUT_PACKETS_MODE) | - (1 << ALT_NAK_OUT_PACKETS)); - net2272_ep_write(ep0, EP_RSPCLR, - (1 << HIDE_STATUS_PHASE) | - (1 << CONTROL_STATUS_PHASE_HANDSHAKE)); - net2272_write(dev, USBCTL0, - (dev->softconnect << USB_DETECT_ENABLE) | - (1 << USB_ROOT_PORT_WAKEUP_ENABLE) | - (1 << IO_WAKEUP_ENABLE)); - net2272_write(dev, IRQENB0, - (1 << SETUP_PACKET_INTERRUPT_ENABLE) | - (1 << ENDPOINT_0_INTERRUPT_ENABLE) | - (1 << DMA_DONE_INTERRUPT_ENABLE)); - net2272_write(dev, IRQENB1, - (1 << VBUS_INTERRUPT_ENABLE) | - (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) | - (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE)); -} - -/* when a driver is successfully registered, it will receive - * control requests including set_configuration(), which enables - * non-control requests. then usb traffic follows until a - * disconnect is reported. then a host may connect again, or - * the driver might get unbound. - */ -static int net2272_start(struct usb_gadget *_gadget, - struct usb_gadget_driver *driver) -{ - struct net2272 *dev; - unsigned i; - - if (!driver || !driver->setup || - driver->max_speed != USB_SPEED_HIGH) - return -EINVAL; - - dev = container_of(_gadget, struct net2272, gadget); - - for (i = 0; i < 4; ++i) - dev->ep[i].irqs = 0; - /* hook up the driver ... */ - dev->softconnect = 1; - dev->driver = driver; - - /* ... then enable host detection and ep0; and we're ready - * for set_configuration as well as eventual disconnect. - */ - net2272_ep0_start(dev); - - return 0; -} - -static void -stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) -{ - int i; - - /* don't disconnect if it's not connected */ - if (dev->gadget.speed == USB_SPEED_UNKNOWN) - driver = NULL; - - /* stop hardware; prevent new request submissions; - * and kill any outstanding requests. 
- */ - net2272_usb_reset(dev); - for (i = 0; i < 4; ++i) - net2272_dequeue_all(&dev->ep[i]); - - /* report disconnect; the driver is already quiesced */ - if (dev->async_callbacks && driver) { - spin_unlock(&dev->lock); - driver->disconnect(&dev->gadget); - spin_lock(&dev->lock); - } - - net2272_usb_reinit(dev); -} - -static int net2272_stop(struct usb_gadget *_gadget) -{ - struct net2272 *dev; - unsigned long flags; - - dev = container_of(_gadget, struct net2272, gadget); - - spin_lock_irqsave(&dev->lock, flags); - stop_activity(dev, NULL); - spin_unlock_irqrestore(&dev->lock, flags); - - dev->driver = NULL; - - return 0; -} - -static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable) -{ - struct net2272 *dev = container_of(_gadget, struct net2272, gadget); - - spin_lock_irq(&dev->lock); - dev->async_callbacks = enable; - spin_unlock_irq(&dev->lock); -} - -/*---------------------------------------------------------------------------*/ -/* handle ep-a/ep-b dma completions */ -static void -net2272_handle_dma(struct net2272_ep *ep) -{ - struct net2272_request *req; - unsigned len; - int status; - - if (!list_empty(&ep->queue)) - req = list_entry(ep->queue.next, - struct net2272_request, queue); - else - req = NULL; - - dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); - - /* Ensure DREQ is de-asserted */ - net2272_write(ep->dev, DMAREQ, - (0 << DMA_BUFFER_VALID) - | (0 << DMA_REQUEST_ENABLE) - | (1 << DMA_CONTROL_DACK) - | (ep->dev->dma_eot_polarity << EOT_POLARITY) - | (ep->dev->dma_dack_polarity << DACK_POLARITY) - | (ep->dev->dma_dreq_polarity << DREQ_POLARITY) - | (ep->dma << DMA_ENDPOINT_SELECT)); - - ep->dev->dma_busy = 0; - - net2272_ep_write(ep, EP_IRQENB, - (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) - | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) - | net2272_ep_read(ep, EP_IRQENB)); - - /* device-to-host transfer completed */ - if (ep->is_in) { - /* validate a short packet or zlp if necessary */ - if ((req->req.length % ep->ep.maxpacket != 0) || - req->req.zero) - set_fifo_bytecount(ep, 0); - - net2272_done(ep, req, 0); - if (!list_empty(&ep->queue)) { - req = list_entry(ep->queue.next, - struct net2272_request, queue); - status = net2272_kick_dma(ep, req); - if (status < 0) - net2272_pio_advance(ep); - } - - /* host-to-device transfer completed */ - } else { - /* terminated with a short packet? */ - if (net2272_read(ep->dev, IRQSTAT0) & - (1 << DMA_DONE_INTERRUPT)) { - /* abort system dma */ - net2272_cancel_dma(ep->dev); - } - - /* EP_TRANSFER will contain the number of bytes - * actually received. - * NOTE: There is no overflow detection on EP_TRANSFER: - * We can't deal with transfers larger than 2^24 bytes! - */ - len = (net2272_ep_read(ep, EP_TRANSFER2) << 16) - | (net2272_ep_read(ep, EP_TRANSFER1) << 8) - | (net2272_ep_read(ep, EP_TRANSFER0)); - - if (ep->not_empty) - len += 4; - - req->req.actual += len; - - /* get any remaining data */ - net2272_pio_advance(ep); - } -} - -/*---------------------------------------------------------------------------*/ - -static void -net2272_handle_ep(struct net2272_ep *ep) -{ - struct net2272_request *req; - u8 stat0, stat1; - - if (!list_empty(&ep->queue)) - req = list_entry(ep->queue.next, - struct net2272_request, queue); - else - req = NULL; - - /* ack all, and handle what we care about */ - stat0 = net2272_ep_read(ep, EP_STAT0); - stat1 = net2272_ep_read(ep, EP_STAT1); - ep->irqs++; - - dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", - ep->ep.name, stat0, stat1, req ? 
&req->req : NULL); - - net2272_ep_write(ep, EP_STAT0, stat0 & - ~((1 << NAK_OUT_PACKETS) - | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))); - net2272_ep_write(ep, EP_STAT1, stat1); - - /* data packet(s) received (in the fifo, OUT) - * direction must be validated, otherwise control read status phase - * could be interpreted as a valid packet - */ - if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT))) - net2272_pio_advance(ep); - /* data packet(s) transmitted (IN) */ - else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)) - net2272_pio_advance(ep); -} - -static struct net2272_ep * -net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex) -{ - struct net2272_ep *ep; - - if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) - return &dev->ep[0]; - - list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) { - u8 bEndpointAddress; - - if (!ep->desc) - continue; - bEndpointAddress = ep->desc->bEndpointAddress; - if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) - continue; - if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f)) - return ep; - } - return NULL; -} - -/* - * USB Test Packet: - * JKJKJKJK * 9 - * JJKKJJKK * 8 - * JJJJKKKK * 8 - * JJJJJJJKKKKKKK * 8 - * JJJJJJJK * 8 - * {JKKKKKKK * 10}, JK - */ -static const u8 net2272_test_packet[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, - 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, - 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, - 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E -}; - -static void -net2272_set_test_mode(struct net2272 *dev, int mode) -{ - int i; - - /* Disable all net2272 interrupts: - * Nothing but a power cycle should stop the test. - */ - net2272_write(dev, IRQENB0, 0x00); - net2272_write(dev, IRQENB1, 0x00); - - /* Force tranceiver to high-speed */ - net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED); - - net2272_write(dev, PAGESEL, 0); - net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT); - net2272_write(dev, EP_RSPCLR, - (1 << CONTROL_STATUS_PHASE_HANDSHAKE) - | (1 << HIDE_STATUS_PHASE)); - net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION); - net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH); - - /* wait for status phase to complete */ - while (!(net2272_read(dev, EP_STAT0) & - (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))) - ; - - /* Enable test mode */ - net2272_write(dev, USBTEST, mode); - - /* load test packet */ - if (mode == USB_TEST_PACKET) { - /* switch to 8 bit mode */ - net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) & - ~(1 << DATA_WIDTH)); - - for (i = 0; i < sizeof(net2272_test_packet); ++i) - net2272_write(dev, EP_DATA, net2272_test_packet[i]); - - /* Validate test packet */ - net2272_write(dev, EP_TRANSFER0, 0); - } -} - -static void -net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat) -{ - struct net2272_ep *ep; - u8 num, scratch; - - /* starting a control request? 
*/ - if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) { - union { - u8 raw[8]; - struct usb_ctrlrequest r; - } u; - int tmp = 0; - struct net2272_request *req; - - if (dev->gadget.speed == USB_SPEED_UNKNOWN) { - if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED)) - dev->gadget.speed = USB_SPEED_HIGH; - else - dev->gadget.speed = USB_SPEED_FULL; - dev_dbg(dev->dev, "%s\n", - usb_speed_string(dev->gadget.speed)); - } - - ep = &dev->ep[0]; - ep->irqs++; - - /* make sure any leftover interrupt state is cleared */ - stat &= ~(1 << ENDPOINT_0_INTERRUPT); - while (!list_empty(&ep->queue)) { - req = list_entry(ep->queue.next, - struct net2272_request, queue); - net2272_done(ep, req, - (req->req.actual == req->req.length) ? 0 : -EPROTO); - } - ep->stopped = 0; - dev->protocol_stall = 0; - net2272_ep_write(ep, EP_STAT0, - (1 << DATA_IN_TOKEN_INTERRUPT) - | (1 << DATA_OUT_TOKEN_INTERRUPT) - | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT) - | (1 << DATA_PACKET_RECEIVED_INTERRUPT) - | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)); - net2272_ep_write(ep, EP_STAT1, - (1 << TIMEOUT) - | (1 << USB_OUT_ACK_SENT) - | (1 << USB_OUT_NAK_SENT) - | (1 << USB_IN_ACK_RCVD) - | (1 << USB_IN_NAK_SENT) - | (1 << USB_STALL_SENT) - | (1 << LOCAL_OUT_ZLP)); - - /* - * Ensure Control Read pre-validation setting is beyond maximum size - * - Control Writes can leave non-zero values in EP_TRANSFER. If - * an EP0 transfer following the Control Write is a Control Read, - * the NET2272 sees the non-zero EP_TRANSFER as an unexpected - * pre-validation count. - * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures - * the pre-validation count cannot cause an unexpected validatation - */ - net2272_write(dev, PAGESEL, 0); - net2272_write(dev, EP_TRANSFER2, 0xff); - net2272_write(dev, EP_TRANSFER1, 0xff); - net2272_write(dev, EP_TRANSFER0, 0xff); - - u.raw[0] = net2272_read(dev, SETUP0); - u.raw[1] = net2272_read(dev, SETUP1); - u.raw[2] = net2272_read(dev, SETUP2); - u.raw[3] = net2272_read(dev, SETUP3); - u.raw[4] = net2272_read(dev, SETUP4); - u.raw[5] = net2272_read(dev, SETUP5); - u.raw[6] = net2272_read(dev, SETUP6); - u.raw[7] = net2272_read(dev, SETUP7); - /* - * If you have a big endian cpu make sure le16_to_cpus - * performs the proper byte swapping here... - */ - le16_to_cpus(&u.r.wValue); - le16_to_cpus(&u.r.wIndex); - le16_to_cpus(&u.r.wLength); - - /* ack the irq */ - net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT); - stat ^= (1 << SETUP_PACKET_INTERRUPT); - - /* watch control traffic at the token level, and force - * synchronization before letting the status phase happen. 
- */ - ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0; - if (ep->is_in) { - scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE) - | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) - | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); - stop_out_naking(ep); - } else - scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) - | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE) - | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE); - net2272_ep_write(ep, EP_IRQENB, scratch); - - if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) - goto delegate; - switch (u.r.bRequest) { - case USB_REQ_GET_STATUS: { - struct net2272_ep *e; - u16 status = 0; - - switch (u.r.bRequestType & USB_RECIP_MASK) { - case USB_RECIP_ENDPOINT: - e = net2272_get_ep_by_addr(dev, u.r.wIndex); - if (!e || u.r.wLength > 2) - goto do_stall; - if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT)) - status = cpu_to_le16(1); - else - status = cpu_to_le16(0); - - /* don't bother with a request object! */ - net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); - writew(status, net2272_reg_addr(dev, EP_DATA)); - set_fifo_bytecount(&dev->ep[0], 0); - allow_status(ep); - dev_vdbg(dev->dev, "%s stat %02x\n", - ep->ep.name, status); - goto next_endpoints; - case USB_RECIP_DEVICE: - if (u.r.wLength > 2) - goto do_stall; - if (dev->gadget.is_selfpowered) - status = (1 << USB_DEVICE_SELF_POWERED); - - /* don't bother with a request object! */ - net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); - writew(status, net2272_reg_addr(dev, EP_DATA)); - set_fifo_bytecount(&dev->ep[0], 0); - allow_status(ep); - dev_vdbg(dev->dev, "device stat %02x\n", status); - goto next_endpoints; - case USB_RECIP_INTERFACE: - if (u.r.wLength > 2) - goto do_stall; - - /* don't bother with a request object! */ - net2272_ep_write(&dev->ep[0], EP_IRQENB, 0); - writew(status, net2272_reg_addr(dev, EP_DATA)); - set_fifo_bytecount(&dev->ep[0], 0); - allow_status(ep); - dev_vdbg(dev->dev, "interface status %02x\n", status); - goto next_endpoints; - } - - break; - } - case USB_REQ_CLEAR_FEATURE: { - struct net2272_ep *e; - - if (u.r.bRequestType != USB_RECIP_ENDPOINT) - goto delegate; - if (u.r.wValue != USB_ENDPOINT_HALT || - u.r.wLength != 0) - goto do_stall; - e = net2272_get_ep_by_addr(dev, u.r.wIndex); - if (!e) - goto do_stall; - if (e->wedged) { - dev_vdbg(dev->dev, "%s wedged, halt not cleared\n", - ep->ep.name); - } else { - dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name); - clear_halt(e); - } - allow_status(ep); - goto next_endpoints; - } - case USB_REQ_SET_FEATURE: { - struct net2272_ep *e; - - if (u.r.bRequestType == USB_RECIP_DEVICE) { - if (u.r.wIndex != NORMAL_OPERATION) - net2272_set_test_mode(dev, (u.r.wIndex >> 8)); - allow_status(ep); - dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex); - goto next_endpoints; - } else if (u.r.bRequestType != USB_RECIP_ENDPOINT) - goto delegate; - if (u.r.wValue != USB_ENDPOINT_HALT || - u.r.wLength != 0) - goto do_stall; - e = net2272_get_ep_by_addr(dev, u.r.wIndex); - if (!e) - goto do_stall; - set_halt(e); - allow_status(ep); - dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name); - goto next_endpoints; - } - case USB_REQ_SET_ADDRESS: { - net2272_write(dev, OURADDR, u.r.wValue & 0xff); - allow_status(ep); - break; - } - default: - delegate: - dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x " - "ep_cfg %08x\n", - u.r.bRequestType, u.r.bRequest, - u.r.wValue, u.r.wIndex, - net2272_ep_read(ep, EP_CFG)); - if (dev->async_callbacks) { - spin_unlock(&dev->lock); - tmp = dev->driver->setup(&dev->gadget, &u.r); - spin_lock(&dev->lock); - } - } - - /* stall ep0 
on error */ - if (tmp < 0) { - do_stall: - dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n", - u.r.bRequestType, u.r.bRequest, tmp); - dev->protocol_stall = 1; - } - /* endpoint dma irq? */ - } else if (stat & (1 << DMA_DONE_INTERRUPT)) { - net2272_cancel_dma(dev); - net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT); - stat &= ~(1 << DMA_DONE_INTERRUPT); - num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT)) - ? 2 : 1; - - ep = &dev->ep[num]; - net2272_handle_dma(ep); - } - - next_endpoints: - /* endpoint data irq? */ - scratch = stat & 0x0f; - stat &= ~0x0f; - for (num = 0; scratch; num++) { - u8 t; - - /* does this endpoint's FIFO and queue need tending? */ - t = 1 << num; - if ((scratch & t) == 0) - continue; - scratch ^= t; - - ep = &dev->ep[num]; - net2272_handle_ep(ep); - } - - /* some interrupts we can just ignore */ - stat &= ~(1 << SOF_INTERRUPT); - - if (stat) - dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat); -} - -static void -net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat) -{ - u8 tmp, mask; - - /* after disconnect there's nothing else to do! */ - tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT); - mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED); - - if (stat & tmp) { - bool reset = false; - bool disconnect = false; - - /* - * Ignore disconnects and resets if the speed hasn't been set. - * VBUS can bounce and there's always an initial reset. - */ - net2272_write(dev, IRQSTAT1, tmp); - if (dev->gadget.speed != USB_SPEED_UNKNOWN) { - if ((stat & (1 << VBUS_INTERRUPT)) && - (net2272_read(dev, USBCTL1) & - (1 << VBUS_PIN)) == 0) { - disconnect = true; - dev_dbg(dev->dev, "disconnect %s\n", - dev->driver->driver.name); - } else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) && - (net2272_read(dev, USBCTL1) & mask) - == 0) { - reset = true; - dev_dbg(dev->dev, "reset %s\n", - dev->driver->driver.name); - } - - if (disconnect || reset) { - stop_activity(dev, dev->driver); - net2272_ep0_start(dev); - if (dev->async_callbacks) { - spin_unlock(&dev->lock); - if (reset) - usb_gadget_udc_reset(&dev->gadget, dev->driver); - else - (dev->driver->disconnect)(&dev->gadget); - spin_lock(&dev->lock); - } - return; - } - } - stat &= ~tmp; - - if (!stat) - return; - } - - tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT); - if (stat & tmp) { - net2272_write(dev, IRQSTAT1, tmp); - if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) { - if (dev->async_callbacks && dev->driver->suspend) - dev->driver->suspend(&dev->gadget); - if (!enable_suspend) { - stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT); - dev_dbg(dev->dev, "Suspend disabled, ignoring\n"); - } - } else { - if (dev->async_callbacks && dev->driver->resume) - dev->driver->resume(&dev->gadget); - } - stat &= ~tmp; - } - - /* clear any other status/irqs */ - if (stat) - net2272_write(dev, IRQSTAT1, stat); - - /* some status we can just ignore */ - stat &= ~((1 << CONTROL_STATUS_INTERRUPT) - | (1 << SUSPEND_REQUEST_INTERRUPT) - | (1 << RESUME_INTERRUPT)); - if (!stat) - return; - else - dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat); -} - -static irqreturn_t net2272_irq(int irq, void *_dev) -{ - struct net2272 *dev = _dev; -#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2) - u32 intcsr; -#endif -#if defined(PLX_PCI_RDK) - u8 dmareq; -#endif - spin_lock(&dev->lock); -#if defined(PLX_PCI_RDK) - intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); - - if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) { - writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE), - dev->rdk1.plx9054_base_addr + INTCSR); 
- net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1)); - net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0)); - intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR); - writel(intcsr | (1 << PCI_INTERRUPT_ENABLE), - dev->rdk1.plx9054_base_addr + INTCSR); - } - if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) { - writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), - dev->rdk1.plx9054_base_addr + DMACSR0); - - dmareq = net2272_read(dev, DMAREQ); - if (dmareq & 0x01) - net2272_handle_dma(&dev->ep[2]); - else - net2272_handle_dma(&dev->ep[1]); - } -#endif -#if defined(PLX_PCI_RDK2) - /* see if PCI int for us by checking irqstat */ - intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); - if (!(intcsr & (1 << NET2272_PCI_IRQ))) { - spin_unlock(&dev->lock); - return IRQ_NONE; - } - /* check dma interrupts */ -#endif - /* Platform/device interrupt handler */ -#if !defined(PLX_PCI_RDK) - net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1)); - net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0)); -#endif - spin_unlock(&dev->lock); - - return IRQ_HANDLED; -} - -static int net2272_present(struct net2272 *dev) -{ - /* - * Quick test to see if CPU can communicate properly with the NET2272. - * Verifies connection using writes and reads to write/read and - * read-only registers. - * - * This routine is strongly recommended especially during early bring-up - * of new hardware, however for designs that do not apply Power On System - * Tests (POST) it may discarded (or perhaps minimized). - */ - unsigned int ii; - u8 val, refval; - - /* Verify NET2272 write/read SCRATCH register can write and read */ - refval = net2272_read(dev, SCRATCH); - for (ii = 0; ii < 0x100; ii += 7) { - net2272_write(dev, SCRATCH, ii); - val = net2272_read(dev, SCRATCH); - if (val != ii) { - dev_dbg(dev->dev, - "%s: write/read SCRATCH register test failed: " - "wrote:0x%2.2x, read:0x%2.2x\n", - __func__, ii, val); - return -EINVAL; - } - } - /* To be nice, we write the original SCRATCH value back: */ - net2272_write(dev, SCRATCH, refval); - - /* Verify NET2272 CHIPREV register is read-only: */ - refval = net2272_read(dev, CHIPREV_2272); - for (ii = 0; ii < 0x100; ii += 7) { - net2272_write(dev, CHIPREV_2272, ii); - val = net2272_read(dev, CHIPREV_2272); - if (val != refval) { - dev_dbg(dev->dev, - "%s: write/read CHIPREV register test failed: " - "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n", - __func__, ii, val, refval); - return -EINVAL; - } - } - - /* - * Verify NET2272's "NET2270 legacy revision" register - * - NET2272 has two revision registers. The NET2270 legacy revision - * register should read the same value, regardless of the NET2272 - * silicon revision. The legacy register applies to NET2270 - * firmware being applied to the NET2272. - */ - val = net2272_read(dev, CHIPREV_LEGACY); - if (val != NET2270_LEGACY_REV) { - /* - * Unexpected legacy revision value - * - Perhaps the chip is a NET2270? - */ - dev_dbg(dev->dev, - "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n" - " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. 
(Not NET2272?)\n", - __func__, NET2270_LEGACY_REV, val); - return -EINVAL; - } - - /* - * Verify NET2272 silicon revision - * - This revision register is appropriate for the silicon version - * of the NET2272 - */ - val = net2272_read(dev, CHIPREV_2272); - switch (val) { - case CHIPREV_NET2272_R1: - /* - * NET2272 Rev 1 has DMA related errata: - * - Newer silicon (Rev 1A or better) required - */ - dev_dbg(dev->dev, - "%s: Rev 1 detected: newer silicon recommended for DMA support\n", - __func__); - break; - case CHIPREV_NET2272_R1A: - break; - default: - /* NET2272 silicon version *may* not work with this firmware */ - dev_dbg(dev->dev, - "%s: unexpected silicon revision register value: " - " CHIPREV_2272: 0x%2.2x\n", - __func__, val); - /* - * Return Success, even though the chip rev is not an expected value - * - Older, pre-built firmware can attempt to operate on newer silicon - * - Often, new silicon is perfectly compatible - */ - } - - /* Success: NET2272 checks out OK */ - return 0; -} - -static void -net2272_gadget_release(struct device *_dev) -{ - struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev); - - kfree(dev); -} - -/*---------------------------------------------------------------------------*/ - -static void -net2272_remove(struct net2272 *dev) -{ - if (dev->added) - usb_del_gadget(&dev->gadget); - free_irq(dev->irq, dev); - iounmap(dev->base_addr); - device_remove_file(dev->dev, &dev_attr_registers); - - dev_info(dev->dev, "unbind\n"); -} - -static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq) -{ - struct net2272 *ret; - - if (!irq) { - dev_dbg(dev, "No IRQ!\n"); - return ERR_PTR(-ENODEV); - } - - /* alloc, and start init */ - ret = kzalloc(sizeof(*ret), GFP_KERNEL); - if (!ret) - return ERR_PTR(-ENOMEM); - - spin_lock_init(&ret->lock); - ret->irq = irq; - ret->dev = dev; - ret->gadget.ops = &net2272_ops; - ret->gadget.max_speed = USB_SPEED_HIGH; - - /* the "gadget" abstracts/virtualizes the controller */ - ret->gadget.name = driver_name; - usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release); - - return ret; -} - -static int -net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) -{ - int ret; - - /* See if there... 
*/ - if (net2272_present(dev)) { - dev_warn(dev->dev, "2272 not found!\n"); - ret = -ENODEV; - goto err; - } - - net2272_usb_reset(dev); - net2272_usb_reinit(dev); - - ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev); - if (ret) { - dev_err(dev->dev, "request interrupt %i failed\n", dev->irq); - goto err; - } - - dev->chiprev = net2272_read(dev, CHIPREV_2272); - - /* done */ - dev_info(dev->dev, "%s\n", driver_desc); - dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n", - dev->irq, dev->base_addr, dev->chiprev, - dma_mode_string()); - dev_info(dev->dev, "version: %s\n", driver_vers); - - ret = device_create_file(dev->dev, &dev_attr_registers); - if (ret) - goto err_irq; - - ret = usb_add_gadget(&dev->gadget); - if (ret) - goto err_add_udc; - dev->added = 1; - - return 0; - -err_add_udc: - device_remove_file(dev->dev, &dev_attr_registers); - err_irq: - free_irq(dev->irq, dev); - err: - return ret; -} - -#ifdef CONFIG_USB_PCI - -/* - * wrap this driver around the specified device, but - * don't respond over USB until a gadget driver binds to us - */ - -static int -net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev) -{ - unsigned long resource, len, tmp; - void __iomem *mem_mapped_addr[4]; - int ret, i; - - /* - * BAR 0 holds PLX 9054 config registers - * BAR 1 is i/o memory; unused here - * BAR 2 holds EPLD config registers - * BAR 3 holds NET2272 registers - */ - - /* Find and map all address spaces */ - for (i = 0; i < 4; ++i) { - if (i == 1) - continue; /* BAR1 unused */ - - resource = pci_resource_start(pdev, i); - len = pci_resource_len(pdev, i); - - if (!request_mem_region(resource, len, driver_name)) { - dev_dbg(dev->dev, "controller already in use\n"); - ret = -EBUSY; - goto err; - } - - mem_mapped_addr[i] = ioremap(resource, len); - if (mem_mapped_addr[i] == NULL) { - release_mem_region(resource, len); - dev_dbg(dev->dev, "can't map memory\n"); - ret = -EFAULT; - goto err; - } - } - - dev->rdk1.plx9054_base_addr = mem_mapped_addr[0]; - dev->rdk1.epld_base_addr = mem_mapped_addr[2]; - dev->base_addr = mem_mapped_addr[3]; - - /* Set PLX 9054 bus width (16 bits) */ - tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1); - writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT, - dev->rdk1.plx9054_base_addr + LBRD1); - - /* Enable PLX 9054 Interrupts */ - writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) | - (1 << PCI_INTERRUPT_ENABLE) | - (1 << LOCAL_INTERRUPT_INPUT_ENABLE), - dev->rdk1.plx9054_base_addr + INTCSR); - - writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)), - dev->rdk1.plx9054_base_addr + DMACSR0); - - /* reset */ - writeb((1 << EPLD_DMA_ENABLE) | - (1 << DMA_CTL_DACK) | - (1 << DMA_TIMEOUT_ENABLE) | - (1 << USER) | - (0 << MPX_MODE) | - (1 << BUSWIDTH) | - (1 << NET2272_RESET), - dev->base_addr + EPLD_IO_CONTROL_REGISTER); - - mb(); - writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) & - ~(1 << NET2272_RESET), - dev->base_addr + EPLD_IO_CONTROL_REGISTER); - udelay(200); - - return 0; - - err: - while (--i >= 0) { - if (i == 1) - continue; /* BAR1 unused */ - iounmap(mem_mapped_addr[i]); - release_mem_region(pci_resource_start(pdev, i), - pci_resource_len(pdev, i)); - } - - return ret; -} - -static int -net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev) -{ - unsigned long resource, len; - void __iomem *mem_mapped_addr[2]; - int ret, i; - - /* - * BAR 0 holds FGPA config registers - * BAR 1 holds NET2272 registers - */ - - /* Find and map all address spaces, bar2-3 unused in rdk 2 */ - for (i = 0; i < 
2; ++i) { - resource = pci_resource_start(pdev, i); - len = pci_resource_len(pdev, i); - - if (!request_mem_region(resource, len, driver_name)) { - dev_dbg(dev->dev, "controller already in use\n"); - ret = -EBUSY; - goto err; - } - - mem_mapped_addr[i] = ioremap(resource, len); - if (mem_mapped_addr[i] == NULL) { - release_mem_region(resource, len); - dev_dbg(dev->dev, "can't map memory\n"); - ret = -EFAULT; - goto err; - } - } - - dev->rdk2.fpga_base_addr = mem_mapped_addr[0]; - dev->base_addr = mem_mapped_addr[1]; - - mb(); - /* Set 2272 bus width (16 bits) and reset */ - writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK); - udelay(200); - writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK); - /* Print fpga version number */ - dev_info(dev->dev, "RDK2 FPGA version %08x\n", - readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV)); - /* Enable FPGA Interrupts */ - writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB); - - return 0; - - err: - while (--i >= 0) { - iounmap(mem_mapped_addr[i]); - release_mem_region(pci_resource_start(pdev, i), - pci_resource_len(pdev, i)); - } - - return ret; -} - -static int -net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct net2272 *dev; - int ret; - - dev = net2272_probe_init(&pdev->dev, pdev->irq); - if (IS_ERR(dev)) - return PTR_ERR(dev); - dev->dev_id = pdev->device; - - if (pci_enable_device(pdev) < 0) { - ret = -ENODEV; - goto err_put; - } - - pci_set_master(pdev); - - switch (pdev->device) { - case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break; - case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break; - default: BUG(); - } - if (ret) - goto err_pci; - - ret = net2272_probe_fin(dev, 0); - if (ret) - goto err_pci; - - pci_set_drvdata(pdev, dev); - - return 0; - - err_pci: - pci_disable_device(pdev); - err_put: - usb_put_gadget(&dev->gadget); - - return ret; -} - -static void -net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev) -{ - int i; - - /* disable PLX 9054 interrupts */ - writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) & - ~(1 << PCI_INTERRUPT_ENABLE), - dev->rdk1.plx9054_base_addr + INTCSR); - - /* clean up resources allocated during probe() */ - iounmap(dev->rdk1.plx9054_base_addr); - iounmap(dev->rdk1.epld_base_addr); - - for (i = 0; i < 4; ++i) { - if (i == 1) - continue; /* BAR1 unused */ - release_mem_region(pci_resource_start(pdev, i), - pci_resource_len(pdev, i)); - } -} - -static void -net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev) -{ - int i; - - /* disable fpga interrupts - writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) & - ~(1 << PCI_INTERRUPT_ENABLE), - dev->rdk1.plx9054_base_addr + INTCSR); - */ - - /* clean up resources allocated during probe() */ - iounmap(dev->rdk2.fpga_base_addr); - - for (i = 0; i < 2; ++i) - release_mem_region(pci_resource_start(pdev, i), - pci_resource_len(pdev, i)); -} - -static void -net2272_pci_remove(struct pci_dev *pdev) -{ - struct net2272 *dev = pci_get_drvdata(pdev); - - net2272_remove(dev); - - switch (pdev->device) { - case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break; - case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break; - default: BUG(); - } - - pci_disable_device(pdev); - - usb_put_gadget(&dev->gadget); -} - -/* Table of matching PCI IDs */ -static struct pci_device_id pci_ids[] = { - { /* RDK 1 card */ - .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), - .class_mask = 0, - .vendor = PCI_VENDOR_ID_PLX, - .device = 
PCI_DEVICE_ID_RDK1, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, - { /* RDK 2 card */ - .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe), - .class_mask = 0, - .vendor = PCI_VENDOR_ID_PLX, - .device = PCI_DEVICE_ID_RDK2, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - }, - { } -}; -MODULE_DEVICE_TABLE(pci, pci_ids); - -static struct pci_driver net2272_pci_driver = { - .name = driver_name, - .id_table = pci_ids, - - .probe = net2272_pci_probe, - .remove = net2272_pci_remove, -}; - -static int net2272_pci_register(void) -{ - return pci_register_driver(&net2272_pci_driver); -} - -static void net2272_pci_unregister(void) -{ - pci_unregister_driver(&net2272_pci_driver); -} - -#else -static inline int net2272_pci_register(void) { return 0; } -static inline void net2272_pci_unregister(void) { } -#endif - -/*---------------------------------------------------------------------------*/ - -static int -net2272_plat_probe(struct platform_device *pdev) -{ - struct net2272 *dev; - int ret; - unsigned int irqflags; - resource_size_t base, len; - struct resource *iomem, *iomem_bus, *irq_res; - - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0); - if (!irq_res || !iomem) { - dev_err(&pdev->dev, "must provide irq/base addr"); - return -EINVAL; - } - - dev = net2272_probe_init(&pdev->dev, irq_res->start); - if (IS_ERR(dev)) - return PTR_ERR(dev); - - irqflags = 0; - if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) - irqflags |= IRQF_TRIGGER_RISING; - if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) - irqflags |= IRQF_TRIGGER_FALLING; - if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) - irqflags |= IRQF_TRIGGER_HIGH; - if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) - irqflags |= IRQF_TRIGGER_LOW; - - base = iomem->start; - len = resource_size(iomem); - if (iomem_bus) - dev->base_shift = iomem_bus->start; - - if (!request_mem_region(base, len, driver_name)) { - dev_dbg(dev->dev, "get request memory region!\n"); - ret = -EBUSY; - goto err; - } - dev->base_addr = ioremap(base, len); - if (!dev->base_addr) { - dev_dbg(dev->dev, "can't map memory\n"); - ret = -EFAULT; - goto err_req; - } - - ret = net2272_probe_fin(dev, irqflags); - if (ret) - goto err_io; - - platform_set_drvdata(pdev, dev); - dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n", - (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? 
"" : "no "); - - return 0; - - err_io: - iounmap(dev->base_addr); - err_req: - release_mem_region(base, len); - err: - usb_put_gadget(&dev->gadget); - - return ret; -} - -static void -net2272_plat_remove(struct platform_device *pdev) -{ - struct net2272 *dev = platform_get_drvdata(pdev); - - net2272_remove(dev); - - release_mem_region(pdev->resource[0].start, - resource_size(&pdev->resource[0])); - - usb_put_gadget(&dev->gadget); -} - -static struct platform_driver net2272_plat_driver = { - .probe = net2272_plat_probe, - .remove = net2272_plat_remove, - .driver = { - .name = driver_name, - }, - /* FIXME .suspend, .resume */ -}; -MODULE_ALIAS("platform:net2272"); - -static int __init net2272_init(void) -{ - int ret; - - ret = net2272_pci_register(); - if (ret) - return ret; - ret = platform_driver_register(&net2272_plat_driver); - if (ret) - goto err_pci; - return ret; - -err_pci: - net2272_pci_unregister(); - return ret; -} -module_init(net2272_init); - -static void __exit net2272_cleanup(void) -{ - net2272_pci_unregister(); - platform_driver_unregister(&net2272_plat_driver); -} -module_exit(net2272_cleanup); - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_AUTHOR("PLX Technology, Inc."); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h deleted file mode 100644 index a9994f737588..000000000000 --- a/drivers/usb/gadget/udc/net2272.h +++ /dev/null @@ -1,584 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * PLX NET2272 high/full speed USB device controller - * - * Copyright (C) 2005-2006 PLX Technology, Inc. - * Copyright (C) 2006-2011 Analog Devices, Inc. - */ - -#ifndef __NET2272_H__ -#define __NET2272_H__ - -/* Main Registers */ -#define REGADDRPTR 0x00 -#define REGDATA 0x01 -#define IRQSTAT0 0x02 -#define ENDPOINT_0_INTERRUPT 0 -#define ENDPOINT_A_INTERRUPT 1 -#define ENDPOINT_B_INTERRUPT 2 -#define ENDPOINT_C_INTERRUPT 3 -#define VIRTUALIZED_ENDPOINT_INTERRUPT 4 -#define SETUP_PACKET_INTERRUPT 5 -#define DMA_DONE_INTERRUPT 6 -#define SOF_INTERRUPT 7 -#define IRQSTAT1 0x03 -#define CONTROL_STATUS_INTERRUPT 1 -#define VBUS_INTERRUPT 2 -#define SUSPEND_REQUEST_INTERRUPT 3 -#define SUSPEND_REQUEST_CHANGE_INTERRUPT 4 -#define RESUME_INTERRUPT 5 -#define ROOT_PORT_RESET_INTERRUPT 6 -#define RESET_STATUS 7 -#define PAGESEL 0x04 -#define DMAREQ 0x1c -#define DMA_ENDPOINT_SELECT 0 -#define DREQ_POLARITY 1 -#define DACK_POLARITY 2 -#define EOT_POLARITY 3 -#define DMA_CONTROL_DACK 4 -#define DMA_REQUEST_ENABLE 5 -#define DMA_REQUEST 6 -#define DMA_BUFFER_VALID 7 -#define SCRATCH 0x1d -#define IRQENB0 0x20 -#define ENDPOINT_0_INTERRUPT_ENABLE 0 -#define ENDPOINT_A_INTERRUPT_ENABLE 1 -#define ENDPOINT_B_INTERRUPT_ENABLE 2 -#define ENDPOINT_C_INTERRUPT_ENABLE 3 -#define VIRTUALIZED_ENDPOINT_INTERRUPT_ENABLE 4 -#define SETUP_PACKET_INTERRUPT_ENABLE 5 -#define DMA_DONE_INTERRUPT_ENABLE 6 -#define SOF_INTERRUPT_ENABLE 7 -#define IRQENB1 0x21 -#define VBUS_INTERRUPT_ENABLE 2 -#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3 -#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 4 -#define RESUME_INTERRUPT_ENABLE 5 -#define ROOT_PORT_RESET_INTERRUPT_ENABLE 6 -#define LOCCTL 0x22 -#define DATA_WIDTH 0 -#define LOCAL_CLOCK_OUTPUT 1 -#define LOCAL_CLOCK_OUTPUT_OFF 0 -#define LOCAL_CLOCK_OUTPUT_3_75MHZ 1 -#define LOCAL_CLOCK_OUTPUT_7_5MHZ 2 -#define LOCAL_CLOCK_OUTPUT_15MHZ 3 -#define LOCAL_CLOCK_OUTPUT_30MHZ 4 -#define LOCAL_CLOCK_OUTPUT_60MHZ 5 -#define DMA_SPLIT_BUS_MODE 4 -#define BYTE_SWAP 5 -#define BUFFER_CONFIGURATION 6 -#define 
BUFFER_CONFIGURATION_EPA512_EPB512 0 -#define BUFFER_CONFIGURATION_EPA1024_EPB512 1 -#define BUFFER_CONFIGURATION_EPA1024_EPB1024 2 -#define BUFFER_CONFIGURATION_EPA1024DB 3 -#define CHIPREV_LEGACY 0x23 -#define NET2270_LEGACY_REV 0x40 -#define LOCCTL1 0x24 -#define DMA_MODE 0 -#define SLOW_DREQ 0 -#define FAST_DREQ 1 -#define BURST_MODE 2 -#define DMA_DACK_ENABLE 2 -#define CHIPREV_2272 0x25 -#define CHIPREV_NET2272_R1 0x10 -#define CHIPREV_NET2272_R1A 0x11 -/* USB Registers */ -#define USBCTL0 0x18 -#define IO_WAKEUP_ENABLE 1 -#define USB_DETECT_ENABLE 3 -#define USB_ROOT_PORT_WAKEUP_ENABLE 5 -#define USBCTL1 0x19 -#define VBUS_PIN 0 -#define USB_FULL_SPEED 1 -#define USB_HIGH_SPEED 2 -#define GENERATE_RESUME 3 -#define VIRTUAL_ENDPOINT_ENABLE 4 -#define FRAME0 0x1a -#define FRAME1 0x1b -#define OURADDR 0x30 -#define FORCE_IMMEDIATE 7 -#define USBDIAG 0x31 -#define FORCE_TRANSMIT_CRC_ERROR 0 -#define PREVENT_TRANSMIT_BIT_STUFF 1 -#define FORCE_RECEIVE_ERROR 2 -#define FAST_TIMES 4 -#define USBTEST 0x32 -#define TEST_MODE_SELECT 0 -#define NORMAL_OPERATION 0 -#define XCVRDIAG 0x33 -#define FORCE_FULL_SPEED 2 -#define FORCE_HIGH_SPEED 3 -#define OPMODE 4 -#define NORMAL_OPERATION 0 -#define NON_DRIVING 1 -#define DISABLE_BITSTUFF_AND_NRZI_ENCODE 2 -#define LINESTATE 6 -#define SE0_STATE 0 -#define J_STATE 1 -#define K_STATE 2 -#define SE1_STATE 3 -#define VIRTOUT0 0x34 -#define VIRTOUT1 0x35 -#define VIRTIN0 0x36 -#define VIRTIN1 0x37 -#define SETUP0 0x40 -#define SETUP1 0x41 -#define SETUP2 0x42 -#define SETUP3 0x43 -#define SETUP4 0x44 -#define SETUP5 0x45 -#define SETUP6 0x46 -#define SETUP7 0x47 -/* Endpoint Registers (Paged via PAGESEL) */ -#define EP_DATA 0x05 -#define EP_STAT0 0x06 -#define DATA_IN_TOKEN_INTERRUPT 0 -#define DATA_OUT_TOKEN_INTERRUPT 1 -#define DATA_PACKET_TRANSMITTED_INTERRUPT 2 -#define DATA_PACKET_RECEIVED_INTERRUPT 3 -#define SHORT_PACKET_TRANSFERRED_INTERRUPT 4 -#define NAK_OUT_PACKETS 5 -#define BUFFER_EMPTY 6 -#define BUFFER_FULL 7 -#define EP_STAT1 0x07 -#define TIMEOUT 0 -#define USB_OUT_ACK_SENT 1 -#define USB_OUT_NAK_SENT 2 -#define USB_IN_ACK_RCVD 3 -#define USB_IN_NAK_SENT 4 -#define USB_STALL_SENT 5 -#define LOCAL_OUT_ZLP 6 -#define BUFFER_FLUSH 7 -#define EP_TRANSFER0 0x08 -#define EP_TRANSFER1 0x09 -#define EP_TRANSFER2 0x0a -#define EP_IRQENB 0x0b -#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0 -#define DATA_OUT_TOKEN_INTERRUPT_ENABLE 1 -#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2 -#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3 -#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 4 -#define EP_AVAIL0 0x0c -#define EP_AVAIL1 0x0d -#define EP_RSPCLR 0x0e -#define EP_RSPSET 0x0f -#define ENDPOINT_HALT 0 -#define ENDPOINT_TOGGLE 1 -#define NAK_OUT_PACKETS_MODE 2 -#define CONTROL_STATUS_PHASE_HANDSHAKE 3 -#define INTERRUPT_MODE 4 -#define AUTOVALIDATE 5 -#define HIDE_STATUS_PHASE 6 -#define ALT_NAK_OUT_PACKETS 7 -#define EP_MAXPKT0 0x28 -#define EP_MAXPKT1 0x29 -#define ADDITIONAL_TRANSACTION_OPPORTUNITIES 3 -#define NONE_ADDITIONAL_TRANSACTION 0 -#define ONE_ADDITIONAL_TRANSACTION 1 -#define TWO_ADDITIONAL_TRANSACTION 2 -#define EP_CFG 0x2a -#define ENDPOINT_NUMBER 0 -#define ENDPOINT_DIRECTION 4 -#define ENDPOINT_TYPE 5 -#define ENDPOINT_ENABLE 7 -#define EP_HBW 0x2b -#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 0 -#define DATA0_PID 0 -#define DATA1_PID 1 -#define DATA2_PID 2 -#define MDATA_PID 3 -#define EP_BUFF_STATES 0x2c -#define BUFFER_A_STATE 0 -#define BUFFER_B_STATE 2 -#define BUFF_FREE 0 -#define BUFF_VALID 1 -#define BUFF_LCL 2 -#define BUFF_USB 
3 - -/*---------------------------------------------------------------------------*/ - -#define PCI_DEVICE_ID_RDK1 0x9054 - -/* PCI-RDK EPLD Registers */ -#define RDK_EPLD_IO_REGISTER1 0x00000000 -#define RDK_EPLD_USB_RESET 0 -#define RDK_EPLD_USB_POWERDOWN 1 -#define RDK_EPLD_USB_WAKEUP 2 -#define RDK_EPLD_USB_EOT 3 -#define RDK_EPLD_DPPULL 4 -#define RDK_EPLD_IO_REGISTER2 0x00000004 -#define RDK_EPLD_BUSWIDTH 0 -#define RDK_EPLD_USER 2 -#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3 -#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4 -#define RDK_EPLD_STATUS_REGISTER 0x00000008 -#define RDK_EPLD_USB_LRESET 0 -#define RDK_EPLD_REVISION_REGISTER 0x0000000c - -/* PCI-RDK PLX 9054 Registers */ -#define INTCSR 0x68 -#define PCI_INTERRUPT_ENABLE 8 -#define LOCAL_INTERRUPT_INPUT_ENABLE 11 -#define LOCAL_INPUT_INTERRUPT_ACTIVE 15 -#define LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE 18 -#define LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE 19 -#define DMA_CHANNEL_0_INTERRUPT_ACTIVE 21 -#define DMA_CHANNEL_1_INTERRUPT_ACTIVE 22 -#define CNTRL 0x6C -#define RELOAD_CONFIGURATION_REGISTERS 29 -#define PCI_ADAPTER_SOFTWARE_RESET 30 -#define DMAMODE0 0x80 -#define LOCAL_BUS_WIDTH 0 -#define INTERNAL_WAIT_STATES 2 -#define TA_READY_INPUT_ENABLE 6 -#define LOCAL_BURST_ENABLE 8 -#define SCATTER_GATHER_MODE 9 -#define DONE_INTERRUPT_ENABLE 10 -#define LOCAL_ADDRESSING_MODE 11 -#define DEMAND_MODE 12 -#define DMA_EOT_ENABLE 14 -#define FAST_SLOW_TERMINATE_MODE_SELECT 15 -#define DMA_CHANNEL_INTERRUPT_SELECT 17 -#define DMAPADR0 0x84 -#define DMALADR0 0x88 -#define DMASIZ0 0x8c -#define DMADPR0 0x90 -#define DESCRIPTOR_LOCATION 0 -#define END_OF_CHAIN 1 -#define INTERRUPT_AFTER_TERMINAL_COUNT 2 -#define DIRECTION_OF_TRANSFER 3 -#define DMACSR0 0xa8 -#define CHANNEL_ENABLE 0 -#define CHANNEL_START 1 -#define CHANNEL_ABORT 2 -#define CHANNEL_CLEAR_INTERRUPT 3 -#define CHANNEL_DONE 4 -#define DMATHR 0xb0 -#define LBRD1 0xf8 -#define MEMORY_SPACE_LOCAL_BUS_WIDTH 0 -#define W8_BIT 0 -#define W16_BIT 1 - -/* Special OR'ing of INTCSR bits */ -#define LOCAL_INTERRUPT_TEST \ - ((1 << LOCAL_INPUT_INTERRUPT_ACTIVE) | \ - (1 << LOCAL_INTERRUPT_INPUT_ENABLE)) - -#define DMA_CHANNEL_0_TEST \ - ((1 << DMA_CHANNEL_0_INTERRUPT_ACTIVE) | \ - (1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE)) - -#define DMA_CHANNEL_1_TEST \ - ((1 << DMA_CHANNEL_1_INTERRUPT_ACTIVE) | \ - (1 << LOCAL_DMA_CHANNEL_1_INTERRUPT_ENABLE)) - -/* EPLD Registers */ -#define RDK_EPLD_IO_REGISTER1 0x00000000 -#define RDK_EPLD_USB_RESET 0 -#define RDK_EPLD_USB_POWERDOWN 1 -#define RDK_EPLD_USB_WAKEUP 2 -#define RDK_EPLD_USB_EOT 3 -#define RDK_EPLD_DPPULL 4 -#define RDK_EPLD_IO_REGISTER2 0x00000004 -#define RDK_EPLD_BUSWIDTH 0 -#define RDK_EPLD_USER 2 -#define RDK_EPLD_RESET_INTERRUPT_ENABLE 3 -#define RDK_EPLD_DMA_TIMEOUT_ENABLE 4 -#define RDK_EPLD_STATUS_REGISTER 0x00000008 -#define RDK_EPLD_USB_LRESET 0 -#define RDK_EPLD_REVISION_REGISTER 0x0000000c - -#define EPLD_IO_CONTROL_REGISTER 0x400 -#define NET2272_RESET 0 -#define BUSWIDTH 1 -#define MPX_MODE 3 -#define USER 4 -#define DMA_TIMEOUT_ENABLE 5 -#define DMA_CTL_DACK 6 -#define EPLD_DMA_ENABLE 7 -#define EPLD_DMA_CONTROL_REGISTER 0x800 -#define SPLIT_DMA_MODE 0 -#define SPLIT_DMA_DIRECTION 1 -#define SPLIT_DMA_ENABLE 2 -#define SPLIT_DMA_INTERRUPT_ENABLE 3 -#define SPLIT_DMA_INTERRUPT 4 -#define EPLD_DMA_MODE 5 -#define EPLD_DMA_CONTROLLER_ENABLE 7 -#define SPLIT_DMA_ADDRESS_LOW 0xc00 -#define SPLIT_DMA_ADDRESS_HIGH 0x1000 -#define SPLIT_DMA_BYTE_COUNT_LOW 0x1400 -#define SPLIT_DMA_BYTE_COUNT_HIGH 0x1800 -#define EPLD_REVISION_REGISTER 
0x1c00 -#define SPLIT_DMA_RAM 0x4000 -#define DMA_RAM_SIZE 0x1000 - -/*---------------------------------------------------------------------------*/ - -#define PCI_DEVICE_ID_RDK2 0x3272 - -/* PCI-RDK version 2 registers */ - -/* Main Control Registers */ - -#define RDK2_IRQENB 0x00 -#define RDK2_IRQSTAT 0x04 -#define PB7 23 -#define PB6 22 -#define PB5 21 -#define PB4 20 -#define PB3 19 -#define PB2 18 -#define PB1 17 -#define PB0 16 -#define GP3 23 -#define GP2 23 -#define GP1 23 -#define GP0 23 -#define DMA_RETRY_ABORT 6 -#define DMA_PAUSE_DONE 5 -#define DMA_ABORT_DONE 4 -#define DMA_OUT_FIFO_TRANSFER_DONE 3 -#define DMA_LOCAL_DONE 2 -#define DMA_PCI_DONE 1 -#define NET2272_PCI_IRQ 0 - -#define RDK2_LOCCTLRDK 0x08 -#define CHIP_RESET 3 -#define SPLIT_DMA 2 -#define MULTIPLEX_MODE 1 -#define BUS_WIDTH 0 - -#define RDK2_GPIOCTL 0x10 -#define GP3_OUT_ENABLE 7 -#define GP2_OUT_ENABLE 6 -#define GP1_OUT_ENABLE 5 -#define GP0_OUT_ENABLE 4 -#define GP3_DATA 3 -#define GP2_DATA 2 -#define GP1_DATA 1 -#define GP0_DATA 0 - -#define RDK2_LEDSW 0x14 -#define LED3 27 -#define LED2 26 -#define LED1 25 -#define LED0 24 -#define PBUTTON 16 -#define DIPSW 0 - -#define RDK2_DIAG 0x18 -#define RDK2_FAST_TIMES 2 -#define FORCE_PCI_SERR 1 -#define FORCE_PCI_INT 0 -#define RDK2_FPGAREV 0x1C - -/* Dma Control registers */ -#define RDK2_DMACTL 0x80 -#define ADDR_HOLD 24 -#define RETRY_COUNT 16 /* 23:16 */ -#define FIFO_THRESHOLD 11 /* 15:11 */ -#define MEM_WRITE_INVALIDATE 10 -#define READ_MULTIPLE 9 -#define READ_LINE 8 -#define RDK2_DMA_MODE 6 /* 7:6 */ -#define CONTROL_DACK 5 -#define EOT_ENABLE 4 -#define EOT_POLARITY 3 -#define DACK_POLARITY 2 -#define DREQ_POLARITY 1 -#define DMA_ENABLE 0 - -#define RDK2_DMASTAT 0x84 -#define GATHER_COUNT 12 /* 14:12 */ -#define FIFO_COUNT 6 /* 11:6 */ -#define FIFO_FLUSH 5 -#define FIFO_TRANSFER 4 -#define PAUSE_DONE 3 -#define ABORT_DONE 2 -#define DMA_ABORT 1 -#define DMA_START 0 - -#define RDK2_DMAPCICOUNT 0x88 -#define DMA_DIRECTION 31 -#define DMA_PCI_BYTE_COUNT 0 /* 0:23 */ - -#define RDK2_DMALOCCOUNT 0x8C /* 0:23 dma local byte count */ - -#define RDK2_DMAADDR 0x90 /* 2:31 PCI bus starting address */ - -/*---------------------------------------------------------------------------*/ - -#define REG_INDEXED_THRESHOLD (1 << 5) - -/* DRIVER DATA STRUCTURES and UTILITIES */ -struct net2272_ep { - struct usb_ep ep; - struct net2272 *dev; - unsigned long irqs; - - /* analogous to a host-side qh */ - struct list_head queue; - const struct usb_endpoint_descriptor *desc; - unsigned num:8, - fifo_size:12, - stopped:1, - wedged:1, - is_in:1, - is_iso:1, - dma:1, - not_empty:1; -}; - -struct net2272 { - /* each device provides one gadget, several endpoints */ - struct usb_gadget gadget; - struct device *dev; - unsigned short dev_id; - - spinlock_t lock; - struct net2272_ep ep[4]; - struct usb_gadget_driver *driver; - unsigned protocol_stall:1, - softconnect:1, - wakeup:1, - added:1, - async_callbacks:1, - dma_eot_polarity:1, - dma_dack_polarity:1, - dma_dreq_polarity:1, - dma_busy:1; - u16 chiprev; - u8 pagesel; - - unsigned int irq; - unsigned short fifo_mode; - - unsigned int base_shift; - u16 __iomem *base_addr; - union { -#ifdef CONFIG_USB_PCI - struct { - void __iomem *plx9054_base_addr; - void __iomem *epld_base_addr; - } rdk1; - struct { - /* Bar0, Bar1 is base_addr both mem-mapped */ - void __iomem *fpga_base_addr; - } rdk2; -#endif - }; -}; - -static void __iomem * -net2272_reg_addr(struct net2272 *dev, unsigned int reg) -{ - return dev->base_addr + (reg << 
dev->base_shift); -} - -static void -net2272_write(struct net2272 *dev, unsigned int reg, u8 value) -{ - if (reg >= REG_INDEXED_THRESHOLD) { - /* - * Indexed register; use REGADDRPTR/REGDATA - * - Save and restore REGADDRPTR. This prevents REGADDRPTR from - * changes between other code sections, but it is time consuming. - * - Performance tips: either do not save and restore REGADDRPTR (if it - * is safe) or do save/restore operations only in critical sections. - u8 tmp = readb(dev->base_addr + REGADDRPTR); - */ - writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR)); - writeb(value, net2272_reg_addr(dev, REGDATA)); - /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */ - } else - writeb(value, net2272_reg_addr(dev, reg)); -} - -static u8 -net2272_read(struct net2272 *dev, unsigned int reg) -{ - u8 ret; - - if (reg >= REG_INDEXED_THRESHOLD) { - /* - * Indexed register; use REGADDRPTR/REGDATA - * - Save and restore REGADDRPTR. This prevents REGADDRPTR from - * changes between other code sections, but it is time consuming. - * - Performance tips: either do not save and restore REGADDRPTR (if it - * is safe) or do save/restore operations only in critical sections. - u8 tmp = readb(dev->base_addr + REGADDRPTR); - */ - writeb((u8)reg, net2272_reg_addr(dev, REGADDRPTR)); - ret = readb(net2272_reg_addr(dev, REGDATA)); - /* writeb(tmp, net2272_reg_addr(dev, REGADDRPTR)); */ - } else - ret = readb(net2272_reg_addr(dev, reg)); - - return ret; -} - -static void -net2272_ep_write(struct net2272_ep *ep, unsigned int reg, u8 value) -{ - struct net2272 *dev = ep->dev; - - if (dev->pagesel != ep->num) { - net2272_write(dev, PAGESEL, ep->num); - dev->pagesel = ep->num; - } - net2272_write(dev, reg, value); -} - -static u8 -net2272_ep_read(struct net2272_ep *ep, unsigned int reg) -{ - struct net2272 *dev = ep->dev; - - if (dev->pagesel != ep->num) { - net2272_write(dev, PAGESEL, ep->num); - dev->pagesel = ep->num; - } - return net2272_read(dev, reg); -} - -static void allow_status(struct net2272_ep *ep) -{ - /* ep0 only */ - net2272_ep_write(ep, EP_RSPCLR, - (1 << CONTROL_STATUS_PHASE_HANDSHAKE) | - (1 << ALT_NAK_OUT_PACKETS) | - (1 << NAK_OUT_PACKETS_MODE)); - ep->stopped = 1; -} - -static void set_halt(struct net2272_ep *ep) -{ - /* ep0 and bulk/intr endpoints */ - net2272_ep_write(ep, EP_RSPCLR, 1 << CONTROL_STATUS_PHASE_HANDSHAKE); - net2272_ep_write(ep, EP_RSPSET, 1 << ENDPOINT_HALT); -} - -static void clear_halt(struct net2272_ep *ep) -{ - /* ep0 and bulk/intr endpoints */ - net2272_ep_write(ep, EP_RSPCLR, - (1 << ENDPOINT_HALT) | (1 << ENDPOINT_TOGGLE)); -} - -/* count (<= 4) bytes in the next fifo write will be valid */ -static void set_fifo_bytecount(struct net2272_ep *ep, unsigned count) -{ - /* net2272_ep_write will truncate to u8 for us */ - net2272_ep_write(ep, EP_TRANSFER2, count >> 16); - net2272_ep_write(ep, EP_TRANSFER1, count >> 8); - net2272_ep_write(ep, EP_TRANSFER0, count); -} - -struct net2272_request { - struct usb_request req; - struct list_head queue; - unsigned mapped:1, - valid:1; -}; - -#endif diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 89b304cf6d03..3e4d56457597 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2397,8 +2397,7 @@ static int renesas_usb3_stop(struct usb_gadget *gadget) rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false); renesas_usb3_stop_controller(usb3); - if (usb3->phy) - phy_exit(usb3->phy); + phy_exit(usb3->phy); pm_runtime_put(usb3_to_dev(usb3)); @@ -2984,8 
+2983,7 @@ static int renesas_usb3_suspend(struct device *dev) return 0; renesas_usb3_stop_controller(usb3); - if (usb3->phy) - phy_exit(usb3->phy); + phy_exit(usb3->phy); pm_runtime_put(dev); return 0; diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index c7fdbc55fb0b..2957316fd3d0 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep) val = xudc_readl(xudc, CTRL); val &= ~CTRL_RUN; xudc_writel(xudc, val, CTRL); + + val = xudc_readl(xudc, ST); + if (val & ST_RC) + xudc_writel(xudc, ST_RC, ST); } dev_info(xudc->dev, "ep %u disabled\n", ep->index); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index ae2aeb271897..fa94cc065274 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -2178,8 +2178,6 @@ fail: /** * xudc_remove - Releases the resources allocated during the initialization. * @pdev: pointer to the platform device structure. - * - * Return: 0 always */ static void xudc_remove(struct platform_device *pdev) { diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index d011d6c753ed..109100cc77a3 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -104,6 +104,15 @@ config USB_XHCI_RZV2M Say 'Y' to enable the support for the xHCI host controller found in Renesas RZ/V2M SoC. +config USB_XHCI_SIDEBAND + bool "xHCI support for sideband" + help + Say 'Y' to enable the support for the xHCI sideband capability. + Provide a mechanism for a sideband datapath for payload associated + with audio class endpoints. This allows for an audio DSP to use + xHCI USB endpoints directly, allowing CPU to sleep while playing + audio. + config USB_XHCI_TEGRA tristate "xHCI support for NVIDIA Tegra SoCs" depends on PHY_TEGRA_XUSB @@ -225,7 +234,7 @@ config USB_EHCI_HCD_OMAP tristate "EHCI support for OMAP3 and later chips" depends on ARCH_OMAP || COMPILE_TEST depends on NOP_USB_XCEIV - default y + default ARCH_OMAP help Enables support for the on-chip EHCI controller on OMAP3 and later chips. diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index be4e5245c52f..4df946c05ba0 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -32,6 +32,10 @@ endif xhci-rcar-hcd-y += xhci-rcar.o xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o +ifneq ($(CONFIG_USB_XHCI_SIDEBAND),) + xhci-hcd-y += xhci-sideband.o +endif + obj-$(CONFIG_USB_PCI) += pci-quirks.o obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 26f13278d4d6..6ed2fa5418a4 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -410,15 +410,13 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) return retval; } -struct ehci_fsl { - struct ehci_hcd ehci; - -#ifdef CONFIG_PM +struct ehci_fsl_priv { /* Saved USB PHY settings, need to restore after deep sleep. 
*/ u32 usb_ctrl; -#endif }; +#define hcd_to_ehci_fsl_priv(h) ((struct ehci_fsl_priv *) hcd_to_ehci(h)->priv) + #ifdef CONFIG_PM #ifdef CONFIG_PPC_MPC512x @@ -566,17 +564,10 @@ static inline int ehci_fsl_mpc512x_drv_resume(struct device *dev) } #endif /* CONFIG_PPC_MPC512x */ -static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - - return container_of(ehci, struct ehci_fsl, ehci); -} - static int ehci_fsl_drv_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd); void __iomem *non_ehci = hcd->regs; if (of_device_is_compatible(dev->parent->of_node, @@ -589,14 +580,14 @@ static int ehci_fsl_drv_suspend(struct device *dev) if (!fsl_deep_sleep()) return 0; - ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL); + priv->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL); return 0; } static int ehci_fsl_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); - struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_fsl_priv *priv = hcd_to_ehci_fsl_priv(hcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd); void __iomem *non_ehci = hcd->regs; @@ -612,7 +603,7 @@ static int ehci_fsl_drv_resume(struct device *dev) usb_root_hub_lost_power(hcd->self.root_hub); /* Restore USB PHY settings and enable the controller. */ - iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL); + iowrite32be(priv->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL); ehci_reset(ehci); ehci_fsl_reinit(ehci); @@ -671,7 +662,7 @@ static int ehci_start_port_reset(struct usb_hcd *hcd, unsigned port) #endif /* CONFIG_USB_OTG */ static const struct ehci_driver_overrides ehci_fsl_overrides __initconst = { - .extra_priv_size = sizeof(struct ehci_fsl), + .extra_priv_size = sizeof(struct ehci_fsl_priv), .reset = ehci_fsl_setup, }; diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index a7c934404ebc..62318291f566 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) } /* Get and enable clock if any specified */ - uhci->clk = devm_clk_get(&pdev->dev, NULL); + uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(uhci->clk)) { ret = PTR_ERR(uhci->clk); goto err_rmr; diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h index f6b9a00a0ab9..4b8ff4815644 100644 --- a/drivers/usb/host/xhci-caps.h +++ b/drivers/usb/host/xhci-caps.h @@ -62,8 +62,8 @@ #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 
64 : 32) -/* db_off bitmask - bits 0:1 reserved */ -#define DBOFF_MASK (~0x3) +/* db_off bitmask - bits 31:2 Doorbell Array Offset */ +#define DBOFF_MASK (0xfffffffc) /* run_regs_off bitmask - bits 0:4 reserved */ #define RTSOFF_MASK (~0x1f) diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index fd7895b24367..0d4ce5734165 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -823,6 +823,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) { dma_addr_t deq; union xhci_trb *evt; + enum evtreturn ret = EVT_DONE; u32 ctrl, portsc; bool update_erdp = false; @@ -909,6 +910,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) break; case TRB_TYPE(TRB_TRANSFER): dbc_handle_xfer_event(dbc, evt); + ret = EVT_XFER_DONE; break; default: break; @@ -927,7 +929,7 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) lo_hi_writeq(deq, &dbc->regs->erdp); } - return EVT_DONE; + return ret; } static void xhci_dbc_handle_events(struct work_struct *work) @@ -936,6 +938,7 @@ static void xhci_dbc_handle_events(struct work_struct *work) struct xhci_dbc *dbc; unsigned long flags; unsigned int poll_interval; + unsigned long busypoll_timelimit; dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); poll_interval = dbc->poll_interval; @@ -954,11 +957,21 @@ static void xhci_dbc_handle_events(struct work_struct *work) dbc->driver->disconnect(dbc); break; case EVT_DONE: - /* set fast poll rate if there are pending data transfers */ + /* + * Set fast poll rate if there are pending out transfers, or + * a transfer was recently processed + */ + busypoll_timelimit = dbc->xfer_timestamp + + msecs_to_jiffies(DBC_XFER_INACTIVITY_TIMEOUT); + if (!list_empty(&dbc->eps[BULK_OUT].list_pending) || - !list_empty(&dbc->eps[BULK_IN].list_pending)) + time_is_after_jiffies(busypoll_timelimit)) poll_interval = 0; break; + case EVT_XFER_DONE: + dbc->xfer_timestamp = jiffies; + poll_interval = 0; + break; default: dev_info(dbc->dev, "stop handling dbc events\n"); return; diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index 9dc8f4d8077c..47ac72c2286d 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -96,6 +96,7 @@ struct dbc_ep { #define DBC_WRITE_BUF_SIZE 8192 #define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */ #define DBC_POLL_INTERVAL_MAX 5000 /* milliseconds */ +#define DBC_XFER_INACTIVITY_TIMEOUT 10 /* milliseconds */ /* * Private structure for DbC hardware state: */ @@ -142,6 +143,7 @@ struct xhci_dbc { enum dbc_state state; struct delayed_work event_work; unsigned int poll_interval; /* ms */ + unsigned long xfer_timestamp; unsigned resume_required:1; struct dbc_ep eps[2]; @@ -187,6 +189,7 @@ struct dbc_request { enum evtreturn { EVT_ERR = -1, EVT_DONE, + EVT_XFER_DONE, EVT_GSER, EVT_DISC, }; diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 1f5ef174abea..c6d44977193f 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -631,6 +631,112 @@ static void xhci_debugfs_create_ports(struct xhci_hcd *xhci, } } +static int xhci_port_bw_show(struct xhci_hcd *xhci, u8 dev_speed, + struct seq_file *s) +{ + unsigned int num_ports; + unsigned int i; + int ret; + struct xhci_container_ctx *ctx; + struct usb_hcd *hcd = xhci_to_hcd(xhci); + struct device *dev = hcd->self.controller; + + ret = pm_runtime_get_sync(dev); + if (ret < 0) + return ret; + + num_ports = 
HCS_MAX_PORTS(xhci->hcs_params1); + + ctx = xhci_alloc_port_bw_ctx(xhci, 0); + if (!ctx) { + pm_runtime_put_sync(dev); + return -ENOMEM; + } + + /* get roothub port bandwidth */ + ret = xhci_get_port_bandwidth(xhci, ctx, dev_speed); + if (ret) + goto err_out; + + /* print all roothub ports available bandwidth + * refer to xhci rev1_2 protocol 6.2.6 , byte 0 is reserved + */ + for (i = 1; i < num_ports+1; i++) + seq_printf(s, "port[%d] available bw: %d%%.\n", i, + ctx->bytes[i]); +err_out: + pm_runtime_put_sync(dev); + xhci_free_port_bw_ctx(xhci, ctx); + return ret; +} + +static int xhci_ss_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_SUPER, s); + return ret; +} + +static int xhci_hs_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_HIGH, s); + return ret; +} + +static int xhci_fs_bw_show(struct seq_file *s, void *unused) +{ + int ret; + struct xhci_hcd *xhci = (struct xhci_hcd *)s->private; + + ret = xhci_port_bw_show(xhci, USB_SPEED_FULL, s); + return ret; +} + +static struct xhci_file_map bw_context_files[] = { + {"SS_BW", xhci_ss_bw_show, }, + {"HS_BW", xhci_hs_bw_show, }, + {"FS_BW", xhci_fs_bw_show, }, +}; + +static int bw_context_open(struct inode *inode, struct file *file) +{ + int i; + struct xhci_file_map *f_map; + const char *file_name = file_dentry(file)->d_iname; + + for (i = 0; i < ARRAY_SIZE(bw_context_files); i++) { + f_map = &bw_context_files[i]; + + if (strcmp(f_map->name, file_name) == 0) + break; + } + + return single_open(file, f_map->show, inode->i_private); +} + +static const struct file_operations bw_fops = { + .open = bw_context_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void xhci_debugfs_create_bandwidth(struct xhci_hcd *xhci, + struct dentry *parent) +{ + parent = debugfs_create_dir("port_bandwidth", parent); + + xhci_debugfs_create_files(xhci, bw_context_files, + ARRAY_SIZE(bw_context_files), + xhci, + parent, &bw_fops); +} + void xhci_debugfs_init(struct xhci_hcd *xhci) { struct device *dev = xhci_to_hcd(xhci)->self.controller; @@ -681,6 +787,8 @@ void xhci_debugfs_init(struct xhci_hcd *xhci) xhci->debugfs_slots = debugfs_create_dir("devices", xhci->debugfs_root); xhci_debugfs_create_ports(xhci, xhci->debugfs_root); + + xhci_debugfs_create_bandwidth(xhci, xhci->debugfs_root); } void xhci_debugfs_exit(struct xhci_hcd *xhci) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 486347776cb2..92bb84f8132a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1907,7 +1907,7 @@ int xhci_bus_resume(struct usb_hcd *hcd) * prevent port event interrupts from interfering * with usb2 port resume process */ - xhci_disable_interrupter(xhci->interrupters[0]); + xhci_disable_interrupter(xhci, xhci->interrupters[0]); disabled_irq = true; } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index d698095fc88d..bd745a0f2f78 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -484,6 +484,35 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, kfree(ctx); } +struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci, + gfp_t flags) +{ + struct xhci_container_ctx *ctx; + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + + ctx = kzalloc_node(sizeof(*ctx), flags, dev_to_node(dev)); + if (!ctx) + return NULL; 
+ + ctx->size = GET_PORT_BW_ARRAY_SIZE; + + ctx->bytes = dma_pool_zalloc(xhci->port_bw_pool, flags, &ctx->dma); + if (!ctx->bytes) { + kfree(ctx); + return NULL; + } + return ctx; +} + +void xhci_free_port_bw_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + if (!ctx) + return; + dma_pool_free(xhci->port_bw_pool, ctx->bytes, ctx->dma); + kfree(ctx); +} + struct xhci_input_control_ctx *xhci_get_input_control_ctx( struct xhci_container_ctx *ctx) { @@ -1802,10 +1831,10 @@ xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) */ if (ir->ir_set) { tmp = readl(&ir->ir_set->erst_size); - tmp &= ERST_SIZE_MASK; + tmp &= ~ERST_SIZE_MASK; writel(tmp, &ir->ir_set->erst_size); - xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); + xhci_update_erst_dequeue(xhci, ir, true); } } @@ -1848,6 +1877,11 @@ void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrup return; } + /* + * Cleanup secondary interrupter to ensure there are no pending events. + * This also updates event ring dequeue pointer back to the start. + */ + xhci_skip_sec_intr_events(xhci, ir->event_ring, ir); intr_num = ir->intr_num; xhci_remove_interrupter(xhci, ir); @@ -1907,6 +1941,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed small stream array pool"); + dma_pool_destroy(xhci->port_bw_pool); + xhci->port_bw_pool = NULL; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Freed xhci port bw array pool"); + dma_pool_destroy(xhci->medium_streams_pool); xhci->medium_streams_pool = NULL; xhci_dbg_trace(xhci, trace_xhci_dbg_init, @@ -2282,37 +2321,25 @@ xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags) return ir; } -static int -xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, - unsigned int intr_num) +void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num) { + struct xhci_interrupter *ir; u64 erst_base; u32 erst_size; - if (intr_num >= xhci->max_interrupters) { - xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n", - intr_num, xhci->max_interrupters); - return -EINVAL; - } - - if (xhci->interrupters[intr_num]) { - xhci_warn(xhci, "Interrupter %d\n already set up", intr_num); - return -EINVAL; - } - - xhci->interrupters[intr_num] = ir; + ir = xhci->interrupters[intr_num]; ir->intr_num = intr_num; ir->ir_set = &xhci->run_regs->ir_set[intr_num]; /* set ERST count with the number of entries in the segment table */ erst_size = readl(&ir->ir_set->erst_size); - erst_size &= ERST_SIZE_MASK; + erst_size &= ~ERST_SIZE_MASK; erst_size |= ir->event_ring->num_segs; writel(erst_size, &ir->ir_set->erst_size); erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); - erst_base &= ERST_BASE_RSVDP; - erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP; + erst_base &= ~ERST_BASE_ADDRESS_MASK; + erst_base |= ir->erst.erst_dma_addr & ERST_BASE_ADDRESS_MASK; if (xhci->quirks & XHCI_WRITE_64_HI_LO) hi_lo_writeq(erst_base, &ir->ir_set->erst_base); else @@ -2320,20 +2347,19 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, /* Set the event ring dequeue address of this interrupter */ xhci_set_hc_event_deq(xhci, ir); - - return 0; } struct xhci_interrupter * xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, - u32 imod_interval) + u32 imod_interval, unsigned int intr_num) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_interrupter *ir; unsigned int i; int err = -ENOSPC; - if (!xhci->interrupters || 
xhci->max_interrupters <= 1) + if (!xhci->interrupters || xhci->max_interrupters <= 1 || + intr_num >= xhci->max_interrupters) return NULL; ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL); @@ -2341,15 +2367,23 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, return NULL; spin_lock_irq(&xhci->lock); - - /* Find available secondary interrupter, interrupter 0 is reserved for primary */ - for (i = 1; i < xhci->max_interrupters; i++) { - if (xhci->interrupters[i] == NULL) { - err = xhci_add_interrupter(xhci, ir, i); - break; + if (!intr_num) { + /* Find available secondary interrupter, interrupter 0 is reserved for primary */ + for (i = 1; i < xhci->max_interrupters; i++) { + if (!xhci->interrupters[i]) { + xhci->interrupters[i] = ir; + xhci_add_interrupter(xhci, i); + err = 0; + break; + } + } + } else { + if (!xhci->interrupters[intr_num]) { + xhci->interrupters[intr_num] = ir; + xhci_add_interrupter(xhci, intr_num); + err = 0; } } - spin_unlock_irq(&xhci->lock); if (err) { @@ -2359,78 +2393,32 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, return NULL; } - err = xhci_set_interrupter_moderation(ir, imod_interval); - if (err) - xhci_warn(xhci, "Failed to set interrupter %d moderation to %uns\n", - i, imod_interval); + xhci_set_interrupter_moderation(ir, imod_interval); xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", - i, xhci->max_interrupters); + ir->intr_num, xhci->max_interrupters); return ir; } EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter); -static void xhci_hcd_page_size(struct xhci_hcd *xhci) -{ - u32 page_size; - - page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK; - if (!is_power_of_2(page_size)) { - xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size); - /* Fallback to 4K page size, since that's common */ - page_size = 1; - } - - xhci->page_size = page_size << 12; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", - xhci->page_size >> 10); -} - int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { - struct xhci_interrupter *ir; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; dma_addr_t dma; - unsigned int val, val2; - u64 val_64; - u32 temp; - int i; - - INIT_LIST_HEAD(&xhci->cmd_list); - - /* init command timeout work */ - INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); - init_completion(&xhci->cmd_ring_stop_completion); - - xhci_hcd_page_size(xhci); - - /* - * Program the Number of Device Slots Enabled field in the CONFIG - * register with the max value of slots the HC can handle. - */ - val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1)); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// xHC can handle at most %d device slots.", val); - val2 = readl(&xhci->op_regs->config_reg); - val |= (val2 & ~HCS_SLOTS_MASK); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting Max device slots reg = 0x%x.", val); - writel(val, &xhci->op_regs->config_reg); /* * xHCI section 5.4.6 - Device Context array must be * "physically contiguous and 64-byte (cache line) aligned". 
*/ - xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, - flags); + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags); if (!xhci->dcbaa) goto fail; + xhci->dcbaa->dma = dma; xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Device context base array address = 0x%pad (DMA), %p (virt)", - &xhci->dcbaa->dma, xhci->dcbaa); - xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); + "Device context base array address = 0x%pad (DMA), %p (virt)", + &xhci->dcbaa->dma, xhci->dcbaa); /* * Initialize the ring segment pool. The ring must be a contiguous @@ -2446,92 +2434,77 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) else xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size); + if (!xhci->segment_pool) + goto fail; /* See Table 46 and Note on Figure 55 */ - xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, - 2112, 64, xhci->page_size); - if (!xhci->segment_pool || !xhci->device_pool) + xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 2112, 64, + xhci->page_size); + if (!xhci->device_pool) goto fail; - /* Linear stream context arrays don't have any boundary restrictions, + /* + * Linear stream context arrays don't have any boundary restrictions, * and only need to be 16-byte aligned. */ - xhci->small_streams_pool = - dma_pool_create("xHCI 256 byte stream ctx arrays", - dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); - xhci->medium_streams_pool = - dma_pool_create("xHCI 1KB stream ctx arrays", - dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); - /* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE - * will be allocated with dma_alloc_coherent() + xhci->small_streams_pool = dma_pool_create("xHCI 256 byte stream ctx arrays", + dev, SMALL_STREAM_ARRAY_SIZE, 16, 0); + if (!xhci->small_streams_pool) + goto fail; + + /* + * Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE will be + * allocated with dma_alloc_coherent(). */ - if (!xhci->small_streams_pool || !xhci->medium_streams_pool) + xhci->medium_streams_pool = dma_pool_create("xHCI 1KB stream ctx arrays", + dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0); + if (!xhci->medium_streams_pool) + goto fail; + + /* + * refer to xhci rev1_2 protocol 5.3.3 max ports is 255. + * refer to xhci rev1_2 protocol 6.4.3.14 port bandwidth buffer need + * to be 16-byte aligned. + */ + xhci->port_bw_pool = dma_pool_create("xHCI 256 port bw ctx arrays", + dev, GET_PORT_BW_ARRAY_SIZE, 16, 0); + if (!xhci->port_bw_pool) goto fail; /* Set up the command ring to have one segments for now. 
*/ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags); if (!xhci->cmd_ring) goto fail; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocated command ring at %p", xhci->cmd_ring); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", - &xhci->cmd_ring->first_seg->dma); - /* Set the address in the Command Ring Control register */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%016llx", val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocated command ring at %p", xhci->cmd_ring); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%pad", + &xhci->cmd_ring->first_seg->dma); - /* Reserve one command ring TRB for disabling LPM. + /* + * Reserve one command ring TRB for disabling LPM. * Since the USB core grabs the shared usb_bus bandwidth mutex before * disabling LPM, we only need to reserve one TRB for all devices. */ xhci->cmd_ring_reserved_trbs++; - val = readl(&xhci->cap_regs->db_off); - val &= DBOFF_MASK; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Doorbell array is located at offset 0x%x from cap regs base addr", - val); - xhci->dba = (void __iomem *) xhci->cap_regs + val; - /* Allocate and set up primary interrupter 0 with an event ring. */ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Allocating primary event ring"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Allocating primary event ring"); xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters), flags, dev_to_node(dev)); - - ir = xhci_alloc_interrupter(xhci, 0, flags); - if (!ir) + if (!xhci->interrupters) goto fail; - if (xhci_add_interrupter(xhci, ir, 0)) + xhci->interrupters[0] = xhci_alloc_interrupter(xhci, 0, flags); + if (!xhci->interrupters[0]) goto fail; - ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; - - for (i = 0; i < MAX_HC_SLOTS; i++) - xhci->devs[i] = NULL; - if (scratchpad_alloc(xhci, flags)) goto fail; + if (xhci_setup_port_arrays(xhci, flags)) goto fail; - /* Enable USB 3.0 device notifications for function remote wake, which - * is necessary for allowing USB 3.0 devices to do remote wakeup from - * U3 (device suspend). 
- */ - temp = readl(&xhci->op_regs->dev_notification); - temp &= ~DEV_NOTE_MASK; - temp |= DEV_NOTE_FWAKE; - writel(temp, &xhci->op_regs->dev_notification); - return 0; fail: diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 3155e3a842da..6dab142e7278 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -267,6 +267,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s device_property_read_u32(tmpdev, "imod-interval-ns", &xhci->imod_interval); + device_property_read_u16(tmpdev, "num-hc-interrupters", + &xhci->max_interrupters); } /* diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index b906bc2eea5f..e038ad3375dc 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -699,7 +699,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, int new_cycle; dma_addr_t addr; u64 hw_dequeue; - bool cycle_found = false; + bool hw_dequeue_found = false; bool td_last_trb_found = false; u32 trb_sct = 0; int ret; @@ -715,25 +715,24 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id); new_seg = ep_ring->deq_seg; new_deq = ep_ring->dequeue; - new_cycle = hw_dequeue & 0x1; + new_cycle = le32_to_cpu(td->end_trb->generic.field[3]) & TRB_CYCLE; /* - * We want to find the pointer, segment and cycle state of the new trb - * (the one after current TD's end_trb). We know the cycle state at - * hw_dequeue, so walk the ring until both hw_dequeue and end_trb are - * found. + * Walk the ring until both the next TRB and hw_dequeue are found (don't + * move hw_dequeue back if it went forward due to a HW bug). Cycle state + * is loaded from a known good TRB, track later toggles to maintain it. */ do { - if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) + if (!hw_dequeue_found && xhci_trb_virt_to_dma(new_seg, new_deq) == (dma_addr_t)(hw_dequeue & ~0xf)) { - cycle_found = true; + hw_dequeue_found = true; if (td_last_trb_found) break; } if (new_deq == td->end_trb) td_last_trb_found = true; - if (cycle_found && trb_is_link(new_deq) && + if (td_last_trb_found && trb_is_link(new_deq) && link_trb_toggles_cycle(new_deq)) new_cycle ^= 0x1; @@ -745,7 +744,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, return -EINVAL; } - } while (!cycle_found || !td_last_trb_found); + } while (!hw_dequeue_found || !td_last_trb_found); /* Don't update the ring cycle state for the producer (us). 
*/ addr = xhci_trb_virt_to_dma(new_seg, new_deq); @@ -1899,6 +1898,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_NEC_GET_FW: xhci_handle_cmd_nec_get_fw(xhci, event); break; + case TRB_GET_BW: + break; default: /* Skip over unknown commands on the event ring */ xhci_info(xhci, "INFO unknown command type %d\n", cmd_type); @@ -2978,9 +2979,6 @@ debug_finding_td: (unsigned long long)xhci_trb_virt_to_dma(td->start_seg, td->start_trb), (unsigned long long)xhci_trb_virt_to_dma(td->end_seg, td->end_trb)); - xhci_for_each_ring_seg(ep_ring->first_seg, ep_seg) - xhci_warn(xhci, "Ring seg %u dma %pad\n", ep_seg->num, &ep_seg->dma); - return -ESHUTDOWN; err_out: @@ -3051,9 +3049,9 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter * - When all events have finished * - To avoid "Event Ring Full Error" condition */ -static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, - struct xhci_interrupter *ir, - bool clear_ehb) +void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + struct xhci_interrupter *ir, + bool clear_ehb) { u64 temp_64; dma_addr_t deq; @@ -3084,11 +3082,14 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir) { if (!ir->ip_autoclear) { - u32 irq_pending; + u32 iman; + + iman = readl(&ir->ir_set->iman); + iman |= IMAN_IP; + writel(iman, &ir->ir_set->iman); - irq_pending = readl(&ir->ir_set->irq_pending); - irq_pending |= IMAN_IP; - writel(irq_pending, &ir->ir_set->irq_pending); + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); } } @@ -3096,10 +3097,11 @@ static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir) * Handle all OS-owned events on an interrupter event ring. It may drop * and reaquire xhci->lock between event processing. */ -static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir) +static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + bool skip_events) { int event_loop = 0; - int err; + int err = 0; u64 temp; xhci_clear_interrupt_pending(ir); @@ -3122,7 +3124,8 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir /* Process all OS owned event TRBs on this event ring */ while (unhandled_event_trb(ir->event_ring)) { - err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); + if (!skip_events) + err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue); /* * If half a segment of events have been handled in one go then @@ -3150,6 +3153,37 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir } /* + * Move the event ring dequeue pointer to skip events kept in the secondary + * event ring. This is used to ensure that pending events in the ring are + * acknowledged, so the xHCI HCD can properly enter suspend/resume. The + * secondary ring is typically maintained by an external component. 
+ */ +void xhci_skip_sec_intr_events(struct xhci_hcd *xhci, + struct xhci_ring *ring, struct xhci_interrupter *ir) +{ + union xhci_trb *current_trb; + u64 erdp_reg; + dma_addr_t deq; + + /* disable irq, ack pending interrupt and ack all pending events */ + xhci_disable_interrupter(xhci, ir); + + /* last acked event trb is in erdp reg */ + erdp_reg = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); + deq = (dma_addr_t)(erdp_reg & ERST_PTR_MASK); + if (!deq) { + xhci_err(xhci, "event ring handling not required\n"); + return; + } + + current_trb = ir->event_ring->dequeue; + /* read cycle state of the last acked trb to find out CCS */ + ring->cycle_state = le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE; + + xhci_handle_events(xhci, ir, true); +} + +/* * xHCI spec says we can get an interrupt, and if the HC has an error condition, * we might get bad data out of the event ring. Section 4.10.2.7 has a list of * indicators of an event TRB error, but we check the status *first* to be safe. @@ -3193,7 +3227,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) writel(status, &xhci->op_regs->status); /* This is the handler of the primary interrupter */ - xhci_handle_events(xhci, xhci->interrupters[0]); + xhci_handle_events(xhci, xhci->interrupters[0], false); out: spin_unlock(&xhci->lock); @@ -4415,6 +4449,17 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, command_must_succeed); } +/* Queue a get root hub port bandwidth command TRB */ +int xhci_queue_get_port_bw(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, + u8 dev_speed, bool command_must_succeed) +{ + return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), + upper_32_bits(in_ctx_ptr), 0, + TRB_TYPE(TRB_GET_BW) | DEV_SPEED_FOR_TRB(dev_speed), + command_must_succeed); +} + /* Queue an evaluate context command TRB */ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) diff --git a/drivers/usb/host/xhci-sideband.c b/drivers/usb/host/xhci-sideband.c new file mode 100644 index 000000000000..d49f9886dd84 --- /dev/null +++ b/drivers/usb/host/xhci-sideband.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * xHCI host controller sideband support + * + * Copyright (c) 2023-2025, Intel Corporation. + * + * Author: Mathias Nyman + */ + +#include <linux/usb/xhci-sideband.h> +#include <linux/dma-direct.h> + +#include "xhci.h" + +/* sideband internal helpers */ +static struct sg_table * +xhci_ring_to_sgtable(struct xhci_sideband *sb, struct xhci_ring *ring) +{ + struct xhci_segment *seg; + struct sg_table *sgt; + unsigned int n_pages; + struct page **pages; + struct device *dev; + size_t sz; + int i; + + dev = xhci_to_hcd(sb->xhci)->self.sysdev; + sz = ring->num_segs * TRB_SEGMENT_SIZE; + n_pages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + pages = kvmalloc_array(n_pages, sizeof(struct page *), GFP_KERNEL); + if (!pages) + return NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + kvfree(pages); + return NULL; + } + + seg = ring->first_seg; + if (!seg) + goto err; + /* + * Rings can potentially have multiple segments, create an array that + * carries page references to allocated segments. Utilize the + * sg_alloc_table_from_pages() to create the sg table, and to ensure + * that page links are created. 
+ */ + for (i = 0; i < ring->num_segs; i++) { + dma_get_sgtable(dev, sgt, seg->trbs, seg->dma, + TRB_SEGMENT_SIZE); + pages[i] = sg_page(sgt->sgl); + sg_free_table(sgt); + seg = seg->next; + } + + if (sg_alloc_table_from_pages(sgt, pages, n_pages, 0, sz, GFP_KERNEL)) + goto err; + + /* + * Save first segment dma address to sg dma_address field for the sideband + * client to have access to the IOVA of the ring. + */ + sg_dma_address(sgt->sgl) = ring->first_seg->dma; + + return sgt; + +err: + kvfree(pages); + kfree(sgt); + + return NULL; +} + +static void +__xhci_sideband_remove_endpoint(struct xhci_sideband *sb, struct xhci_virt_ep *ep) +{ + /* + * Issue a stop endpoint command when an endpoint is removed. + * The stop ep cmd handler will handle the ring cleanup. + */ + xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL); + + ep->sideband = NULL; + sb->eps[ep->ep_index] = NULL; +} + +/* sideband api functions */ + +/** + * xhci_sideband_notify_ep_ring_free - notify client of xfer ring free + * @sb: sideband instance for this usb device + * @ep_index: usb endpoint index + * + * Notifies the xHCI sideband client driver of a xHCI transfer ring free + * routine. This will allow for the client to ensure that all transfers + * are completed. + * + * The callback should be synchronous, as the ring free happens after. + */ +void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb, + unsigned int ep_index) +{ + struct xhci_sideband_event evt; + + evt.type = XHCI_SIDEBAND_XFER_RING_FREE; + evt.evt_data = &ep_index; + + if (sb->notify_client) + sb->notify_client(sb->intf, &evt); +} +EXPORT_SYMBOL_GPL(xhci_sideband_notify_ep_ring_free); + +/** + * xhci_sideband_add_endpoint - add endpoint to sideband access list + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Adds an endpoint to the list of sideband accessed endpoints for this usb + * device. + * After an endpoint is added the sideband client can get the endpoint transfer + * ring buffer by calling xhci_sideband_endpoint_buffer() + * + * Return: 0 on success, negative error otherwise. + */ +int +xhci_sideband_add_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + mutex_lock(&sb->mutex); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = &sb->vdev->eps[ep_index]; + + if (ep->ep_state & EP_HAS_STREAMS) { + mutex_unlock(&sb->mutex); + return -EINVAL; + } + + /* + * Note, we don't know the DMA mask of the audio DSP device, if its + * smaller than for xhci it won't be able to access the endpoint ring + * buffer. This could be solved by not allowing the audio class driver + * to add the endpoint the normal way, but instead offload it immediately, + * and let this function add the endpoint and allocate the ring buffer + * with the smallest common DMA mask + */ + if (sb->eps[ep_index] || ep->sideband) { + mutex_unlock(&sb->mutex); + return -EBUSY; + } + + ep->sideband = sb; + sb->eps[ep_index] = ep; + mutex_unlock(&sb->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_sideband_add_endpoint); + +/** + * xhci_sideband_remove_endpoint - remove endpoint from sideband access list + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Removes an endpoint from the list of sideband accessed endpoints for this usb + * device. + * sideband client should no longer touch the endpoint transfer buffer after + * calling this. + * + * Return: 0 on success, negative error otherwise. 
+ */ +int +xhci_sideband_remove_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + mutex_lock(&sb->mutex); + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->sideband || ep->sideband != sb) { + mutex_unlock(&sb->mutex); + return -ENODEV; + } + + __xhci_sideband_remove_endpoint(sb, ep); + xhci_initialize_ring_info(ep->ring); + mutex_unlock(&sb->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(xhci_sideband_remove_endpoint); + +int +xhci_sideband_stop_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->sideband || ep->sideband != sb) + return -EINVAL; + + return xhci_stop_endpoint_sync(sb->xhci, ep, 0, GFP_KERNEL); +} +EXPORT_SYMBOL_GPL(xhci_sideband_stop_endpoint); + +/** + * xhci_sideband_get_endpoint_buffer - gets the endpoint transfer buffer address + * @sb: sideband instance for this usb device + * @host_ep: usb host endpoint + * + * Returns the address of the endpoint buffer where the xHC controller reads queued + * transfer TRBs from. This is the starting address of the ring buffer where the + * sideband client should write TRBs to. + * + * The caller needs to free the returned sg_table. + * + * Return: struct sg_table * if successful. NULL otherwise. + */ +struct sg_table * +xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep) +{ + struct xhci_virt_ep *ep; + unsigned int ep_index; + + ep_index = xhci_get_endpoint_index(&host_ep->desc); + ep = sb->eps[ep_index]; + + if (!ep || !ep->ring || !ep->sideband || ep->sideband != sb) + return NULL; + + return xhci_ring_to_sgtable(sb, ep->ring); +} +EXPORT_SYMBOL_GPL(xhci_sideband_get_endpoint_buffer); + +/** + * xhci_sideband_get_event_buffer - return the event buffer for this device + * @sb: sideband instance for this usb device + * + * If a secondary xhci interrupter is set up for this usb device then this + * function returns the address of the event buffer where xHC writes + * the transfer completion events. + * + * The caller needs to free the returned sg_table. + * + * Return: struct sg_table * if successful. NULL otherwise. + */ +struct sg_table * +xhci_sideband_get_event_buffer(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return NULL; + + return xhci_ring_to_sgtable(sb, sb->ir->event_ring); +} +EXPORT_SYMBOL_GPL(xhci_sideband_get_event_buffer); + +/** + * xhci_sideband_create_interrupter - creates a new interrupter for this sideband + * @sb: sideband instance for this usb device + * @num_seg: number of event ring segments to allocate + * @ip_autoclear: IP autoclearing support such as MSI implemented + * + * Sets up an xhci interrupter that can be used for this sideband accessed usb + * device. Transfer events for this device can be routed to this interrupter's + * event ring by setting the 'Interrupter Target' field correctly when queueing + * the transfer TRBs. 
+ * Once this interrupter is created, the interrupter target ID can be obtained + * by calling xhci_sideband_interrupter_id(). + * + * Returns 0 on success, negative error otherwise + */ +int +xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg, + bool ip_autoclear, u32 imod_interval, int intr_num) +{ + int ret = 0; + + if (!sb || !sb->xhci) + return -ENODEV; + + mutex_lock(&sb->mutex); + if (sb->ir) { + ret = -EBUSY; + goto out; + } + + sb->ir = xhci_create_secondary_interrupter(xhci_to_hcd(sb->xhci), + num_seg, imod_interval, + intr_num); + if (!sb->ir) { + ret = -ENOMEM; + goto out; + } + + sb->ir->ip_autoclear = ip_autoclear; + +out: + mutex_unlock(&sb->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(xhci_sideband_create_interrupter); + +/** + * xhci_sideband_remove_interrupter - remove the interrupter from a sideband + * @sb: sideband instance for this usb device + * + * Removes a registered interrupter for a sideband. This allows other + * sideband users to utilize this interrupter. + */ +void +xhci_sideband_remove_interrupter(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return; + + mutex_lock(&sb->mutex); + xhci_remove_secondary_interrupter(xhci_to_hcd(sb->xhci), sb->ir); + + sb->ir = NULL; + mutex_unlock(&sb->mutex); +} +EXPORT_SYMBOL_GPL(xhci_sideband_remove_interrupter); + +/** + * xhci_sideband_interrupter_id - return the interrupter target id + * @sb: sideband instance for this usb device + * + * If a secondary xhci interrupter is set up for this usb device then this + * function returns the ID used by the interrupter. The sideband client + * needs to write this ID to the 'Interrupter Target' field of the transfer TRBs + * it queues on the endpoint's transfer ring to ensure transfer completion events + * are written by xHC to the correct interrupter event ring. + * + * Returns interrupter id on success, negative error otherwise + */ +int +xhci_sideband_interrupter_id(struct xhci_sideband *sb) +{ + if (!sb || !sb->ir) + return -ENODEV; + + return sb->ir->intr_num; +} +EXPORT_SYMBOL_GPL(xhci_sideband_interrupter_id); + +/** + * xhci_sideband_register - register a sideband for a usb device + * @intf: usb interface associated with the sideband device + * + * Allows clients to utilize xHCI interrupters and fetch transfer and event + * ring parameters for executing data transfers. + * + * Return: pointer to a new xhci_sideband instance if successful. NULL otherwise. + */ +struct xhci_sideband * +xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type, + int (*notify_client)(struct usb_interface *intf, + struct xhci_sideband_event *evt)) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *vdev; + struct xhci_sideband *sb; + + /* + * Make sure the usb device is connected to an xhci controller. Fail + * registration if the type is anything other than XHCI_SIDEBAND_VENDOR, + * as this is the only type that is currently supported by xhci-sideband. 
+ */ + if (!udev->slot_id || type != XHCI_SIDEBAND_VENDOR) + return NULL; + + sb = kzalloc_node(sizeof(*sb), GFP_KERNEL, dev_to_node(hcd->self.sysdev)); + if (!sb) + return NULL; + + mutex_init(&sb->mutex); + + /* check this device isn't already controlled via sideband */ + spin_lock_irq(&xhci->lock); + + vdev = xhci->devs[udev->slot_id]; + + if (!vdev || vdev->sideband) { + xhci_warn(xhci, "XHCI sideband for slot %d already in use\n", + udev->slot_id); + spin_unlock_irq(&xhci->lock); + kfree(sb); + return NULL; + } + + sb->xhci = xhci; + sb->vdev = vdev; + sb->intf = intf; + sb->type = type; + sb->notify_client = notify_client; + vdev->sideband = sb; + + spin_unlock_irq(&xhci->lock); + + return sb; +} +EXPORT_SYMBOL_GPL(xhci_sideband_register); + +/** + * xhci_sideband_unregister - unregister sideband access to a usb device + * @sb: sideband instance to be unregistered + * + * Unregisters sideband access to a usb device and frees the sideband + * instance. + * After this the endpoint and interrupter event buffers should no longer + * be accessed via sideband. The xhci driver can now take over handling + * the buffers. + */ +void +xhci_sideband_unregister(struct xhci_sideband *sb) +{ + struct xhci_hcd *xhci; + int i; + + if (!sb) + return; + + xhci = sb->xhci; + + mutex_lock(&sb->mutex); + for (i = 0; i < EP_CTX_PER_DEV; i++) + if (sb->eps[i]) + __xhci_sideband_remove_endpoint(sb, sb->eps[i]); + mutex_unlock(&sb->mutex); + + xhci_sideband_remove_interrupter(sb); + + spin_lock_irq(&xhci->lock); + sb->xhci = NULL; + sb->vdev->sideband = NULL; + spin_unlock_irq(&xhci->lock); + + kfree(sb); +} +EXPORT_SYMBOL_GPL(xhci_sideband_unregister); +MODULE_DESCRIPTION("xHCI sideband driver for secondary interrupter management"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index b5c362c2051d..0c7af44d4dae 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1364,6 +1364,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, tegra->otg_usb2_port); + pm_runtime_get_sync(tegra->dev); if (tegra->host_mode) { /* switch to host mode */ if (tegra->otg_usb3_port >= 0) { @@ -1393,6 +1394,7 @@ static void tegra_xhci_id_work(struct work_struct *work) } tegra_xhci_set_port_power(tegra, true, true); + pm_runtime_mark_last_busy(tegra->dev); } else { if (tegra->otg_usb3_port >= 0) @@ -1400,6 +1402,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra_xhci_set_port_power(tegra, true, false); } + pm_runtime_put_autosuspend(tegra->dev); } #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 90eb491267b5..803047bec3e7 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -20,6 +20,7 @@ #include <linux/string_choices.h> #include <linux/dmi.h> #include <linux/dma-mapping.h> +#include <linux/usb/xhci-sideband.h> #include "xhci.h" #include "xhci-trace.h" @@ -329,21 +330,29 @@ int xhci_enable_interrupter(struct xhci_interrupter *ir) if (!ir || !ir->ir_set) return -EINVAL; - iman = readl(&ir->ir_set->irq_pending); - writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman |= IMAN_IE; + writel(iman, &ir->ir_set->iman); + /* Read operation to guarantee the write has been flushed from posted buffers */ + readl(&ir->ir_set->iman); return 0; } -int xhci_disable_interrupter(struct xhci_interrupter *ir) +int 
xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { u32 iman; if (!ir || !ir->ir_set) return -EINVAL; - iman = readl(&ir->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending); + iman = readl(&ir->ir_set->iman); + iman &= ~IMAN_IE; + writel(iman, &ir->ir_set->iman); + + iman = readl(&ir->ir_set->iman); + if (iman & IMAN_IP) + xhci_dbg(xhci, "%s: Interrupt pending\n", __func__); return 0; } @@ -354,13 +363,16 @@ int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, { u32 imod; - if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250) + if (!ir || !ir->ir_set) return -EINVAL; - imod = readl(&ir->ir_set->irq_control); - imod &= ~ER_IRQ_INTERVAL_MASK; - imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK; - writel(imod, &ir->ir_set->irq_control); + /* IMODI value in IMOD register is in 250ns increments */ + imod_interval = umin(imod_interval / 250, IMODI_MASK); + + imod = readl(&ir->ir_set->imod); + imod &= ~IMODI_MASK; + imod |= imod_interval; + writel(imod, &ir->ir_set->imod); return 0; } @@ -460,6 +472,82 @@ static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1)); } +static void xhci_hcd_page_size(struct xhci_hcd *xhci) +{ + u32 page_size; + + page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK; + if (!is_power_of_2(page_size)) { + xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size); + /* Fallback to 4K page size, since that's common */ + page_size = 1; + } + + xhci->page_size = page_size << 12; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK", + xhci->page_size >> 10); +} + +static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci) +{ + u32 config_reg; + u32 max_slots; + + max_slots = HCS_MAX_SLOTS(xhci->hcs_params1); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots", + max_slots); + + config_reg = readl(&xhci->op_regs->config_reg); + config_reg &= ~HCS_SLOTS_MASK; + config_reg |= max_slots; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x", + config_reg); + writel(config_reg, &xhci->op_regs->config_reg); +} + +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + dma_addr_t deq_dma; + u64 crcr; + + deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue); + deq_dma &= CMD_RING_PTR_MASK; + + crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + crcr &= ~CMD_RING_PTR_MASK; + crcr |= deq_dma; + + crcr &= ~CMD_RING_CYCLE; + crcr |= xhci->cmd_ring->cycle_state; + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr); + xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring); +} + +static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci) +{ + u32 offset; + + offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK; + xhci->dba = (void __iomem *)xhci->cap_regs + offset; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Doorbell array is located at offset 0x%x from cap regs base addr", offset); +} + +/* + * Enable USB 3.0 device notifications for function remote wake, which is necessary + * for allowing USB 3.0 devices to do remote wakeup from U3 (device suspend). + */ +static void xhci_set_dev_notifications(struct xhci_hcd *xhci) +{ + u32 dev_notf; + + dev_notf = readl(&xhci->op_regs->dev_notification); + dev_notf &= ~DEV_NOTE_MASK; + dev_notf |= DEV_NOTE_FWAKE; + writel(dev_notf, &xhci->op_regs->dev_notification); +} /* * Initialize memory for HCD and xHC (one-time init). 
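The new xhci_set_interrupter_moderation() and xhci_hcd_page_size() helpers in the hunk above reduce to two small field conversions: the IMODI field counts 250 ns ticks and is capped at the 16-bit IMODI_MASK, and the PAGE_SIZE register value is shifted left by 12 to give the page size in bytes. A rough standalone sketch of that arithmetic (the 250 ns granularity, the 0xffff cap and the 12-bit shift come from the code above; the function names and the user-space framing are invented for illustration and are not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* IMODI counts 250 ns units and is a 16-bit field (IMODI_MASK). */
static uint32_t imod_field_from_ns(uint32_t imod_interval_ns)
{
	uint32_t ticks = imod_interval_ns / 250;

	return ticks < 0xffff ? ticks : 0xffff;	/* umin(ticks, IMODI_MASK) */
}

/* A PAGE_SIZE register value of 2^n means the controller uses 2^(n+12) byte pages. */
static uint32_t hcd_page_bytes(uint32_t page_size_reg)
{
	return page_size_reg << 12;	/* e.g. 0x1 -> 4096 */
}

int main(void)
{
	printf("IMODI for 1 ms: %u\n", (unsigned)imod_field_from_ns(1000000));
	printf("page bytes for PAGE_SIZE reg 0x1: %u\n", (unsigned)hcd_page_bytes(1));
	return 0;
}

So a 1 ms moderation interval maps to 4000 ticks, matching the register's documented default, and a PAGE_SIZE value of 0x1 corresponds to the common 4 KiB page size that xhci_hcd_page_size() falls back to.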
@@ -473,11 +561,37 @@ static int xhci_init(struct usb_hcd *hcd) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int retval; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init"); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__); spin_lock_init(&xhci->lock); + INIT_LIST_HEAD(&xhci->cmd_list); + INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout); + init_completion(&xhci->cmd_ring_stop_completion); + xhci_hcd_page_size(xhci); + memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs)); + retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init"); + if (retval) + return retval; + + /* Set the Number of Device Slots Enabled to the maximum supported value */ + xhci_enable_max_dev_slots(xhci); + + /* Set the address in the Command Ring Control register */ + xhci_set_cmd_ring_deq(xhci); + + /* Set Device Context Base Address Array pointer */ + xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr); + + /* Set Doorbell array pointer */ + xhci_set_doorbell_ptr(xhci); + + /* Set USB 3.0 device notifications for function remote wake */ + xhci_set_dev_notifications(xhci); + + /* Initialize the Primary interrupter */ + xhci_add_interrupter(xhci, 0); + xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX; /* Initializing Compliance Mode Recovery Data If Needed */ if (xhci_compliance_mode_recovery_timer_quirk_check()) { @@ -485,7 +599,8 @@ static int xhci_init(struct usb_hcd *hcd) compliance_mode_recovery_timer_init(xhci); } - return retval; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__); + return 0; } /*-------------------------------------------------------------------------*/ @@ -640,7 +755,7 @@ void xhci_stop(struct usb_hcd *hcd) "// Disabling event ring interrupts"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - xhci_disable_interrupter(ir); + xhci_disable_interrupter(xhci, ir); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); xhci_mem_cleanup(xhci); @@ -719,8 +834,8 @@ static void xhci_save_registers(struct xhci_hcd *xhci) ir->s3_erst_size = readl(&ir->ir_set->erst_size); ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); - ir->s3_irq_pending = readl(&ir->ir_set->irq_pending); - ir->s3_irq_control = readl(&ir->ir_set->irq_control); + ir->s3_iman = readl(&ir->ir_set->iman); + ir->s3_imod = readl(&ir->ir_set->imod); } } @@ -743,28 +858,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) writel(ir->s3_erst_size, &ir->ir_set->erst_size); xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base); xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); - writel(ir->s3_irq_pending, &ir->ir_set->irq_pending); - writel(ir->s3_irq_control, &ir->ir_set->irq_control); + writel(ir->s3_iman, &ir->ir_set->iman); + writel(ir->s3_imod, &ir->ir_set->imod); } } -static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) -{ - u64 val_64; - - /* step 2: initialize command ring buffer */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue) & - (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Setting command ring address to 0x%llx", - (long unsigned long) val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); -} - /* * The 
whole command ring must be cleared to zero when we suspend the host. * @@ -1092,7 +1190,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume) xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - xhci_disable_interrupter(xhci->interrupters[0]); + xhci_disable_interrupter(xhci, xhci->interrupters[0]); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -1359,6 +1457,7 @@ static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and * HCDs. Find the index for an endpoint given its descriptor. Use the return * value to right shift 1 for the bitmask. + * @desc: USB endpoint descriptor to determine index for * * Index = (epnum * 2) + direction - 1, * where direction = 0 for OUT, 1 for IN. @@ -3089,6 +3188,42 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) } EXPORT_SYMBOL_GPL(xhci_reset_bandwidth); +/* Get the available bandwidth of the ports under the xhci roothub */ +int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + u8 dev_speed) +{ + struct xhci_command *cmd; + unsigned long flags; + int ret; + + if (!ctx || !xhci) + return -EINVAL; + + cmd = xhci_alloc_command(xhci, true, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->in_ctx = ctx; + + /* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */ + spin_lock_irqsave(&xhci->lock, flags); + + ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0); + if (ret) { + spin_unlock_irqrestore(&xhci->lock, flags); + goto err_out; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + wait_for_completion(cmd->completion); +err_out: + kfree(cmd->completion); + kfree(cmd); + + return ret; +} + static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, struct xhci_container_ctx *in_ctx, struct xhci_container_ctx *out_ctx, @@ -3911,6 +4046,8 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, } if (ep->ring) { + if (ep->sideband) + xhci_sideband_notify_ep_ring_free(ep->sideband, i); xhci_debugfs_remove_endpoint(xhci, virt_dev, i); xhci_free_endpoint_ring(xhci, virt_dev, i); } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 242ab9fbc8ae..49887a303e43 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -152,10 +152,6 @@ struct xhci_op_regs { #define XHCI_RESET_LONG_USEC (10 * 1000 * 1000) #define XHCI_RESET_SHORT_USEC (250 * 1000) -/* IMAN - Interrupt Management Register */ -#define IMAN_IE (1 << 1) -#define IMAN_IP (1 << 0) - /* USBSTS - USB status - status bitmasks */ /* HC not running - set to 1 when run/stop bit is cleared. */ #define STS_HALT XHCI_STS_HALT @@ -184,23 +180,22 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. */ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. * SW does need to pay attention to function wake notifications. 
*/ -#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1) +#define DEV_NOTE_FWAKE (1 << 1) /* CRCR - Command Ring Control Register - cmd_ring bitmasks */ -/* bit 0 is the command ring cycle state */ +/* bit 0 - Cycle bit indicates the ownership of the command ring */ +#define CMD_RING_CYCLE (1 << 0) /* stop ring operation after completion of the currently executing command */ #define CMD_RING_PAUSE (1 << 1) /* stop ring immediately - abort the currently executing command */ #define CMD_RING_ABORT (1 << 2) /* true: command ring is running */ #define CMD_RING_RUNNING (1 << 3) -/* bits 4:5 reserved and should be preserved */ -/* Command Ring pointer - bit mask for the lower 32 bits. */ -#define CMD_RING_RSVD_BITS (0x3f) +/* bits 63:6 - Command Ring pointer */ +#define CMD_RING_PTR_MASK GENMASK_ULL(63, 6) /* CONFIG - Configure Register - config_reg bitmasks */ /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */ @@ -215,14 +210,13 @@ struct xhci_op_regs { #define XHCI_PAGE_SIZE_MASK 0xffff /** - * struct xhci_intr_reg - Interrupt Register Set - * @irq_pending: IMAN - Interrupt Management Register. Used to enable + * struct xhci_intr_reg - Interrupt Register Set, v1.2 section 5.5.2. + * @iman: IMAN - Interrupt Management Register. Used to enable * interrupts and check for pending interrupts. - * @irq_control: IMOD - Interrupt Moderation Register. - * Used to throttle interrupts. - * @erst_size: Number of segments in the Event Ring Segment Table (ERST). - * @erst_base: ERST base address. - * @erst_dequeue: Event ring dequeue pointer. + * @imod: IMOD - Interrupt Moderation Register. Used to throttle interrupts. + * @erst_size: ERSTSZ - Number of segments in the Event Ring Segment Table (ERST). + * @erst_base: ERSTBA - Event ring segment table base address. + * @erst_dequeue: ERDP - Event ring dequeue pointer. * * Each interrupter (defined by a MSI-X vector) has an event ring and an Event * Ring Segment Table (ERST) associated with it. The event ring is comprised of @@ -232,48 +226,51 @@ struct xhci_op_regs { * updates the dequeue pointer. */ struct xhci_intr_reg { - __le32 irq_pending; - __le32 irq_control; + __le32 iman; + __le32 imod; __le32 erst_size; __le32 rsvd; __le64 erst_base; __le64 erst_dequeue; }; -/* irq_pending bitmasks */ -#define ER_IRQ_PENDING(p) ((p) & 0x1) -/* bits 2:31 need to be preserved */ -/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */ -#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe) -#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2) -#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2)) - -/* irq_control bitmasks */ -/* Minimum interval between interrupts (in 250ns intervals). The interval - * between interrupts will be longer if there are no events on the event ring. - * Default is 4000 (1 ms). +/* iman bitmasks */ +/* bit 0 - Interrupt Pending (IP), whether there is an interrupt pending. Write-1-to-clear. */ +#define IMAN_IP (1 << 0) +/* bit 1 - Interrupt Enable (IE), whether the interrupter is capable of generating an interrupt */ +#define IMAN_IE (1 << 1) + +/* imod bitmasks */ +/* + * bits 15:0 - Interrupt Moderation Interval, the minimum interval between interrupts + * (in 250ns intervals). The interval between interrupts will be longer if there are no + * events on the event ring. Default is 4000 (1 ms). 
*/ -#define ER_IRQ_INTERVAL_MASK (0xffff) -/* Counter used to count down the time to the next interrupt - HW use only */ -#define ER_IRQ_COUNTER_MASK (0xffff << 16) +#define IMODI_MASK (0xffff) +/* bits 31:16 - Interrupt Moderation Counter, used to count down the time to the next interrupt */ +#define IMODC_MASK (0xffff << 16) /* erst_size bitmasks */ -/* Preserve bits 16:31 of erst_size */ -#define ERST_SIZE_MASK (0xffff << 16) +/* bits 15:0 - Event Ring Segment Table Size, number of ERST entries */ +#define ERST_SIZE_MASK (0xffff) /* erst_base bitmasks */ -#define ERST_BASE_RSVDP (GENMASK_ULL(5, 0)) +/* bits 63:6 - Event Ring Segment Table Base Address Register */ +#define ERST_BASE_ADDRESS_MASK GENMASK_ULL(63, 6) /* erst_dequeue bitmasks */ -/* Dequeue ERST Segment Index (DESI) - Segment number (or alias) - * where the current dequeue pointer lies. This is an optional HW hint. +/* + * bits 2:0 - Dequeue ERST Segment Index (DESI), is the segment number (or alias) where the + * current dequeue pointer lies. This is an optional HW hint. */ #define ERST_DESI_MASK (0x7) -/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by +/* + * bit 3 - Event Handler Busy (EHB), whether the event ring is scheduled to be serviced by * a work queue (or delayed service routine)? */ #define ERST_EHB (1 << 3) -#define ERST_PTR_MASK (GENMASK_ULL(63, 4)) +/* bits 63:4 - Event Ring Dequeue Pointer */ +#define ERST_PTR_MASK GENMASK_ULL(63, 4) /** * struct xhci_run_regs @@ -589,6 +586,7 @@ struct xhci_stream_info { #define SMALL_STREAM_ARRAY_SIZE 256 #define MEDIUM_STREAM_ARRAY_SIZE 1024 +#define GET_PORT_BW_ARRAY_SIZE 256 /* Some Intel xHCI host controllers need software to keep track of the bus * bandwidth. Keep track of endpoint info here. Each root port is allocated @@ -700,6 +698,8 @@ struct xhci_virt_ep { int next_frame_id; /* Use new Isoch TRB layout needed for extended TBC support */ bool use_extended_tbc; + /* set if this endpoint is controlled via sideband access*/ + struct xhci_sideband *sideband; }; enum xhci_overhead_type { @@ -762,6 +762,8 @@ struct xhci_virt_device { u16 current_mel; /* Used for the debugfs interfaces. 
*/ void *debugfs_private; + /* set if this endpoint is controlled via sideband access*/ + struct xhci_sideband *sideband; }; /* @@ -1002,6 +1004,9 @@ enum xhci_setup_dev { /* bits 16:23 are the virtual function ID */ /* bits 24:31 are the slot ID */ +/* bits 19:16 are the dev speed */ +#define DEV_SPEED_FOR_TRB(p) ((p) << 16) + /* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */ #define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23) #define TRB_TO_SUSPEND_PORT(p) (((p) & (1 << 23)) >> 23) @@ -1447,8 +1452,8 @@ struct xhci_interrupter { bool ip_autoclear; u32 isoc_bei_interval; /* For interrupter registers save and restore over suspend/resume */ - u32 s3_irq_pending; - u32 s3_irq_control; + u32 s3_iman; + u32 s3_imod; u32 s3_erst_size; u64 s3_erst_base; u64 s3_erst_dequeue; @@ -1554,6 +1559,7 @@ struct xhci_hcd { struct dma_pool *device_pool; struct dma_pool *segment_pool; struct dma_pool *small_streams_pool; + struct dma_pool *port_bw_pool; struct dma_pool *medium_streams_pool; /* Host controller watchdog timer structures */ @@ -1846,11 +1852,18 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags); void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +struct xhci_container_ctx *xhci_alloc_port_bw_ctx(struct xhci_hcd *xhci, + gfp_t flags); +void xhci_free_port_bw_ctx(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); struct xhci_interrupter * xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs, - u32 imod_interval); + u32 imod_interval, unsigned int intr_num); void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir); +void xhci_skip_sec_intr_events(struct xhci_hcd *xhci, + struct xhci_ring *ring, + struct xhci_interrupter *ir); /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); @@ -1891,7 +1904,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, int xhci_set_interrupter_moderation(struct xhci_interrupter *ir, u32 imod_interval); int xhci_enable_interrupter(struct xhci_interrupter *ir); -int xhci_disable_interrupter(struct xhci_interrupter *ir); +int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); @@ -1916,6 +1929,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed); +int xhci_queue_get_port_bw(struct xhci_hcd *xhci, + struct xhci_command *cmd, dma_addr_t in_ctx_ptr, + u8 dev_speed, bool command_must_succeed); +int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, + u8 dev_speed); int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed); int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd, @@ -1936,6 +1954,10 @@ unsigned int count_trbs(u64 addr, u64 len); int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend, gfp_t gfp_flags); void xhci_process_cancelled_tds(struct xhci_virt_ep *ep); +void xhci_update_erst_dequeue(struct xhci_hcd *xhci, + struct xhci_interrupter *ir, + bool clear_ehb); +void xhci_add_interrupter(struct xhci_hcd *xhci, unsigned int intr_num); /* xHCI roothub code */ void 
xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c index 75ac3c6aa92d..1048e3912068 100644 --- a/drivers/usb/misc/onboard_usb_dev.c +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -36,9 +36,10 @@ #define USB5744_CMD_CREG_ACCESS 0x99 #define USB5744_CMD_CREG_ACCESS_LSB 0x37 #define USB5744_CREG_MEM_ADDR 0x00 +#define USB5744_CREG_MEM_RD_ADDR 0x04 #define USB5744_CREG_WRITE 0x00 -#define USB5744_CREG_RUNTIMEFLAGS2 0x41 -#define USB5744_CREG_RUNTIMEFLAGS2_LSB 0x1D +#define USB5744_CREG_READ 0x01 +#define USB5744_CREG_RUNTIMEFLAGS2 0x411D #define USB5744_CREG_BYPASS_UDC_SUSPEND BIT(3) static void onboard_dev_attach_usb_driver(struct work_struct *work); @@ -309,11 +310,88 @@ static void onboard_dev_attach_usb_driver(struct work_struct *work) pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err)); } +static int onboard_dev_5744_i2c_read_byte(struct i2c_client *client, u16 addr, u8 *data) +{ + struct i2c_msg msg[2]; + u8 rd_buf[3]; + int ret; + + u8 wr_buf[7] = {0, USB5744_CREG_MEM_ADDR, 4, + USB5744_CREG_READ, 1, + addr >> 8 & 0xff, + addr & 0xff}; + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = sizeof(wr_buf); + msg[0].buf = wr_buf; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + wr_buf[0] = USB5744_CMD_CREG_ACCESS; + wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB; + wr_buf[2] = 0; + msg[0].len = 3; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + wr_buf[0] = 0; + wr_buf[1] = USB5744_CREG_MEM_RD_ADDR; + msg[0].len = 2; + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = 2; + msg[1].buf = rd_buf; + + ret = i2c_transfer(client->adapter, msg, 2); + if (ret < 0) + return ret; + *data = rd_buf[1]; + + return 0; +} + +static int onboard_dev_5744_i2c_write_byte(struct i2c_client *client, u16 addr, u8 data) +{ + struct i2c_msg msg[2]; + int ret; + + u8 wr_buf[8] = {0, USB5744_CREG_MEM_ADDR, 5, + USB5744_CREG_WRITE, 1, + addr >> 8 & 0xff, + addr & 0xff, + data}; + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = sizeof(wr_buf); + msg[0].buf = wr_buf; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + msg[0].len = 3; + wr_buf[0] = USB5744_CMD_CREG_ACCESS; + wr_buf[1] = USB5744_CMD_CREG_ACCESS_LSB; + wr_buf[2] = 0; + + ret = i2c_transfer(client->adapter, msg, 1); + if (ret < 0) + return ret; + + return 0; +} + static int onboard_dev_5744_i2c_init(struct i2c_client *client) { #if IS_ENABLED(CONFIG_USB_ONBOARD_DEV_USB5744) struct device *dev = &client->dev; int ret; + u8 reg; /* * Set BYPASS_UDC_SUSPEND bit to ensure MCU is always enabled @@ -321,20 +399,16 @@ static int onboard_dev_5744_i2c_init(struct i2c_client *client) * The command writes 5 bytes to memory and single data byte in * configuration register. 
*/ - char wr_buf[7] = {USB5744_CREG_MEM_ADDR, 5, - USB5744_CREG_WRITE, 1, - USB5744_CREG_RUNTIMEFLAGS2, - USB5744_CREG_RUNTIMEFLAGS2_LSB, - USB5744_CREG_BYPASS_UDC_SUSPEND}; - - ret = i2c_smbus_write_block_data(client, 0, sizeof(wr_buf), wr_buf); + ret = onboard_dev_5744_i2c_read_byte(client, + USB5744_CREG_RUNTIMEFLAGS2, ®); if (ret) - return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n"); + return dev_err_probe(dev, ret, "CREG_RUNTIMEFLAGS2 read failed\n"); - ret = i2c_smbus_write_word_data(client, USB5744_CMD_CREG_ACCESS, - USB5744_CMD_CREG_ACCESS_LSB); + reg |= USB5744_CREG_BYPASS_UDC_SUSPEND; + ret = onboard_dev_5744_i2c_write_byte(client, + USB5744_CREG_RUNTIMEFLAGS2, reg); if (ret) - return dev_err_probe(dev, ret, "Configuration Register Access Command failed\n"); + return dev_err_probe(dev, ret, "BYPASS_UDC_SUSPEND bit configuration failed\n"); /* Send SMBus command to boot hub. */ ret = i2c_smbus_write_word_data(client, USB5744_CMD_ATTACH, @@ -490,6 +564,7 @@ static struct platform_driver onboard_dev_driver = { #define VENDOR_ID_CYPRESS 0x04b4 #define VENDOR_ID_GENESYS 0x05e3 #define VENDOR_ID_MICROCHIP 0x0424 +#define VENDOR_ID_PARADE 0x1da0 #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_TI 0x0451 #define VENDOR_ID_VIA 0x2109 @@ -569,8 +644,14 @@ static void onboard_dev_usbdev_disconnect(struct usb_device *udev) } static const struct usb_device_id onboard_dev_id_table[] = { - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 HUB */ - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6500) }, /* CYUSB330x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6502) }, /* CYUSB330x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6503) }, /* CYUSB33{0,1}x 2.0 HUB, Vendor Mode */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB331x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB331x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6507) }, /* CYUSB332x 2.0 HUB, Vendor Mode */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6508) }, /* CYUSB332x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x650a) }, /* CYUSB332x 2.0 HUB */ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 HUB */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 HUB */ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 HUB */ @@ -580,14 +661,19 @@ static const struct usb_device_id onboard_dev_id_table[] = { { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 HUB */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 HUB */ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_PARADE, 0x5511) }, /* PS5511 USB 3.2 */ + { USB_DEVICE(VENDOR_ID_PARADE, 0x55a1) }, /* PS5511 USB 2.0 */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 HUB */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 HUB */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 HUB */ { USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 HUB */ + { USB_DEVICE(VENDOR_ID_REALTEK, 0x0179) }, /* RTL8188ETV 2.4GHz WiFi */ { USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 HUB */ { USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 HUB */ { USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 HUB */ { USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_TI, 0x8440) }, /* TI USB8044 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_TI, 0x8442) }, 
/* TI USB8044 2.0 HUB */ { USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 HUB */ { USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 HUB */ { USB_DEVICE(VENDOR_ID_XMOS, 0x0013) }, /* XMOS XVF3500 Voice Processor */ diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h index 933797a7e084..e017b8e22f93 100644 --- a/drivers/usb/misc/onboard_usb_dev.h +++ b/drivers/usb/misc/onboard_usb_dev.h @@ -38,6 +38,13 @@ static const struct onboard_dev_pdata microchip_usb5744_data = { .is_hub = true, }; +static const struct onboard_dev_pdata parade_ps5511_data = { + .reset_us = 500, + .num_supplies = 2, + .supply_names = { "vddd11", "vdd33"}, + .is_hub = true, +}; + static const struct onboard_dev_pdata realtek_rts5411_data = { .reset_us = 0, .num_supplies = 1, @@ -45,6 +52,13 @@ static const struct onboard_dev_pdata realtek_rts5411_data = { .is_hub = true, }; +static const struct onboard_dev_pdata realtek_rtl8188etv_data = { + .reset_us = 0, + .num_supplies = 1, + .supply_names = { "vdd" }, + .is_hub = false, +}; + static const struct onboard_dev_pdata ti_tusb8020b_data = { .reset_us = 3000, .num_supplies = 1, @@ -111,6 +125,8 @@ static const struct of_device_id onboard_dev_match[] = { { .compatible = "usb451,8027", .data = &ti_tusb8020b_data, }, { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, + { .compatible = "usb451,8440", .data = &ti_tusb8041_data, }, + { .compatible = "usb451,8442", .data = &ti_tusb8041_data, }, { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, }, { .compatible = "usb4b4,6506", .data = &cypress_hx3_data, }, { .compatible = "usb4b4,6570", .data = &cypress_hx2vl_data, }, @@ -118,10 +134,13 @@ static const struct of_device_id onboard_dev_match[] = { { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, }, { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, }, { .compatible = "usb5e3,626", .data = &genesys_gl852g_data, }, + { .compatible = "usbbda,179", .data = &realtek_rtl8188etv_data, }, { .compatible = "usbbda,411", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,414", .data = &realtek_rts5411_data, }, { .compatible = "usbbda,5414", .data = &realtek_rts5411_data, }, + { .compatible = "usb1da0,5511", .data = ¶de_ps5511_data, }, + { .compatible = "usb1da0,55a1", .data = ¶de_ps5511_data, }, { .compatible = "usb2109,817", .data = &vialab_vl817_data, }, { .compatible = "usb2109,2817", .data = &vialab_vl817_data, }, { .compatible = "usb20b1,0013", .data = &xmos_xvf3500_data, }, diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 5f629d7cad64..b7acf3966cd7 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -126,18 +126,6 @@ config USB_ISP1301 To compile this driver as a module, choose M here: the module will be called phy-isp1301. -config USB_MV_OTG - tristate "Marvell USB OTG support" - depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG - depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' - select USB_PHY - help - Say Y here if you want to build Marvell USB OTG transceiver - driver in kernel (including PXA and MMP series). This driver - implements role switch between EHCI host driver and gadget driver. - - To compile this driver as a module, choose M here. 
- config USB_MXS_PHY tristate "Freescale MXS USB PHY support" depends on ARCH_MXC || ARCH_MXS diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index e5d619b4d8f6..ceae46c5341d 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -18,7 +18,6 @@ obj-$(CONFIG_TWL6030_USB) += phy-twl6030-usb.o obj-$(CONFIG_USB_TEGRA_PHY) += phy-tegra-usb.o obj-$(CONFIG_USB_GPIO_VBUS) += phy-gpio-vbus-usb.o obj-$(CONFIG_USB_ISP1301) += phy-isp1301.o -obj-$(CONFIG_USB_MV_OTG) += phy-mv-usb.o obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-usb.o obj-$(CONFIG_USB_ULPI) += phy-ulpi.o obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c deleted file mode 100644 index 638fba58420c..000000000000 --- a/drivers/usb/phy/phy-mv-usb.c +++ /dev/null @@ -1,881 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Copyright (C) 2011 Marvell International Ltd. All rights reserved. - * Author: Chao Xie <chao.xie@marvell.com> - * Neil Zhang <zhangwm@marvell.com> - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/io.h> -#include <linux/iopoll.h> -#include <linux/uaccess.h> -#include <linux/device.h> -#include <linux/proc_fs.h> -#include <linux/clk.h> -#include <linux/workqueue.h> -#include <linux/platform_device.h> -#include <linux/string_choices.h> - -#include <linux/usb.h> -#include <linux/usb/ch9.h> -#include <linux/usb/otg.h> -#include <linux/usb/gadget.h> -#include <linux/usb/hcd.h> -#include <linux/platform_data/mv_usb.h> - -#include "phy-mv-usb.h" - -#define DRIVER_DESC "Marvell USB OTG transceiver driver" - -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); - -static const char driver_name[] = "mv-otg"; - -static char *state_string[] = { - "undefined", - "b_idle", - "b_srp_init", - "b_peripheral", - "b_wait_acon", - "b_host", - "a_idle", - "a_wait_vrise", - "a_wait_bcon", - "a_host", - "a_suspend", - "a_peripheral", - "a_wait_vfall", - "a_vbus_err" -}; - -static int mv_otg_set_vbus(struct usb_otg *otg, bool on) -{ - struct mv_otg *mvotg = container_of(otg->usb_phy, struct mv_otg, phy); - if (mvotg->pdata->set_vbus == NULL) - return -ENODEV; - - return mvotg->pdata->set_vbus(on); -} - -static int mv_otg_set_host(struct usb_otg *otg, - struct usb_bus *host) -{ - otg->host = host; - - return 0; -} - -static int mv_otg_set_peripheral(struct usb_otg *otg, - struct usb_gadget *gadget) -{ - otg->gadget = gadget; - - return 0; -} - -static void mv_otg_run_state_machine(struct mv_otg *mvotg, - unsigned long delay) -{ - dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n"); - if (!mvotg->qwork) - return; - - queue_delayed_work(mvotg->qwork, &mvotg->work, delay); -} - -static void mv_otg_timer_await_bcon(struct timer_list *t) -{ - struct mv_otg *mvotg = from_timer(mvotg, t, - otg_ctrl.timer[A_WAIT_BCON_TIMER]); - - mvotg->otg_ctrl.a_wait_bcon_timeout = 1; - - dev_info(&mvotg->pdev->dev, "B Device No Response!\n"); - - if (spin_trylock(&mvotg->wq_lock)) { - mv_otg_run_state_machine(mvotg, 0); - spin_unlock(&mvotg->wq_lock); - } -} - -static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id) -{ - struct timer_list *timer; - - if (id >= OTG_TIMER_NUM) - return -EINVAL; - - timer = &mvotg->otg_ctrl.timer[id]; - - if (timer_pending(timer)) - timer_delete(timer); - - return 0; -} - -static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id, - unsigned long interval) -{ - struct timer_list *timer; - - if (id >= OTG_TIMER_NUM) - return -EINVAL; - - timer = &mvotg->otg_ctrl.timer[id]; - if 
(timer_pending(timer)) { - dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id); - return -EBUSY; - } - - timer->expires = jiffies + interval; - add_timer(timer); - - return 0; -} - -static int mv_otg_reset(struct mv_otg *mvotg) -{ - u32 tmp; - int ret; - - /* Stop the controller */ - tmp = readl(&mvotg->op_regs->usbcmd); - tmp &= ~USBCMD_RUN_STOP; - writel(tmp, &mvotg->op_regs->usbcmd); - - /* Reset the controller to get default values */ - writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd); - - ret = readl_poll_timeout_atomic(&mvotg->op_regs->usbcmd, tmp, - (tmp & USBCMD_CTRL_RESET), 10, 10000); - if (ret < 0) { - dev_err(&mvotg->pdev->dev, - "Wait for RESET completed TIMEOUT\n"); - return ret; - } - - writel(0x0, &mvotg->op_regs->usbintr); - tmp = readl(&mvotg->op_regs->usbsts); - writel(tmp, &mvotg->op_regs->usbsts); - - return 0; -} - -static void mv_otg_init_irq(struct mv_otg *mvotg) -{ - u32 otgsc; - - mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID - | OTGSC_INTR_A_VBUS_VALID; - mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID - | OTGSC_INTSTS_A_VBUS_VALID; - - if (mvotg->pdata->vbus == NULL) { - mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID - | OTGSC_INTR_B_SESSION_END; - mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID - | OTGSC_INTSTS_B_SESSION_END; - } - - if (mvotg->pdata->id == NULL) { - mvotg->irq_en |= OTGSC_INTR_USB_ID; - mvotg->irq_status |= OTGSC_INTSTS_USB_ID; - } - - otgsc = readl(&mvotg->op_regs->otgsc); - otgsc |= mvotg->irq_en; - writel(otgsc, &mvotg->op_regs->otgsc); -} - -static void mv_otg_start_host(struct mv_otg *mvotg, int on) -{ -#ifdef CONFIG_USB - struct usb_otg *otg = mvotg->phy.otg; - struct usb_hcd *hcd; - - if (!otg->host) - return; - - dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop"); - - hcd = bus_to_hcd(otg->host); - - if (on) { - usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); - device_wakeup_enable(hcd->self.controller); - } else { - usb_remove_hcd(hcd); - } -#endif /* CONFIG_USB */ -} - -static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on) -{ - struct usb_otg *otg = mvotg->phy.otg; - - if (!otg->gadget) - return; - - dev_info(mvotg->phy.dev, "gadget %s\n", str_on_off(on)); - - if (on) - usb_gadget_vbus_connect(otg->gadget); - else - usb_gadget_vbus_disconnect(otg->gadget); -} - -static void otg_clock_enable(struct mv_otg *mvotg) -{ - clk_prepare_enable(mvotg->clk); -} - -static void otg_clock_disable(struct mv_otg *mvotg) -{ - clk_disable_unprepare(mvotg->clk); -} - -static int mv_otg_enable_internal(struct mv_otg *mvotg) -{ - int retval = 0; - - if (mvotg->active) - return 0; - - dev_dbg(&mvotg->pdev->dev, "otg enabled\n"); - - otg_clock_enable(mvotg); - if (mvotg->pdata->phy_init) { - retval = mvotg->pdata->phy_init(mvotg->phy_regs); - if (retval) { - dev_err(&mvotg->pdev->dev, - "init phy error %d\n", retval); - otg_clock_disable(mvotg); - return retval; - } - } - mvotg->active = 1; - - return 0; - -} - -static int mv_otg_enable(struct mv_otg *mvotg) -{ - if (mvotg->clock_gating) - return mv_otg_enable_internal(mvotg); - - return 0; -} - -static void mv_otg_disable_internal(struct mv_otg *mvotg) -{ - if (mvotg->active) { - dev_dbg(&mvotg->pdev->dev, "otg disabled\n"); - if (mvotg->pdata->phy_deinit) - mvotg->pdata->phy_deinit(mvotg->phy_regs); - otg_clock_disable(mvotg); - mvotg->active = 0; - } -} - -static void mv_otg_disable(struct mv_otg *mvotg) -{ - if (mvotg->clock_gating) - mv_otg_disable_internal(mvotg); -} - -static void mv_otg_update_inputs(struct mv_otg *mvotg) -{ - struct mv_otg_ctrl *otg_ctrl = 
&mvotg->otg_ctrl; - u32 otgsc; - - otgsc = readl(&mvotg->op_regs->otgsc); - - if (mvotg->pdata->vbus) { - if (mvotg->pdata->vbus->poll() == VBUS_HIGH) { - otg_ctrl->b_sess_vld = 1; - otg_ctrl->b_sess_end = 0; - } else { - otg_ctrl->b_sess_vld = 0; - otg_ctrl->b_sess_end = 1; - } - } else { - otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID); - otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END); - } - - if (mvotg->pdata->id) - otg_ctrl->id = !!mvotg->pdata->id->poll(); - else - otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID); - - if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id) - otg_ctrl->a_bus_req = 1; - - otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID); - otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID); - - dev_dbg(&mvotg->pdev->dev, "%s: ", __func__); - dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id); - dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld); - dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end); - dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld); - dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld); -} - -static void mv_otg_update_state(struct mv_otg *mvotg) -{ - struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl; - int old_state = mvotg->phy.otg->state; - - switch (old_state) { - case OTG_STATE_UNDEFINED: - mvotg->phy.otg->state = OTG_STATE_B_IDLE; - fallthrough; - case OTG_STATE_B_IDLE: - if (otg_ctrl->id == 0) - mvotg->phy.otg->state = OTG_STATE_A_IDLE; - else if (otg_ctrl->b_sess_vld) - mvotg->phy.otg->state = OTG_STATE_B_PERIPHERAL; - break; - case OTG_STATE_B_PERIPHERAL: - if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0) - mvotg->phy.otg->state = OTG_STATE_B_IDLE; - break; - case OTG_STATE_A_IDLE: - if (otg_ctrl->id) - mvotg->phy.otg->state = OTG_STATE_B_IDLE; - else if (!(otg_ctrl->a_bus_drop) && - (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det)) - mvotg->phy.otg->state = OTG_STATE_A_WAIT_VRISE; - break; - case OTG_STATE_A_WAIT_VRISE: - if (otg_ctrl->a_vbus_vld) - mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON; - break; - case OTG_STATE_A_WAIT_BCON: - if (otg_ctrl->id || otg_ctrl->a_bus_drop - || otg_ctrl->a_wait_bcon_timeout) { - mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER); - mvotg->otg_ctrl.a_wait_bcon_timeout = 0; - mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL; - otg_ctrl->a_bus_req = 0; - } else if (!otg_ctrl->a_vbus_vld) { - mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER); - mvotg->otg_ctrl.a_wait_bcon_timeout = 0; - mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR; - } else if (otg_ctrl->b_conn) { - mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER); - mvotg->otg_ctrl.a_wait_bcon_timeout = 0; - mvotg->phy.otg->state = OTG_STATE_A_HOST; - } - break; - case OTG_STATE_A_HOST: - if (otg_ctrl->id || !otg_ctrl->b_conn - || otg_ctrl->a_bus_drop) - mvotg->phy.otg->state = OTG_STATE_A_WAIT_BCON; - else if (!otg_ctrl->a_vbus_vld) - mvotg->phy.otg->state = OTG_STATE_A_VBUS_ERR; - break; - case OTG_STATE_A_WAIT_VFALL: - if (otg_ctrl->id - || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld) - || otg_ctrl->a_bus_req) - mvotg->phy.otg->state = OTG_STATE_A_IDLE; - break; - case OTG_STATE_A_VBUS_ERR: - if (otg_ctrl->id || otg_ctrl->a_clr_err - || otg_ctrl->a_bus_drop) { - otg_ctrl->a_clr_err = 0; - mvotg->phy.otg->state = OTG_STATE_A_WAIT_VFALL; - } - break; - default: - break; - } -} - -static void mv_otg_work(struct work_struct *work) -{ - struct mv_otg *mvotg; - struct usb_otg *otg; - int old_state; - - mvotg = container_of(to_delayed_work(work), struct mv_otg, work); - -run: - 
/* work queue is single thread, or we need spin_lock to protect */ - otg = mvotg->phy.otg; - old_state = otg->state; - - if (!mvotg->active) - return; - - mv_otg_update_inputs(mvotg); - mv_otg_update_state(mvotg); - - if (old_state != mvotg->phy.otg->state) { - dev_info(&mvotg->pdev->dev, "change from state %s to %s\n", - state_string[old_state], - state_string[mvotg->phy.otg->state]); - - switch (mvotg->phy.otg->state) { - case OTG_STATE_B_IDLE: - otg->default_a = 0; - if (old_state == OTG_STATE_B_PERIPHERAL) - mv_otg_start_periphrals(mvotg, 0); - mv_otg_reset(mvotg); - mv_otg_disable(mvotg); - usb_phy_set_event(&mvotg->phy, USB_EVENT_NONE); - break; - case OTG_STATE_B_PERIPHERAL: - mv_otg_enable(mvotg); - mv_otg_start_periphrals(mvotg, 1); - usb_phy_set_event(&mvotg->phy, USB_EVENT_ENUMERATED); - break; - case OTG_STATE_A_IDLE: - otg->default_a = 1; - mv_otg_enable(mvotg); - if (old_state == OTG_STATE_A_WAIT_VFALL) - mv_otg_start_host(mvotg, 0); - mv_otg_reset(mvotg); - break; - case OTG_STATE_A_WAIT_VRISE: - mv_otg_set_vbus(otg, 1); - break; - case OTG_STATE_A_WAIT_BCON: - if (old_state != OTG_STATE_A_HOST) - mv_otg_start_host(mvotg, 1); - mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER, - T_A_WAIT_BCON); - /* - * Now, we directly enter A_HOST. So set b_conn = 1 - * here. In fact, it need host driver to notify us. - */ - mvotg->otg_ctrl.b_conn = 1; - break; - case OTG_STATE_A_HOST: - break; - case OTG_STATE_A_WAIT_VFALL: - /* - * Now, we has exited A_HOST. So set b_conn = 0 - * here. In fact, it need host driver to notify us. - */ - mvotg->otg_ctrl.b_conn = 0; - mv_otg_set_vbus(otg, 0); - break; - case OTG_STATE_A_VBUS_ERR: - break; - default: - break; - } - goto run; - } -} - -static irqreturn_t mv_otg_irq(int irq, void *dev) -{ - struct mv_otg *mvotg = dev; - u32 otgsc; - - otgsc = readl(&mvotg->op_regs->otgsc); - writel(otgsc, &mvotg->op_regs->otgsc); - - /* - * if we have vbus, then the vbus detection for B-device - * will be done by mv_otg_inputs_irq(). 
- */ - if (mvotg->pdata->vbus) - if ((otgsc & OTGSC_STS_USB_ID) && - !(otgsc & OTGSC_INTSTS_USB_ID)) - return IRQ_NONE; - - if ((otgsc & mvotg->irq_status) == 0) - return IRQ_NONE; - - mv_otg_run_state_machine(mvotg, 0); - - return IRQ_HANDLED; -} - -static irqreturn_t mv_otg_inputs_irq(int irq, void *dev) -{ - struct mv_otg *mvotg = dev; - - /* The clock may disabled at this time */ - if (!mvotg->active) { - mv_otg_enable(mvotg); - mv_otg_init_irq(mvotg); - } - - mv_otg_run_state_machine(mvotg, 0); - - return IRQ_HANDLED; -} - -static ssize_t -a_bus_req_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct mv_otg *mvotg = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", - mvotg->otg_ctrl.a_bus_req); -} - -static ssize_t -a_bus_req_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct mv_otg *mvotg = dev_get_drvdata(dev); - - if (count > 2) - return -1; - - /* We will use this interface to change to A device */ - if (mvotg->phy.otg->state != OTG_STATE_B_IDLE - && mvotg->phy.otg->state != OTG_STATE_A_IDLE) - return -1; - - /* The clock may disabled and we need to set irq for ID detected */ - mv_otg_enable(mvotg); - mv_otg_init_irq(mvotg); - - if (buf[0] == '1') { - mvotg->otg_ctrl.a_bus_req = 1; - mvotg->otg_ctrl.a_bus_drop = 0; - dev_dbg(&mvotg->pdev->dev, - "User request: a_bus_req = 1\n"); - - if (spin_trylock(&mvotg->wq_lock)) { - mv_otg_run_state_machine(mvotg, 0); - spin_unlock(&mvotg->wq_lock); - } - } - - return count; -} - -static DEVICE_ATTR_RW(a_bus_req); - -static ssize_t -a_clr_err_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct mv_otg *mvotg = dev_get_drvdata(dev); - if (!mvotg->phy.otg->default_a) - return -1; - - if (count > 2) - return -1; - - if (buf[0] == '1') { - mvotg->otg_ctrl.a_clr_err = 1; - dev_dbg(&mvotg->pdev->dev, - "User request: a_clr_err = 1\n"); - } - - if (spin_trylock(&mvotg->wq_lock)) { - mv_otg_run_state_machine(mvotg, 0); - spin_unlock(&mvotg->wq_lock); - } - - return count; -} - -static DEVICE_ATTR_WO(a_clr_err); - -static ssize_t -a_bus_drop_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct mv_otg *mvotg = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%d\n", - mvotg->otg_ctrl.a_bus_drop); -} - -static ssize_t -a_bus_drop_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct mv_otg *mvotg = dev_get_drvdata(dev); - if (!mvotg->phy.otg->default_a) - return -1; - - if (count > 2) - return -1; - - if (buf[0] == '0') { - mvotg->otg_ctrl.a_bus_drop = 0; - dev_dbg(&mvotg->pdev->dev, - "User request: a_bus_drop = 0\n"); - } else if (buf[0] == '1') { - mvotg->otg_ctrl.a_bus_drop = 1; - mvotg->otg_ctrl.a_bus_req = 0; - dev_dbg(&mvotg->pdev->dev, - "User request: a_bus_drop = 1\n"); - dev_dbg(&mvotg->pdev->dev, - "User request: and a_bus_req = 0\n"); - } - - if (spin_trylock(&mvotg->wq_lock)) { - mv_otg_run_state_machine(mvotg, 0); - spin_unlock(&mvotg->wq_lock); - } - - return count; -} - -static DEVICE_ATTR_RW(a_bus_drop); - -static struct attribute *inputs_attrs[] = { - &dev_attr_a_bus_req.attr, - &dev_attr_a_clr_err.attr, - &dev_attr_a_bus_drop.attr, - NULL, -}; - -static const struct attribute_group inputs_attr_group = { - .name = "inputs", - .attrs = inputs_attrs, -}; - -static const struct attribute_group *mv_otg_groups[] = { - &inputs_attr_group, - NULL, -}; - -static void mv_otg_remove(struct platform_device *pdev) -{ - 
struct mv_otg *mvotg = platform_get_drvdata(pdev); - - if (mvotg->qwork) - destroy_workqueue(mvotg->qwork); - - mv_otg_disable(mvotg); - - usb_remove_phy(&mvotg->phy); -} - -static int mv_otg_probe(struct platform_device *pdev) -{ - struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct mv_otg *mvotg; - struct usb_otg *otg; - struct resource *r; - int retval = 0, i; - - if (pdata == NULL) { - dev_err(&pdev->dev, "failed to get platform data\n"); - return -ENODEV; - } - - mvotg = devm_kzalloc(&pdev->dev, sizeof(*mvotg), GFP_KERNEL); - if (!mvotg) - return -ENOMEM; - - otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL); - if (!otg) - return -ENOMEM; - - platform_set_drvdata(pdev, mvotg); - - mvotg->pdev = pdev; - mvotg->pdata = pdata; - - mvotg->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(mvotg->clk)) - return PTR_ERR(mvotg->clk); - - mvotg->qwork = create_singlethread_workqueue("mv_otg_queue"); - if (!mvotg->qwork) { - dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n"); - return -ENOMEM; - } - - INIT_DELAYED_WORK(&mvotg->work, mv_otg_work); - - /* OTG common part */ - mvotg->pdev = pdev; - mvotg->phy.dev = &pdev->dev; - mvotg->phy.otg = otg; - mvotg->phy.label = driver_name; - - otg->state = OTG_STATE_UNDEFINED; - otg->usb_phy = &mvotg->phy; - otg->set_host = mv_otg_set_host; - otg->set_peripheral = mv_otg_set_peripheral; - otg->set_vbus = mv_otg_set_vbus; - - for (i = 0; i < OTG_TIMER_NUM; i++) - timer_setup(&mvotg->otg_ctrl.timer[i], - mv_otg_timer_await_bcon, 0); - - r = platform_get_resource_byname(mvotg->pdev, - IORESOURCE_MEM, "phyregs"); - if (r == NULL) { - dev_err(&pdev->dev, "no phy I/O memory resource defined\n"); - retval = -ENODEV; - goto err_destroy_workqueue; - } - - mvotg->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); - if (mvotg->phy_regs == NULL) { - dev_err(&pdev->dev, "failed to map phy I/O memory\n"); - retval = -EFAULT; - goto err_destroy_workqueue; - } - - r = platform_get_resource_byname(mvotg->pdev, - IORESOURCE_MEM, "capregs"); - if (r == NULL) { - dev_err(&pdev->dev, "no I/O memory resource defined\n"); - retval = -ENODEV; - goto err_destroy_workqueue; - } - - mvotg->cap_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); - if (mvotg->cap_regs == NULL) { - dev_err(&pdev->dev, "failed to map I/O memory\n"); - retval = -EFAULT; - goto err_destroy_workqueue; - } - - /* we will acces controller register, so enable the udc controller */ - retval = mv_otg_enable_internal(mvotg); - if (retval) { - dev_err(&pdev->dev, "mv otg enable error %d\n", retval); - goto err_destroy_workqueue; - } - - mvotg->op_regs = - (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs - + (readl(mvotg->cap_regs) & CAPLENGTH_MASK)); - - if (pdata->id) { - retval = devm_request_threaded_irq(&pdev->dev, pdata->id->irq, - NULL, mv_otg_inputs_irq, - IRQF_ONESHOT, "id", mvotg); - if (retval) { - dev_info(&pdev->dev, - "Failed to request irq for ID\n"); - pdata->id = NULL; - } - } - - if (pdata->vbus) { - mvotg->clock_gating = 1; - retval = devm_request_threaded_irq(&pdev->dev, pdata->vbus->irq, - NULL, mv_otg_inputs_irq, - IRQF_ONESHOT, "vbus", mvotg); - if (retval) { - dev_info(&pdev->dev, - "Failed to request irq for VBUS, " - "disable clock gating\n"); - mvotg->clock_gating = 0; - pdata->vbus = NULL; - } - } - - if (pdata->disable_otg_clock_gating) - mvotg->clock_gating = 0; - - mv_otg_reset(mvotg); - mv_otg_init_irq(mvotg); - - r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0); - if (r == NULL) { - 
dev_err(&pdev->dev, "no IRQ resource defined\n"); - retval = -ENODEV; - goto err_disable_clk; - } - - mvotg->irq = r->start; - if (devm_request_irq(&pdev->dev, mvotg->irq, mv_otg_irq, IRQF_SHARED, - driver_name, mvotg)) { - dev_err(&pdev->dev, "Request irq %d for OTG failed\n", - mvotg->irq); - mvotg->irq = 0; - retval = -ENODEV; - goto err_disable_clk; - } - - retval = usb_add_phy(&mvotg->phy, USB_PHY_TYPE_USB2); - if (retval < 0) { - dev_err(&pdev->dev, "can't register transceiver, %d\n", - retval); - goto err_disable_clk; - } - - spin_lock_init(&mvotg->wq_lock); - if (spin_trylock(&mvotg->wq_lock)) { - mv_otg_run_state_machine(mvotg, 2 * HZ); - spin_unlock(&mvotg->wq_lock); - } - - dev_info(&pdev->dev, - "successful probe OTG device %s clock gating.\n", - mvotg->clock_gating ? "with" : "without"); - - return 0; - -err_disable_clk: - mv_otg_disable_internal(mvotg); -err_destroy_workqueue: - destroy_workqueue(mvotg->qwork); - - return retval; -} - -#ifdef CONFIG_PM -static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct mv_otg *mvotg = platform_get_drvdata(pdev); - - if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) { - dev_info(&pdev->dev, - "OTG state is not B_IDLE, it is %d!\n", - mvotg->phy.otg->state); - return -EAGAIN; - } - - if (!mvotg->clock_gating) - mv_otg_disable_internal(mvotg); - - return 0; -} - -static int mv_otg_resume(struct platform_device *pdev) -{ - struct mv_otg *mvotg = platform_get_drvdata(pdev); - u32 otgsc; - - if (!mvotg->clock_gating) { - mv_otg_enable_internal(mvotg); - - otgsc = readl(&mvotg->op_regs->otgsc); - otgsc |= mvotg->irq_en; - writel(otgsc, &mvotg->op_regs->otgsc); - - if (spin_trylock(&mvotg->wq_lock)) { - mv_otg_run_state_machine(mvotg, 0); - spin_unlock(&mvotg->wq_lock); - } - } - return 0; -} -#endif - -static struct platform_driver mv_otg_driver = { - .probe = mv_otg_probe, - .remove = mv_otg_remove, - .driver = { - .name = driver_name, - .dev_groups = mv_otg_groups, - }, -#ifdef CONFIG_PM - .suspend = mv_otg_suspend, - .resume = mv_otg_resume, -#endif -}; -module_platform_driver(mv_otg_driver); diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 4b35ef216125..f52418fe3fd4 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -685,10 +685,29 @@ static int usbhs_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug); spin_lock_init(usbhs_priv_to_lock(priv)); + /* + * Acquire clocks and enable power management (PM) early in the + * probe process, as the driver accesses registers during + * initialization. Ensure the device is active before proceeding. 
+ */ + pm_runtime_enable(dev); + + ret = usbhsc_clk_get(dev, priv); + if (ret) + goto probe_pm_disable; + + ret = pm_runtime_resume_and_get(dev); + if (ret) + goto probe_clk_put; + + ret = usbhsc_clk_prepare_enable(priv); + if (ret) + goto probe_pm_put; + /* call pipe and module init */ ret = usbhs_pipe_probe(priv); if (ret < 0) - return ret; + goto probe_clk_dis_unprepare; ret = usbhs_fifo_probe(priv); if (ret < 0) @@ -698,19 +717,15 @@ static int usbhs_probe(struct platform_device *pdev) if (ret < 0) goto probe_end_fifo_exit; - /* dev_set_drvdata should be called after usbhs_mod_init */ + /* platform_set_drvdata() should be called after usbhs_mod_probe() */ platform_set_drvdata(pdev, priv); ret = reset_control_deassert(priv->rsts); if (ret) goto probe_fail_rst; - ret = usbhsc_clk_get(dev, priv); - if (ret) - goto probe_fail_clks; - /* - * deviece reset here because + * device reset here because * USB device might be used in boot loader. */ usbhs_sys_clock_ctrl(priv, 0); @@ -721,7 +736,7 @@ static int usbhs_probe(struct platform_device *pdev) if (ret) { dev_warn(dev, "USB function not selected (GPIO)\n"); ret = -ENOTSUPP; - goto probe_end_mod_exit; + goto probe_assert_rest; } } @@ -735,14 +750,19 @@ static int usbhs_probe(struct platform_device *pdev) ret = usbhs_platform_call(priv, hardware_init, pdev); if (ret < 0) { dev_err(dev, "platform init failed.\n"); - goto probe_end_mod_exit; + goto probe_assert_rest; } /* reset phy for connection */ usbhs_platform_call(priv, phy_reset, pdev); - /* power control */ - pm_runtime_enable(dev); + /* + * Disable the clocks that were enabled earlier in the probe path, + * and let the driver handle the clocks beyond this point. + */ + usbhsc_clk_disable_unprepare(priv); + pm_runtime_put(dev); + if (!usbhs_get_dparam(priv, runtime_pwctrl)) { usbhsc_power_ctrl(priv, 1); usbhs_mod_autonomy_mode(priv); @@ -759,9 +779,7 @@ static int usbhs_probe(struct platform_device *pdev) return ret; -probe_end_mod_exit: - usbhsc_clk_put(priv); -probe_fail_clks: +probe_assert_rest: reset_control_assert(priv->rsts); probe_fail_rst: usbhs_mod_remove(priv); @@ -769,6 +787,14 @@ probe_end_fifo_exit: usbhs_fifo_remove(priv); probe_end_pipe_exit: usbhs_pipe_remove(priv); +probe_clk_dis_unprepare: + usbhsc_clk_disable_unprepare(priv); +probe_pm_put: + pm_runtime_put(dev); +probe_clk_put: + usbhsc_clk_put(priv); +probe_pm_disable: + pm_runtime_disable(dev); dev_info(dev, "probe failed (%d)\n", ret); diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index d460d71b4257..1477e31d7763 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -52,6 +52,13 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), +/* Reported-by: Zhihong Zhou <zhouzhihong@greatwall.com.cn> */ +UNUSUAL_DEV(0x0781, 0x55e8, 0x0000, 0x9999, + "SanDisk", + "", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ UNUSUAL_DEV(0x090c, 0x2000, 0x0000, 0x9999, "Hiksemi", diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index ac84a6d64c2f..b09b58d7311d 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -393,6 +393,10 @@ static int dp_altmode_vdm(struct typec_altmode *alt, break; case CMDT_RSP_NAK: switch (cmd) { + case DP_CMD_STATUS_UPDATE: + if (typec_altmode_exit(alt)) + dev_err(&dp->alt->dev, "Exit Mode 
Failed!\n"); + break; case DP_CMD_CONFIGURE: dp->data.conf = 0; ret = dp_altmode_configured(dp); diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index 49926d6e72c7..182c902c42f6 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -214,7 +214,7 @@ int typec_switch_set(struct typec_switch *sw, sw_dev = sw->sw_devs[i]; ret = sw_dev->set(sw_dev, orientation); - if (ret) + if (ret && ret != -EOPNOTSUPP) return ret; } @@ -378,7 +378,7 @@ int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state) mux_dev = mux->mux_devs[i]; ret = mux_dev->set(mux_dev, state); - if (ret) + if (ret && ret != -EOPNOTSUPP) return ret; } diff --git a/drivers/usb/typec/mux/fsa4480.c b/drivers/usb/typec/mux/fsa4480.c index f71dba8bf07c..c54e42c7e6a1 100644 --- a/drivers/usb/typec/mux/fsa4480.c +++ b/drivers/usb/typec/mux/fsa4480.c @@ -12,6 +12,7 @@ #include <linux/regmap.h> #include <linux/usb/typec_dp.h> #include <linux/usb/typec_mux.h> +#include <linux/regulator/consumer.h> #define FSA4480_DEVICE_ID 0x00 #define FSA4480_DEVICE_ID_VENDOR_ID GENMASK(7, 6) @@ -273,6 +274,10 @@ static int fsa4480_probe(struct i2c_client *client) if (IS_ERR(fsa->regmap)) return dev_err_probe(dev, PTR_ERR(fsa->regmap), "failed to initialize regmap\n"); + ret = devm_regulator_get_enable_optional(dev, "vcc"); + if (ret && ret != -ENODEV) + return dev_err_probe(dev, ret, "Failed to get regulator\n"); + ret = regmap_read(fsa->regmap, FSA4480_DEVICE_ID, &val); if (ret) return dev_err_probe(dev, -ENODEV, "FSA4480 not found\n"); diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c index d42da5720a25..cdbb7c11d714 100644 --- a/drivers/usb/typec/port-mapper.c +++ b/drivers/usb/typec/port-mapper.c @@ -8,6 +8,7 @@ #include <linux/acpi.h> #include <linux/component.h> +#include <linux/thunderbolt.h> #include <linux/usb.h> #include "class.h" @@ -36,6 +37,11 @@ struct each_port_arg { struct component_match *match; }; +static int usb4_port_compare(struct device *dev, void *fwnode) +{ + return usb4_usb3_port_match(dev, fwnode); +} + static int typec_port_compare(struct device *dev, void *fwnode) { return device_match_fwnode(dev, fwnode); @@ -51,9 +57,22 @@ static int typec_port_match(struct device *dev, void *data) if (con_adev == adev) return 0; - if (con_adev->pld_crc == adev->pld_crc) + if (con_adev->pld_crc == adev->pld_crc) { + struct fwnode_handle *adev_fwnode = acpi_fwnode_handle(adev); + component_match_add(&arg->port->dev, &arg->match, typec_port_compare, - acpi_fwnode_handle(adev)); + adev_fwnode); + + /* + * If dev is USB 3.x port, it may have reference to the + * USB4 host interface in which case we can also link the + * Type-C port with the USB4 port. 
+ */ + if (fwnode_property_present(adev_fwnode, "usb4-host-interface")) + component_match_add(&arg->port->dev, &arg->match, + usb4_port_compare, adev_fwnode); + } + return 0; } diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index 19ab6647af70..a56e31b20c21 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -17,6 +17,7 @@ #include <linux/usb/tcpci.h> #include <linux/usb/tcpm.h> #include <linux/usb/typec.h> +#include <linux/regulator/consumer.h> #define PD_RETRY_COUNT_DEFAULT 3 #define PD_RETRY_COUNT_3_0_OR_HIGHER 2 @@ -905,6 +906,10 @@ static int tcpci_probe(struct i2c_client *client) int err; u16 val = 0; + err = devm_regulator_get_enable_optional(&client->dev, "vdd"); + if (err && err != -ENODEV) + return dev_err_probe(&client->dev, err, "Failed to get regulator\n"); + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c index fd1b80593367..b5a5ed40faea 100644 --- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c +++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c @@ -166,7 +166,8 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status) return; } - if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) { + if (count > sizeof(struct pd_message) + 1 || + count + 1 > TCPC_RECEIVE_BUFFER_LEN) { dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count); return; } @@ -536,7 +537,10 @@ static int max_tcpci_probe(struct i2c_client *client) return dev_err_probe(&client->dev, ret, "IRQ initialization failed\n"); - device_init_wakeup(chip->dev, true); + ret = devm_device_init_wakeup(chip->dev); + if (ret) + return dev_err_probe(chip->dev, ret, "Failed to init wakeup\n"); + return 0; } diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index a99db4e025cd..199a023b68e6 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -67,6 +67,7 @@ \ S(ACC_UNATTACHED), \ S(DEBUG_ACC_ATTACHED), \ + S(DEBUG_ACC_DEBOUNCE), \ S(AUDIO_ACC_ATTACHED), \ S(AUDIO_ACC_DEBOUNCE), \ \ @@ -313,6 +314,10 @@ struct pd_data { unsigned int operating_snk_mw; }; +#define PD_CAP_REV10 0x1 +#define PD_CAP_REV20 0x2 +#define PD_CAP_REV30 0x3 + struct pd_revision_info { u8 rev_major; u8 rev_minor; @@ -596,6 +601,15 @@ struct pd_rx_event { enum tcpm_transmit_type rx_sop_type; }; +struct altmode_vdm_event { + struct kthread_work work; + struct tcpm_port *port; + u32 header; + u32 *data; + int cnt; + enum tcpm_transmit_type tx_sop_type; +}; + static const char * const pd_rev[] = { [PD_REV10] = "rev1", [PD_REV20] = "rev2", @@ -621,7 +635,8 @@ static const char * const pd_rev[] = { !tcpm_cc_is_source((port)->cc1))) #define tcpm_port_is_debug(port) \ - (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2)) + ((tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2)) || \ + (tcpm_cc_is_sink((port)->cc1) && tcpm_cc_is_sink((port)->cc2))) #define tcpm_port_is_audio(port) \ (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2)) @@ -1151,7 +1166,7 @@ static int tcpm_set_attached_state(struct tcpm_port *port, bool attached) port->data_role); } -static int tcpm_set_roles(struct tcpm_port *port, bool attached, +static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state, enum typec_role role, enum typec_data_role data) { enum typec_orientation orientation; @@ -1188,7 +1203,7 @@ static int tcpm_set_roles(struct tcpm_port *port, bool 
attached, } } - ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation); + ret = tcpm_mux_set(port, state, usb_role, orientation); if (ret < 0) return ret; @@ -1608,18 +1623,68 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header, mod_vdm_delayed_work(port, 0); } -static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, - const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type) +static void tcpm_queue_vdm_work(struct kthread_work *work) { - if (port->state != SRC_READY && port->state != SNK_READY && - port->state != SRC_VDM_IDENTITY_REQUEST) - return; + struct altmode_vdm_event *event = container_of(work, + struct altmode_vdm_event, + work); + struct tcpm_port *port = event->port; mutex_lock(&port->lock); - tcpm_queue_vdm(port, header, data, cnt, tx_sop_type); + if (port->state != SRC_READY && port->state != SNK_READY && + port->state != SRC_VDM_IDENTITY_REQUEST) { + tcpm_log_force(port, "dropping altmode_vdm_event"); + goto port_unlock; + } + + tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type); + +port_unlock: + kfree(event->data); + kfree(event); mutex_unlock(&port->lock); } +static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, + const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type) +{ + struct altmode_vdm_event *event; + u32 *data_cpy; + int ret = -ENOMEM; + + event = kzalloc(sizeof(*event), GFP_KERNEL); + if (!event) + goto err_event; + + data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL); + if (!data_cpy) + goto err_data; + + kthread_init_work(&event->work, tcpm_queue_vdm_work); + event->port = port; + event->header = header; + memcpy(data_cpy, data, sizeof(u32) * cnt); + event->data = data_cpy; + event->cnt = cnt; + event->tx_sop_type = tx_sop_type; + + ret = kthread_queue_work(port->wq, &event->work); + if (!ret) { + ret = -EBUSY; + goto err_queue; + } + + return 0; + +err_queue: + kfree(data_cpy); +err_data: + kfree(event); +err_event: + tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret); + return ret; +} + static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt) { u32 vdo = p[VDO_INDEX_IDH]; @@ -2830,8 +2895,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo) header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP); - return 0; + return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP); } static int tcpm_altmode_exit(struct typec_altmode *altmode) @@ -2847,8 +2911,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode) header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP); - return 0; + return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP); } static int tcpm_altmode_vdm(struct typec_altmode *altmode, @@ -2856,9 +2919,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode, { struct tcpm_port *port = typec_altmode_get_drvdata(altmode); - tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP); - - return 0; + return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP); } static const struct typec_altmode_ops tcpm_altmode_ops = { @@ -2882,8 +2943,7 @@ static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_pl header = VDO(altmode->svid, vdo ? 
2 : 1, svdm_version, CMD_ENTER_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME); - return 0; + return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME); } static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop) @@ -2899,8 +2959,7 @@ static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plu header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE); header |= VDO_OPOS(altmode->mode); - tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME); - return 0; + return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME); } static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop, @@ -2908,9 +2967,7 @@ static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug { struct tcpm_port *port = typec_altmode_get_drvdata(altmode); - tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME); - - return 0; + return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME); } static const struct typec_cable_ops tcpm_cable_ops = { @@ -4353,7 +4410,8 @@ static int tcpm_src_attach(struct tcpm_port *port) tcpm_enable_auto_vbus_discharge(port, true); - ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port)); + ret = tcpm_set_roles(port, true, TYPEC_STATE_USB, + TYPEC_SOURCE, tcpm_data_role_for_source(port)); if (ret < 0) return ret; @@ -4528,7 +4586,8 @@ static int tcpm_snk_attach(struct tcpm_port *port) tcpm_enable_auto_vbus_discharge(port, true); - ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port)); + ret = tcpm_set_roles(port, true, TYPEC_STATE_USB, + TYPEC_SINK, tcpm_data_role_for_sink(port)); if (ret < 0) return ret; @@ -4551,12 +4610,24 @@ static void tcpm_snk_detach(struct tcpm_port *port) static int tcpm_acc_attach(struct tcpm_port *port) { int ret; + enum typec_role role; + enum typec_data_role data; + int state = TYPEC_STATE_USB; if (port->attached) return 0; - ret = tcpm_set_roles(port, true, TYPEC_SOURCE, - tcpm_data_role_for_source(port)); + role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE; + data = tcpm_port_is_sink(port) ? 
tcpm_data_role_for_sink(port) + : tcpm_data_role_for_source(port); + + if (tcpm_port_is_audio(port)) + state = TYPEC_MODE_AUDIO; + + if (tcpm_port_is_debug(port)) + state = TYPEC_MODE_DEBUG; + + ret = tcpm_set_roles(port, true, state, role, data); if (ret < 0) return ret; @@ -4665,6 +4736,25 @@ static void tcpm_set_initial_svdm_version(struct tcpm_port *port) } } +static void tcpm_set_initial_negotiated_rev(struct tcpm_port *port) +{ + switch (port->pd_rev.rev_major) { + case PD_CAP_REV10: + port->negotiated_rev = PD_REV10; + break; + case PD_CAP_REV20: + port->negotiated_rev = PD_REV20; + break; + case PD_CAP_REV30: + port->negotiated_rev = PD_REV30; + break; + default: + port->negotiated_rev = PD_MAX_REV; + break; + } + port->negotiated_rev_prime = port->negotiated_rev; +} + static void run_state_machine(struct tcpm_port *port) { int ret; @@ -4782,8 +4872,7 @@ static void run_state_machine(struct tcpm_port *port) typec_set_pwr_opmode(port->typec_port, opmode); port->pwr_opmode = TYPEC_PWR_MODE_USB; port->caps_count = 0; - port->negotiated_rev = PD_MAX_REV; - port->negotiated_rev_prime = PD_MAX_REV; + tcpm_set_initial_negotiated_rev(port); port->message_id = 0; port->message_id_prime = 0; port->rx_msgid = -1; @@ -4964,7 +5053,13 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC); break; case SNK_ATTACH_WAIT: - if ((port->cc1 == TYPEC_CC_OPEN && + if (tcpm_port_is_debug(port)) + tcpm_set_state(port, DEBUG_ACC_ATTACHED, + PD_T_CC_DEBOUNCE); + else if (tcpm_port_is_audio(port)) + tcpm_set_state(port, AUDIO_ACC_ATTACHED, + PD_T_CC_DEBOUNCE); + else if ((port->cc1 == TYPEC_CC_OPEN && port->cc2 != TYPEC_CC_OPEN) || (port->cc1 != TYPEC_CC_OPEN && port->cc2 == TYPEC_CC_OPEN)) @@ -4978,6 +5073,12 @@ static void run_state_machine(struct tcpm_port *port) if (tcpm_port_is_disconnected(port)) tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE); + else if (tcpm_port_is_debug(port)) + tcpm_set_state(port, DEBUG_ACC_ATTACHED, + PD_T_CC_DEBOUNCE); + else if (tcpm_port_is_audio(port)) + tcpm_set_state(port, AUDIO_ACC_ATTACHED, + PD_T_CC_DEBOUNCE); else if (port->vbus_present) tcpm_set_state(port, tcpm_try_src(port) ? 
SRC_TRY @@ -5048,8 +5149,7 @@ static void run_state_machine(struct tcpm_port *port) port->cc2 : port->cc1); typec_set_pwr_opmode(port->typec_port, opmode); port->pwr_opmode = TYPEC_PWR_MODE_USB; - port->negotiated_rev = PD_MAX_REV; - port->negotiated_rev_prime = PD_MAX_REV; + tcpm_set_initial_negotiated_rev(port); port->message_id = 0; port->message_id_prime = 0; port->rx_msgid = -1; @@ -5276,7 +5376,10 @@ static void run_state_machine(struct tcpm_port *port) /* Accessory states */ case ACC_UNATTACHED: tcpm_acc_detach(port); - tcpm_set_state(port, SRC_UNATTACHED, 0); + if (port->port_type == TYPEC_PORT_SRC) + tcpm_set_state(port, SRC_UNATTACHED, 0); + else + tcpm_set_state(port, SNK_UNATTACHED, 0); break; case DEBUG_ACC_ATTACHED: case AUDIO_ACC_ATTACHED: @@ -5284,6 +5387,7 @@ static void run_state_machine(struct tcpm_port *port) if (ret < 0) tcpm_set_state(port, ACC_UNATTACHED, 0); break; + case DEBUG_ACC_DEBOUNCE: case AUDIO_ACC_DEBOUNCE: tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time); break; @@ -5326,7 +5430,7 @@ static void run_state_machine(struct tcpm_port *port) */ tcpm_set_vconn(port, false); tcpm_set_vbus(port, false); - tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE, + tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE, tcpm_data_role_for_source(port)); /* * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V + @@ -5358,7 +5462,7 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_vconn(port, false); if (port->pd_capable) tcpm_set_charge(port, false); - tcpm_set_roles(port, port->self_powered, TYPEC_SINK, + tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK, tcpm_data_role_for_sink(port)); /* * VBUS may or may not toggle, depending on the adapter. 
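/*
 * Illustrative sketch, not part of the patch above: tcpm_set_roles() now
 * takes a Type-C mux state that it forwards to tcpm_mux_set(), and the
 * accessory-attach path derives that state from how the CC pins resolved.
 * The helper below mirrors that selection in isolation; its name and
 * parameters are hypothetical, only the TYPEC_* constants come from the
 * kernel's Type-C headers.
 */
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>

static inline int accessory_mux_state(bool is_audio_acc, bool is_debug_acc)
{
	if (is_audio_acc)
		return TYPEC_MODE_AUDIO;	/* audio adapter accessory */
	if (is_debug_acc)
		return TYPEC_MODE_DEBUG;	/* debug accessory */
	return TYPEC_STATE_USB;			/* plain USB operation */
}
/* A caller would pass the result as the new mux-state argument of tcpm_set_roles(). */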
@@ -5482,10 +5586,10 @@ static void run_state_machine(struct tcpm_port *port) case DR_SWAP_CHANGE_DR: tcpm_unregister_altmodes(port); if (port->data_role == TYPEC_HOST) - tcpm_set_roles(port, true, port->pwr_role, + tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role, TYPEC_DEVICE); else - tcpm_set_roles(port, true, port->pwr_role, + tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role, TYPEC_HOST); tcpm_ams_finish(port); tcpm_set_state(port, ready_state(port), 0); @@ -5875,7 +5979,8 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, } break; case SNK_UNATTACHED: - if (tcpm_port_is_sink(port)) + if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) || + tcpm_port_is_sink(port)) tcpm_set_state(port, SNK_ATTACH_WAIT, 0); break; case SNK_ATTACH_WAIT: @@ -5938,7 +6043,12 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, case DEBUG_ACC_ATTACHED: if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN) - tcpm_set_state(port, ACC_UNATTACHED, 0); + tcpm_set_state(port, DEBUG_ACC_DEBOUNCE, 0); + break; + + case DEBUG_ACC_DEBOUNCE: + if (tcpm_port_is_debug(port)) + tcpm_set_state(port, DEBUG_ACC_ATTACHED, 0); break; case SNK_TRY: @@ -5965,7 +6075,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, case SNK_TRY_WAIT_DEBOUNCE: if (!tcpm_port_is_sink(port)) { port->max_wait = 0; - tcpm_set_state(port, SRC_TRYWAIT, 0); + tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE); } break; case SRC_TRY_WAIT: diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 7ee721a877c1..dcf141ada078 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -1431,7 +1431,7 @@ static int tps6598x_probe(struct i2c_client *client) tps->wakeup = device_property_read_bool(tps->dev, "wakeup-source"); if (tps->wakeup && client->irq) { - device_init_wakeup(&client->dev, true); + devm_device_init_wakeup(&client->dev); enable_irq_wake(client->irq); } diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig index 75559601fe8f..8bf8fefb4f07 100644 --- a/drivers/usb/typec/ucsi/Kconfig +++ b/drivers/usb/typec/ucsi/Kconfig @@ -91,4 +91,15 @@ config UCSI_LENOVO_YOGA_C630 To compile the driver as a module, choose M here: the module will be called ucsi_yoga_c630. +config UCSI_HUAWEI_GAOKUN + tristate "UCSI Interface Driver for Huawei Matebook E Go" + depends on EC_HUAWEI_GAOKUN + select DRM_AUX_HPD_BRIDGE if DRM_BRIDGE && OF + help + This driver enables UCSI support on the Huawei Matebook E Go tablet, + which is a sc8280xp-based 2-in-1 tablet. + + To compile the driver as a module, choose M here: the module will be + called ucsi_huawei_gaokun. 
+ endif diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile index be98a879104d..dbc571763eff 100644 --- a/drivers/usb/typec/ucsi/Makefile +++ b/drivers/usb/typec/ucsi/Makefile @@ -23,3 +23,4 @@ obj-$(CONFIG_UCSI_STM32G0) += ucsi_stm32g0.o obj-$(CONFIG_UCSI_PMIC_GLINK) += ucsi_glink.o obj-$(CONFIG_CROS_EC_UCSI) += cros_ec_ucsi.o obj-$(CONFIG_UCSI_LENOVO_YOGA_C630) += ucsi_yoga_c630.o +obj-$(CONFIG_UCSI_HUAWEI_GAOKUN) += ucsi_huawei_gaokun.o diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c index eae2b18a2d8a..92ebf1a2defd 100644 --- a/drivers/usb/typec/ucsi/debugfs.c +++ b/drivers/usb/typec/ucsi/debugfs.c @@ -34,16 +34,20 @@ static int ucsi_cmd(void *data, u64 val) case UCSI_CONNECTOR_RESET: case UCSI_SET_SINK_PATH: case UCSI_SET_NEW_CAM: + case UCSI_SET_USB: ret = ucsi_send_command(ucsi, val, NULL, 0); break; case UCSI_GET_CAPABILITY: case UCSI_GET_CONNECTOR_CAPABILITY: case UCSI_GET_ALTERNATE_MODES: + case UCSI_GET_CAM_SUPPORTED: case UCSI_GET_CURRENT_CAM: case UCSI_GET_PDOS: case UCSI_GET_CABLE_PROPERTY: case UCSI_GET_CONNECTOR_STATUS: case UCSI_GET_ERROR_STATUS: + case UCSI_GET_PD_MESSAGE: + case UCSI_GET_ATTENTION_VDO: case UCSI_GET_CAM_CS: case UCSI_GET_LPM_PPM_INFO: ret = ucsi_send_command(ucsi, val, diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 420af5139c70..8aae80b457d7 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) u8 cur = 0; int ret; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override && dp->initialized) { const struct typec_altmode *p = typec_altmode_get_partner(alt); @@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) schedule_work(&dp->work); ret = 0; err_unlock: - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return ret; } @@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) u64 command; int ret = 0; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override) { const struct typec_altmode *p = typec_altmode_get_partner(alt); @@ -144,7 +146,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) schedule_work(&dp->work); out_unlock: - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return ret; } @@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, int cmd = PD_VDO_CMD(header); int svdm_version; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override && dp->initialized) { const struct typec_altmode *p = typec_altmode_get_partner(alt); dev_warn(&p->dev, "firmware doesn't support alternate mode overriding\n"); - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return -EOPNOTSUPP; } svdm_version = typec_altmode_get_svdm_version(alt); if (svdm_version < 0) { - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return svdm_version; } @@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, break; } - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return 0; } @@ -296,6 +299,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) if (!dp) return; + cancel_work_sync(&dp->work); + dp->data.conf = 0; dp->data.status = 0; dp->initialized = false; diff --git a/drivers/usb/typec/ucsi/ucsi.c 
b/drivers/usb/typec/ucsi/ucsi.c index e8c7e9dc4930..01ce858a1a2b 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -1923,6 +1923,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data) EXPORT_SYMBOL_GPL(ucsi_set_drvdata); /** + * ucsi_con_mutex_lock - Acquire the connector mutex + * @con: The connector interface to lock + * + * Returns true on success, false if the connector is disconnected + */ +bool ucsi_con_mutex_lock(struct ucsi_connector *con) +{ + bool mutex_locked = false; + bool connected = true; + + while (connected && !mutex_locked) { + mutex_locked = mutex_trylock(&con->lock) != 0; + connected = UCSI_CONSTAT(con, CONNECTED); + if (connected && !mutex_locked) + msleep(20); + } + + connected = connected && con->partner; + if (!connected && mutex_locked) + mutex_unlock(&con->lock); + + return connected; +} + +/** + * ucsi_con_mutex_unlock - Release the connector mutex + * @con: The connector interface to unlock + */ +void ucsi_con_mutex_unlock(struct ucsi_connector *con) +{ + mutex_unlock(&con->lock); +} + +/** * ucsi_create - Allocate UCSI instance * @dev: Device interface to the PPM (Platform Policy Manager) * @ops: I/O routines diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 3a2c1762bec1..5a8f947fcece 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -94,6 +94,8 @@ int ucsi_register(struct ucsi *ucsi); void ucsi_unregister(struct ucsi *ucsi); void *ucsi_get_drvdata(struct ucsi *ucsi); void ucsi_set_drvdata(struct ucsi *ucsi, void *data); +bool ucsi_con_mutex_lock(struct ucsi_connector *con); +void ucsi_con_mutex_unlock(struct ucsi_connector *con); void ucsi_connector_change(struct ucsi *ucsi, u8 num); @@ -123,9 +125,11 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num); #define UCSI_GET_CONNECTOR_STATUS 0x12 #define UCSI_GET_CONNECTOR_STATUS_SIZE 152 #define UCSI_GET_ERROR_STATUS 0x13 +#define UCSI_GET_ATTENTION_VDO 0x16 #define UCSI_GET_PD_MESSAGE 0x15 #define UCSI_GET_CAM_CS 0x18 #define UCSI_SET_SINK_PATH 0x1c +#define UCSI_SET_USB 0x21 #define UCSI_GET_LPM_PPM_INFO 0x22 #define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16) @@ -432,7 +436,7 @@ struct ucsi_debugfs_entry { u64 low; u64 high; } response; - u32 status; + int status; struct dentry *dentry; }; diff --git a/drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c b/drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c new file mode 100644 index 000000000000..7b5222081bbb --- /dev/null +++ b/drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c @@ -0,0 +1,526 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ucsi-huawei-gaokun - A UCSI driver for HUAWEI Matebook E Go + * + * Copyright (C) 2024-2025 Pengyu Luo <mitltlatltl@gmail.com> + */ + +#include <drm/bridge/aux-bridge.h> +#include <linux/auxiliary_bus.h> +#include <linux/bitops.h> +#include <linux/completion.h> +#include <linux/container_of.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/platform_data/huawei-gaokun-ec.h> +#include <linux/string.h> +#include <linux/usb/pd_vdo.h> +#include <linux/usb/typec_altmode.h> +#include <linux/usb/typec_dp.h> +#include <linux/workqueue_types.h> + +#include "ucsi.h" + +#define EC_EVENT_UCSI 0x21 +#define EC_EVENT_USB 0x22 + +#define GAOKUN_CCX_MASK GENMASK(1, 0) +#define GAOKUN_MUX_MASK GENMASK(3, 2) + +#define GAOKUN_DPAM_MASK GENMASK(3, 0) +#define GAOKUN_HPD_STATE_MASK BIT(4) +#define GAOKUN_HPD_IRQ_MASK BIT(5) + +#define GET_IDX(updt) (ffs(updt) - 1) + +#define CCX_TO_ORI(ccx) (++(ccx) % 3) /* 
convert ccx to enum typec_orientation */ + +/* Configuration Channel Extension */ +enum gaokun_ucsi_ccx { + USBC_CCX_NORMAL, + USBC_CCX_REVERSE, + USBC_CCX_NONE, +}; + +enum gaokun_ucsi_mux { + USBC_MUX_NONE, + USBC_MUX_USB_2L, + USBC_MUX_DP_4L, + USBC_MUX_USB_DP, +}; + +/* based on pmic_glink_altmode_pin_assignment */ +enum gaokun_ucsi_dpam_pan { /* DP Alt Mode Pin Assignments */ + USBC_DPAM_PAN_NONE, + USBC_DPAM_PAN_A, /* Not supported after USB Type-C Standard v1.0b */ + USBC_DPAM_PAN_B, /* Not supported after USB Type-C Standard v1.0b */ + USBC_DPAM_PAN_C, /* USBC_DPAM_PAN_C_REVERSE - 6 */ + USBC_DPAM_PAN_D, + USBC_DPAM_PAN_E, + USBC_DPAM_PAN_F, /* Not supported after USB Type-C Standard v1.0b */ + USBC_DPAM_PAN_A_REVERSE,/* Not supported after USB Type-C Standard v1.0b */ + USBC_DPAM_PAN_B_REVERSE,/* Not supported after USB Type-C Standard v1.0b */ + USBC_DPAM_PAN_C_REVERSE, + USBC_DPAM_PAN_D_REVERSE, + USBC_DPAM_PAN_E_REVERSE, + USBC_DPAM_PAN_F_REVERSE,/* Not supported after USB Type-C Standard v1.0b */ +}; + +struct gaokun_ucsi_reg { + u8 num_ports; + u8 port_updt; + u8 port_data[4]; + u8 checksum; + u8 reserved; +} __packed; + +struct gaokun_ucsi_port { + struct completion usb_ack; + spinlock_t lock; /* serializing port resource access */ + + struct gaokun_ucsi *ucsi; + struct auxiliary_device *bridge; + + int idx; + enum gaokun_ucsi_ccx ccx; + enum gaokun_ucsi_mux mux; + u8 mode; + u16 svid; + u8 hpd_state; + u8 hpd_irq; +}; + +struct gaokun_ucsi { + struct gaokun_ec *ec; + struct ucsi *ucsi; + struct gaokun_ucsi_port *ports; + struct device *dev; + struct delayed_work work; + struct notifier_block nb; + u16 version; + u8 num_ports; +}; + +/* -------------------------------------------------------------------------- */ +/* For UCSI */ + +static int gaokun_ucsi_read_version(struct ucsi *ucsi, u16 *version) +{ + struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi); + + *version = uec->version; + + return 0; +} + +static int gaokun_ucsi_read_cci(struct ucsi *ucsi, u32 *cci) +{ + struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi); + u8 buf[GAOKUN_UCSI_READ_SIZE]; + int ret; + + ret = gaokun_ec_ucsi_read(uec->ec, buf); + if (ret) + return ret; + + memcpy(cci, buf, sizeof(*cci)); + + return 0; +} + +static int gaokun_ucsi_read_message_in(struct ucsi *ucsi, + void *val, size_t val_len) +{ + struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi); + u8 buf[GAOKUN_UCSI_READ_SIZE]; + int ret; + + ret = gaokun_ec_ucsi_read(uec->ec, buf); + if (ret) + return ret; + + memcpy(val, buf + GAOKUN_UCSI_CCI_SIZE, + min(val_len, GAOKUN_UCSI_MSGI_SIZE)); + + return 0; +} + +static int gaokun_ucsi_async_control(struct ucsi *ucsi, u64 command) +{ + struct gaokun_ucsi *uec = ucsi_get_drvdata(ucsi); + u8 buf[GAOKUN_UCSI_WRITE_SIZE] = {}; + + memcpy(buf, &command, sizeof(command)); + + return gaokun_ec_ucsi_write(uec->ec, buf); +} + +static void gaokun_ucsi_update_connector(struct ucsi_connector *con) +{ + struct gaokun_ucsi *uec = ucsi_get_drvdata(con->ucsi); + + if (con->num > uec->num_ports) + return; + + con->typec_cap.orientation_aware = true; +} + +static void gaokun_set_orientation(struct ucsi_connector *con, + struct gaokun_ucsi_port *port) +{ + enum gaokun_ucsi_ccx ccx; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + ccx = port->ccx; + spin_unlock_irqrestore(&port->lock, flags); + + typec_set_orientation(con->port, CCX_TO_ORI(ccx)); +} + +static void gaokun_ucsi_connector_status(struct ucsi_connector *con) +{ + struct gaokun_ucsi *uec = ucsi_get_drvdata(con->ucsi); + int idx; + + idx = 
con->num - 1; + if (con->num > uec->num_ports) { + dev_warn(uec->dev, "set orientation out of range: con%d\n", idx); + return; + } + + gaokun_set_orientation(con, &uec->ports[idx]); +} + +const struct ucsi_operations gaokun_ucsi_ops = { + .read_version = gaokun_ucsi_read_version, + .read_cci = gaokun_ucsi_read_cci, + .read_message_in = gaokun_ucsi_read_message_in, + .sync_control = ucsi_sync_control_common, + .async_control = gaokun_ucsi_async_control, + .update_connector = gaokun_ucsi_update_connector, + .connector_status = gaokun_ucsi_connector_status, +}; + +/* -------------------------------------------------------------------------- */ +/* For Altmode */ + +static void gaokun_ucsi_port_update(struct gaokun_ucsi_port *port, + const u8 *port_data) +{ + struct gaokun_ucsi *uec = port->ucsi; + int offset = port->idx * 2; /* every port has 2 Bytes data */ + unsigned long flags; + u8 dcc, ddi; + + dcc = port_data[offset]; + ddi = port_data[offset + 1]; + + spin_lock_irqsave(&port->lock, flags); + + port->ccx = FIELD_GET(GAOKUN_CCX_MASK, dcc); + port->mux = FIELD_GET(GAOKUN_MUX_MASK, dcc); + port->mode = FIELD_GET(GAOKUN_DPAM_MASK, ddi); + port->hpd_state = FIELD_GET(GAOKUN_HPD_STATE_MASK, ddi); + port->hpd_irq = FIELD_GET(GAOKUN_HPD_IRQ_MASK, ddi); + + /* Mode and SVID are unused; keeping them to make things clearer */ + switch (port->mode) { + case USBC_DPAM_PAN_C: + case USBC_DPAM_PAN_C_REVERSE: + port->mode = DP_PIN_ASSIGN_C; /* correct it for usb later */ + break; + case USBC_DPAM_PAN_D: + case USBC_DPAM_PAN_D_REVERSE: + port->mode = DP_PIN_ASSIGN_D; + break; + case USBC_DPAM_PAN_E: + case USBC_DPAM_PAN_E_REVERSE: + port->mode = DP_PIN_ASSIGN_E; + break; + case USBC_DPAM_PAN_NONE: + port->mode = TYPEC_STATE_SAFE; + break; + default: + dev_warn(uec->dev, "unknown mode %d\n", port->mode); + break; + } + + switch (port->mux) { + case USBC_MUX_NONE: + port->svid = 0; + break; + case USBC_MUX_USB_2L: + port->svid = USB_SID_PD; + port->mode = TYPEC_STATE_USB; /* same as PAN_C, correct it */ + break; + case USBC_MUX_DP_4L: + case USBC_MUX_USB_DP: + port->svid = USB_SID_DISPLAYPORT; + break; + default: + dev_warn(uec->dev, "unknown mux state %d\n", port->mux); + break; + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static int gaokun_ucsi_refresh(struct gaokun_ucsi *uec) +{ + struct gaokun_ucsi_reg ureg; + int ret, idx; + + ret = gaokun_ec_ucsi_get_reg(uec->ec, &ureg); + if (ret) + return GAOKUN_UCSI_NO_PORT_UPDATE; + + uec->num_ports = ureg.num_ports; + idx = GET_IDX(ureg.port_updt); + + if (idx < 0 || idx >= ureg.num_ports) + return GAOKUN_UCSI_NO_PORT_UPDATE; + + gaokun_ucsi_port_update(&uec->ports[idx], ureg.port_data); + return idx; +} + +static void gaokun_ucsi_handle_altmode(struct gaokun_ucsi_port *port) +{ + struct gaokun_ucsi *uec = port->ucsi; + int idx = port->idx; + + if (idx >= uec->ucsi->cap.num_connectors) { + dev_warn(uec->dev, "altmode port out of range: %d\n", idx); + return; + } + + /* UCSI callback .connector_status() have set orientation */ + if (port->bridge) + drm_aux_hpd_bridge_notify(&port->bridge->dev, + port->hpd_state ? 
+ connector_status_connected : + connector_status_disconnected); + + gaokun_ec_ucsi_pan_ack(uec->ec, port->idx); +} + +static void gaokun_ucsi_altmode_notify_ind(struct gaokun_ucsi *uec) +{ + int idx; + + if (!uec->ucsi->connector) { /* slow to register */ + dev_err_ratelimited(uec->dev, "ucsi connector is not initialized yet\n"); + return; + } + + idx = gaokun_ucsi_refresh(uec); + if (idx == GAOKUN_UCSI_NO_PORT_UPDATE) + gaokun_ec_ucsi_pan_ack(uec->ec, idx); /* ack directly if no update */ + else + gaokun_ucsi_handle_altmode(&uec->ports[idx]); +} + +/* + * When inserting, 2 UCSI events(connector change) are followed by USB events. + * If we received one USB event, that means USB events are not blocked, so we + * can complelte for all ports, and we should signal all events. + */ +static void gaokun_ucsi_complete_usb_ack(struct gaokun_ucsi *uec) +{ + struct gaokun_ucsi_port *port; + int idx = 0; + + while (idx < uec->num_ports) { + port = &uec->ports[idx++]; + if (!completion_done(&port->usb_ack)) + complete_all(&port->usb_ack); + } +} + +/* + * USB event is necessary for enabling altmode, the event should follow + * UCSI event, if not after timeout(this notify may be disabled somehow), + * then force to enable altmode. + */ +static void gaokun_ucsi_handle_no_usb_event(struct gaokun_ucsi *uec, int idx) +{ + struct gaokun_ucsi_port *port; + + port = &uec->ports[idx]; + if (!wait_for_completion_timeout(&port->usb_ack, 2 * HZ)) { + dev_warn(uec->dev, "No USB EVENT, triggered by UCSI EVENT"); + gaokun_ucsi_altmode_notify_ind(uec); + } +} + +static int gaokun_ucsi_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + u32 cci; + int ret; + struct gaokun_ucsi *uec = container_of(nb, struct gaokun_ucsi, nb); + + switch (action) { + case EC_EVENT_USB: + gaokun_ucsi_complete_usb_ack(uec); + gaokun_ucsi_altmode_notify_ind(uec); + return NOTIFY_OK; + + case EC_EVENT_UCSI: + ret = gaokun_ucsi_read_cci(uec->ucsi, &cci); + if (ret) + return NOTIFY_DONE; + + ucsi_notify_common(uec->ucsi, cci); + if (UCSI_CCI_CONNECTOR(cci)) + gaokun_ucsi_handle_no_usb_event(uec, UCSI_CCI_CONNECTOR(cci) - 1); + + return NOTIFY_OK; + + default: + return NOTIFY_DONE; + } +} + +static int gaokun_ucsi_ports_init(struct gaokun_ucsi *uec) +{ + struct gaokun_ucsi_port *ucsi_port; + struct gaokun_ucsi_reg ureg = {}; + struct device *dev = uec->dev; + struct fwnode_handle *fwnode; + int i, ret, num_ports; + u32 port; + + gaokun_ec_ucsi_get_reg(uec->ec, &ureg); + num_ports = ureg.num_ports; + uec->ports = devm_kcalloc(dev, num_ports, sizeof(*uec->ports), + GFP_KERNEL); + if (!uec->ports) + return -ENOMEM; + + for (i = 0; i < num_ports; ++i) { + ucsi_port = &uec->ports[i]; + ucsi_port->ccx = USBC_CCX_NONE; + ucsi_port->idx = i; + ucsi_port->ucsi = uec; + init_completion(&ucsi_port->usb_ack); + spin_lock_init(&ucsi_port->lock); + } + + device_for_each_child_node(dev, fwnode) { + ret = fwnode_property_read_u32(fwnode, "reg", &port); + if (ret < 0) { + dev_err(dev, "missing reg property of %pOFn\n", fwnode); + fwnode_handle_put(fwnode); + return ret; + } + + if (port >= num_ports) { + dev_warn(dev, "invalid connector number %d, ignoring\n", port); + continue; + } + + ucsi_port = &uec->ports[port]; + ucsi_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode)); + if (IS_ERR(ucsi_port->bridge)) { + fwnode_handle_put(fwnode); + return PTR_ERR(ucsi_port->bridge); + } + } + + for (i = 0; i < num_ports; i++) { + if (!uec->ports[i].bridge) + continue; + + ret = devm_drm_dp_hpd_bridge_add(dev, 
uec->ports[i].bridge); + if (ret) + return ret; + } + + return 0; +} + +static void gaokun_ucsi_register_worker(struct work_struct *work) +{ + struct gaokun_ucsi *uec; + struct ucsi *ucsi; + int ret; + + uec = container_of(work, struct gaokun_ucsi, work.work); + ucsi = uec->ucsi; + + ret = gaokun_ec_register_notify(uec->ec, &uec->nb); + if (ret) { + dev_err_probe(ucsi->dev, ret, "notifier register failed\n"); + return; + } + + ret = ucsi_register(ucsi); + if (ret) + dev_err_probe(ucsi->dev, ret, "ucsi register failed\n"); +} + +static int gaokun_ucsi_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct gaokun_ec *ec = adev->dev.platform_data; + struct device *dev = &adev->dev; + struct gaokun_ucsi *uec; + int ret; + + uec = devm_kzalloc(dev, sizeof(*uec), GFP_KERNEL); + if (!uec) + return -ENOMEM; + + uec->ec = ec; + uec->dev = dev; + uec->version = UCSI_VERSION_1_0; + uec->nb.notifier_call = gaokun_ucsi_notify; + + INIT_DELAYED_WORK(&uec->work, gaokun_ucsi_register_worker); + + ret = gaokun_ucsi_ports_init(uec); + if (ret) + return ret; + + uec->ucsi = ucsi_create(dev, &gaokun_ucsi_ops); + if (IS_ERR(uec->ucsi)) + return PTR_ERR(uec->ucsi); + + ucsi_set_drvdata(uec->ucsi, uec); + auxiliary_set_drvdata(adev, uec); + + /* EC can't handle UCSI properly in the early stage */ + schedule_delayed_work(&uec->work, 3 * HZ); + + return 0; +} + +static void gaokun_ucsi_remove(struct auxiliary_device *adev) +{ + struct gaokun_ucsi *uec = auxiliary_get_drvdata(adev); + + gaokun_ec_unregister_notify(uec->ec, &uec->nb); + ucsi_unregister(uec->ucsi); + ucsi_destroy(uec->ucsi); +} + +static const struct auxiliary_device_id gaokun_ucsi_id_table[] = { + { .name = GAOKUN_MOD_NAME "." GAOKUN_DEV_UCSI, }, + {} +}; +MODULE_DEVICE_TABLE(auxiliary, gaokun_ucsi_id_table); + +static struct auxiliary_driver gaokun_ucsi_driver = { + .name = GAOKUN_DEV_UCSI, + .id_table = gaokun_ucsi_id_table, + .probe = gaokun_ucsi_probe, + .remove = gaokun_ucsi_remove, +}; + +module_auxiliary_driver(gaokun_ucsi_driver); + +MODULE_DESCRIPTION("HUAWEI Matebook E Go UCSI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 35f9046af315..6328c3a05bcd 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1646,14 +1646,14 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf, { struct vm_area_struct *vma = vmf->vma; struct vfio_pci_core_device *vdev = vma->vm_private_data; - unsigned long pfn, pgoff = vmf->pgoff - vma->vm_pgoff; + unsigned long addr = vmf->address & ~((PAGE_SIZE << order) - 1); + unsigned long pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; + unsigned long pfn = vma_to_pfn(vma) + pgoff; vm_fault_t ret = VM_FAULT_SIGBUS; - pfn = vma_to_pfn(vma) + pgoff; - - if (order && (pfn & ((1 << order) - 1) || - vmf->address & ((PAGE_SIZE << order) - 1) || - vmf->address + (PAGE_SIZE << order) > vma->vm_end)) { + if (order && (addr < vma->vm_start || + addr + (PAGE_SIZE << order) > vma->vm_end || + pfn & ((1 << order) - 1))) { ret = VM_FAULT_FALLBACK; goto out; } diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 1f65795cf5d7..ef56a2500ed6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -217,6 +217,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, * buffering it. 
*/ if (dma_capable(dev, dev_addr, size, true) && + !dma_kmalloc_needs_bounce(dev, size, dir) && !range_straddles_page_boundary(phys, size) && !xen_arch_need_swiotlb(dev, phys, dev_addr) && !is_swiotlb_force_bounce(dev)) diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 13821e7e825e..9ac0427724a3 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -77,6 +77,7 @@ enum xb_req_state { struct xb_req_data { struct list_head list; wait_queue_head_t wq; + struct kref kref; struct xsd_sockmsg msg; uint32_t caller_req_id; enum xsd_sockmsg_type type; @@ -103,6 +104,7 @@ int xb_init_comms(void); void xb_deinit_comms(void); int xs_watch_msg(struct xs_watch_event *event); void xs_request_exit(struct xb_req_data *req); +void xs_free_req(struct kref *kref); int xenbus_match(struct device *_dev, const struct device_driver *_drv); int xenbus_dev_probe(struct device *_dev); diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index e5fda0256feb..82df2da1b880 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -309,8 +309,8 @@ static int process_msg(void) virt_wmb(); req->state = xb_req_state_got_reply; req->cb(req); - } else - kfree(req); + } + kref_put(&req->kref, xs_free_req); } mutex_unlock(&xs_response_mutex); @@ -386,14 +386,13 @@ static int process_writes(void) state.req->msg.type = XS_ERROR; state.req->err = err; list_del(&state.req->list); - if (state.req->state == xb_req_state_aborted) - kfree(state.req); - else { + if (state.req->state != xb_req_state_aborted) { /* write err, then update state */ virt_wmb(); state.req->state = xb_req_state_got_reply; wake_up(&state.req->wq); } + kref_put(&state.req->kref, xs_free_req); mutex_unlock(&xb_write_mutex); diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 46f8916597e5..f5c21ba64df5 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req) mutex_unlock(&u->reply_mutex); kfree(req->body); - kfree(req); + kref_put(&req->kref, xs_free_req); kref_put(&u->kref, xenbus_file_free); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 6d32ffb01136..86fe6e779056 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -966,9 +966,15 @@ static int __init xenbus_init(void) if (xen_pv_domain()) xen_store_domain_type = XS_PV; if (xen_hvm_domain()) + { xen_store_domain_type = XS_HVM; - if (xen_hvm_domain() && xen_initial_domain()) - xen_store_domain_type = XS_LOCAL; + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (err) + goto out_error; + xen_store_evtchn = (int)v; + if (!v && xen_initial_domain()) + xen_store_domain_type = XS_LOCAL; + } if (xen_pv_domain() && !xen_start_info->store_evtchn) xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) @@ -987,10 +993,6 @@ static int __init xenbus_init(void) xen_store_interface = gfn_to_virt(xen_store_gfn); break; case XS_HVM: - err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); - if (err) - goto out_error; - xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index d32c726f7a12..dcf9182c8451 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -112,6 +112,12 @@ static void 
xs_suspend_exit(void) wake_up_all(&xs_state_enter_wq); } +void xs_free_req(struct kref *kref) +{ + struct xb_req_data *req = container_of(kref, struct xb_req_data, kref); + kfree(req); +} + static uint32_t xs_request_enter(struct xb_req_data *req) { uint32_t rq_id; @@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) req->caller_req_id = req->msg.req_id; req->msg.req_id = xs_request_enter(req); + /* + * Take 2nd ref. One for this thread, and the second for the + * xenbus_thread. + */ + kref_get(&req->kref); + mutex_lock(&xb_write_mutex); list_add_tail(&req->list, &xb_write_list); notify = list_is_singular(&xb_write_list); @@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg) if (req->state == xb_req_state_queued || req->state == xb_req_state_wait_reply) req->state = xb_req_state_aborted; - else - kfree(req); + + kref_put(&req->kref, xs_free_req); mutex_unlock(&xb_write_mutex); return ret; @@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) req->cb = xenbus_dev_queue_reply; req->par = par; req->user_req = true; + kref_init(&req->kref); xs_send(req, msg); @@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t, req->num_vecs = num_vecs; req->cb = xs_wake_up; req->user_req = false; + kref_init(&req->kref); msg.req_id = 0; msg.tx_id = t.id; diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index effafc3e0ced..7ec022e9361a 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -1422,11 +1422,31 @@ alloc_done: wp->sectors_free = UINT_MAX; - open_bucket_for_each(c, &wp->ptrs, ob, i) + open_bucket_for_each(c, &wp->ptrs, ob, i) { + /* + * Ensure proper write alignment - either due to misaligned + * bucket sizes (from buggy bcachefs-tools), or writes that mix + * logical/physical alignment: + */ + struct bch_dev *ca = ob_dev(c, ob); + u64 offset = bucket_to_sector(ca, ob->bucket) + + ca->mi.bucket_size - + ob->sectors_free; + unsigned align = round_up(offset, block_sectors(c)) - offset; + + ob->sectors_free = max_t(int, 0, ob->sectors_free - align); + wp->sectors_free = min(wp->sectors_free, ob->sectors_free); + } wp->sectors_free = rounddown(wp->sectors_free, block_sectors(c)); + /* Did alignment use up space in an open_bucket? 
*/ + if (unlikely(!wp->sectors_free)) { + bch2_alloc_sectors_done(c, wp); + goto retry; + } + BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX); return 0; diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 7b98ba2dec64..37b69d89341f 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -47,6 +47,27 @@ #define DROP_PREV_NODE 11 #define DID_FILL_FROM_SCAN 12 +/* + * Returns true if it's a btree we can easily reconstruct, or otherwise won't + * cause data loss if it's missing: + */ +static bool btree_id_important(enum btree_id btree) +{ + if (btree_id_is_alloc(btree)) + return false; + + switch (btree) { + case BTREE_ID_quotas: + case BTREE_ID_snapshot_trees: + case BTREE_ID_logged_ops: + case BTREE_ID_rebalance_work: + case BTREE_ID_subvolume_children: + return false; + default: + return true; + } +} + static const char * const bch2_gc_phase_strs[] = { #define x(n) #n, GC_PHASES() @@ -534,8 +555,10 @@ reconstruct_root: r->error = 0; if (!bch2_btree_has_scanned_nodes(c, i)) { - mustfix_fsck_err(trans, btree_root_unreadable_and_scan_found_nothing, - "no nodes found for btree %s, continue?", buf.buf); + __fsck_err(trans, + FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0), + btree_root_unreadable_and_scan_found_nothing, + "no nodes found for btree %s, continue?", buf.buf); bch2_btree_root_alloc_fake_trans(trans, i, 0); } else { bch2_btree_root_alloc_fake_trans(trans, i, 1); diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 5fd4a58d2ad2..60782f3e5aec 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -41,6 +41,7 @@ void bch2_btree_node_io_unlock(struct btree *b) clear_btree_node_write_in_flight_inner(b); clear_btree_node_write_in_flight(b); + smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); } @@ -1400,6 +1401,7 @@ start: printbuf_exit(&buf); clear_btree_node_read_in_flight(b); + smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); } @@ -1595,6 +1597,7 @@ fsck_err: printbuf_exit(&buf); clear_btree_node_read_in_flight(b); + smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); } @@ -1721,6 +1724,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b, set_btree_node_read_error(b); bch2_btree_lost_data(c, b->c.btree_id); clear_btree_node_read_in_flight(b); + smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_read_in_flight); printbuf_exit(&buf); return; @@ -2061,8 +2065,10 @@ static void __btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start if (new & (1U << BTREE_NODE_write_in_flight)) __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type); - else + else { + smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_write_in_flight); + } } static void btree_node_write_done(struct bch_fs *c, struct btree *b, u64 start_time) @@ -2175,6 +2181,7 @@ static void btree_node_write_endio(struct bio *bio) } clear_btree_node_write_in_flight_inner(b); + smp_mb__after_atomic(); wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner); INIT_WORK(&wb->work, btree_node_write_work); queue_work(c->btree_io_complete_wq, &wb->work); diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c index 7d6c971db23c..ade3b5addd75 100644 --- a/fs/bcachefs/btree_journal_iter.c +++ b/fs/bcachefs/btree_journal_iter.c @@ -288,7 +288,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id, .size = max_t(size_t, keys->size, 8) * 2, }; - new_keys.data = kvmalloc_array(new_keys.size, 
sizeof(new_keys.data[0]), GFP_KERNEL); + new_keys.data = bch2_kvmalloc(new_keys.size * sizeof(new_keys.data[0]), GFP_KERNEL); if (!new_keys.data) { bch_err(c, "%s: error allocating new key array (size %zu)", __func__, new_keys.size); diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 44b5fe430370..00307356d7c8 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -1389,7 +1389,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, printbuf_exit(&buf); } -static void +static int bch2_btree_insert_keys_interior(struct btree_update *as, struct btree_trans *trans, struct btree_path *path, @@ -1411,7 +1411,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as, insert = bkey_next(insert)) bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); - if (bch2_btree_node_check_topology(trans, b)) { + int ret = bch2_btree_node_check_topology(trans, b); + if (ret) { struct printbuf buf = PRINTBUF; for (struct bkey_i *k = keys->keys; @@ -1421,11 +1422,15 @@ bch2_btree_insert_keys_interior(struct btree_update *as, prt_newline(&buf); } - panic("%s(): check_topology error: inserted keys\n%s", __func__, buf.buf); + bch2_fs_fatal_error(as->c, "%ps -> %s(): check_topology error %s: inserted keys\n%s", + (void *) _RET_IP_, __func__, bch2_err_str(ret), buf.buf); + dump_stack(); + return ret; } memmove_u64s_down(keys->keys, insert, keys->top_p - insert->_data); keys->top_p -= insert->_data - keys->keys_p; + return 0; } static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos) @@ -1559,11 +1564,11 @@ static void __btree_split_node(struct btree_update *as, * nodes that were coalesced, and thus in the middle of a child node post * coalescing: */ -static void btree_split_insert_keys(struct btree_update *as, - struct btree_trans *trans, - btree_path_idx_t path_idx, - struct btree *b, - struct keylist *keys) +static int btree_split_insert_keys(struct btree_update *as, + struct btree_trans *trans, + btree_path_idx_t path_idx, + struct btree *b, + struct keylist *keys) { struct btree_path *path = trans->paths + path_idx; @@ -1573,8 +1578,12 @@ static void btree_split_insert_keys(struct btree_update *as, bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); - bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); + int ret = bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); + if (ret) + return ret; } + + return 0; } static int btree_split(struct btree_update *as, struct btree_trans *trans, @@ -1607,8 +1616,10 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, __btree_split_node(as, trans, b, n, keys); if (keys) { - btree_split_insert_keys(as, trans, path, n1, keys); - btree_split_insert_keys(as, trans, path, n2, keys); + ret = btree_split_insert_keys(as, trans, path, n1, keys) ?: + btree_split_insert_keys(as, trans, path, n2, keys); + if (ret) + goto err; BUG_ON(!bch2_keylist_empty(keys)); } @@ -1654,7 +1665,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, n3->sib_u64s[0] = U16_MAX; n3->sib_u64s[1] = U16_MAX; - btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); + ret = btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); + if (ret) + goto err; } } else { trace_and_count(c, btree_node_compact, trans, b); @@ -1662,7 +1675,9 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, n1 = bch2_btree_node_alloc_replacement(as, trans, b); 
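/*
 * Illustrative sketch, not part of the patch: the converted functions above
 * return 0 on success or a negative errno, and their callers chain them with
 * the GNU C "?:" extension so the first failure short-circuits the rest.
 * The step_*() and example_chain() names below are placeholders.
 */
#include <linux/errno.h>

static int step_one(void)	{ return 0; }
static int step_two(void)	{ return -EINVAL; }
static int step_three(void)	{ return 0; }

static int example_chain(void)
{
	/* "a ?: b" evaluates a once; b runs only when a is zero. */
	return step_one() ?: step_two() ?: step_three();	/* yields -EINVAL */
}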
if (keys) { - btree_split_insert_keys(as, trans, path, n1, keys); + ret = btree_split_insert_keys(as, trans, path, n1, keys); + if (ret) + goto err; BUG_ON(!bch2_keylist_empty(keys)); } @@ -1809,15 +1824,15 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t goto split; } - ret = bch2_btree_node_check_topology(trans, b); + + ret = bch2_btree_node_check_topology(trans, b) ?: + bch2_btree_insert_keys_interior(as, trans, path, b, + path->l[b->c.level].iter, keys); if (ret) { bch2_btree_node_unlock_write(trans, path, b); return ret; } - bch2_btree_insert_keys_interior(as, trans, path, b, - path->l[b->c.level].iter, keys); - trans_for_each_path_with_node(trans, b, linked, i) bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b); diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 4ef261e8db4f..31fbc2716d8b 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -604,6 +604,13 @@ static int bch2_trigger_pointer(struct btree_trans *trans, } struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr); + if (!bucket_valid(ca, bucket.offset)) { + if (insert) { + bch2_dev_bucket_missing(ca, bucket.offset); + ret = -BCH_ERR_trigger_pointer; + } + goto err; + } if (flags & BTREE_TRIGGER_transactional) { struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0); @@ -1307,13 +1314,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1); if (resize) { - bucket_gens->nbuckets = min(bucket_gens->nbuckets, - old_bucket_gens->nbuckets); - bucket_gens->nbuckets_minus_first = - bucket_gens->nbuckets - bucket_gens->first_bucket; + u64 copy = min(bucket_gens->nbuckets, + old_bucket_gens->nbuckets); memcpy(bucket_gens->b, old_bucket_gens->b, - bucket_gens->nbuckets); + sizeof(bucket_gens->b[0]) * copy); } rcu_assign_pointer(ca->bucket_gens, bucket_gens); diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index 8d75b27a1418..af1532de4a37 100644 --- a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -44,6 +44,7 @@ static inline void bucket_unlock(struct bucket *b) BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte); clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock); + smp_mb__after_atomic(); wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR); } diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index 92ee59d9e00e..8a680e52c1ed 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -685,7 +685,7 @@ static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subv vfs_d_type(d.v->d_type)); if (ret) ctx->pos = d.k->p.offset + 1; - return ret; + return !ret; } int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx) @@ -710,7 +710,7 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx) if (ret2 > 0) continue; - ret2 ?: drop_locks_do(trans, bch2_dir_emit(ctx, dirent, target)); + ret2 ?: (bch2_trans_unlock(trans), bch2_dir_emit(ctx, dirent, target)); }))); bch2_bkey_buf_exit(&sk, c); diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c index 1186280b29e9..2ca3cbf12b71 100644 --- a/fs/bcachefs/disk_groups.c +++ b/fs/bcachefs/disk_groups.c @@ -470,23 +470,22 @@ inval: int __bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name) { - struct bch_member *mi; - int ret, v = -1; + lockdep_assert_held(&c->sb_lock); - if (!strlen(name) || !strcmp(name, "none")) - return 0; - v = 
bch2_disk_path_find_or_create(&c->disk_sb, name); - if (v < 0) - return v; + if (!strlen(name) || !strcmp(name, "none")) { + struct bch_member *mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + SET_BCH_MEMBER_GROUP(mi, 0); + } else { + int v = bch2_disk_path_find_or_create(&c->disk_sb, name); + if (v < 0) + return v; - ret = bch2_sb_disk_groups_to_cpu(c); - if (ret) - return ret; + struct bch_member *mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); + SET_BCH_MEMBER_GROUP(mi, v + 1); + } - mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); - SET_BCH_MEMBER_GROUP(mi, v + 1); - return 0; + return bch2_sb_disk_groups_to_cpu(c); } int bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name) diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index a396865e8b17..fff58b78327c 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -2204,10 +2204,10 @@ void bch2_fs_ec_stop(struct bch_fs *c) static bool bch2_fs_ec_flush_done(struct bch_fs *c) { - bool ret; + sched_annotate_sleep(); mutex_lock(&c->ec_stripe_new_lock); - ret = list_empty(&c->ec_stripe_new_list); + bool ret = list_empty(&c->ec_stripe_new_list); mutex_unlock(&c->ec_stripe_new_lock); return ret; diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h index 62d27e04d763..51893e1ee874 100644 --- a/fs/bcachefs/ec.h +++ b/fs/bcachefs/ec.h @@ -160,6 +160,7 @@ static inline void gc_stripe_unlock(struct gc_stripe *s) BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte); clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock); + smp_mb__after_atomic(); wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR); } diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index a615e4852ded..d9ebffa5b3a2 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -269,7 +269,7 @@ x(BCH_ERR_invalid_sb, invalid_sb_downgrade) \ x(BCH_ERR_invalid, invalid_bkey) \ x(BCH_ERR_operation_blocked, nocow_lock_blocked) \ - x(EIO, journal_shutdown) \ + x(EROFS, journal_shutdown) \ x(EIO, journal_flush_err) \ x(EIO, journal_write_err) \ x(EIO, btree_node_read_err) \ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index 925b0b54ea2f..6b8695b1349c 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -478,7 +478,9 @@ int __bch2_fsck_err(struct bch_fs *c, } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { if (c->opts.errors != BCH_ON_ERROR_continue || !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { - prt_str(out, ", shutting down"); + prt_str_indented(out, ", shutting down\n" + "error not marked as autofix and not in fsck\n" + "run fsck, and forward to devs so error can be marked for self-healing"); inconsistent = true; print = true; ret = -BCH_ERR_fsck_errors_not_fixed; diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index dca2b8425cc0..e597fb9c9823 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -1056,8 +1056,9 @@ bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bke static bool want_cached_ptr(struct bch_fs *c, struct bch_io_opts *opts, struct bch_extent_ptr *ptr) { - if (!opts->promote_target || - !bch2_dev_in_target(c, ptr->dev, opts->promote_target)) + unsigned target = opts->promote_target ?: opts->foreground_target; + + if (target && !bch2_dev_in_target(c, ptr->dev, target)) return false; struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 65c2c33d253d..9657144666b8 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -144,10 +144,25 @@ int 
__must_check bch2_write_inode_size(struct bch_fs *c, void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *quota_res, s64 sectors) { - bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c, - "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)", - inode->v.i_ino, (u64) inode->v.i_blocks, sectors, - inode->ei_inode.bi_sectors); + if (unlikely((s64) inode->v.i_blocks + sectors < 0)) { + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + prt_printf(&buf, "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)", + inode->v.i_ino, (u64) inode->v.i_blocks, sectors, + inode->ei_inode.bi_sectors); + + bool repeat = false, print = false, suppress = false; + bch2_count_fsck_err(c, vfs_inode_i_blocks_underflow, buf.buf, &repeat, &print, &suppress); + if (print) + bch2_print_str(c, buf.buf); + printbuf_exit(&buf); + + if (sectors < 0) + sectors = -inode->v.i_blocks; + else + sectors = 0; + } + inode->v.i_blocks += sectors; #ifdef CONFIG_BCACHEFS_QUOTA @@ -502,11 +517,22 @@ int bchfs_truncate(struct mnt_idmap *idmap, goto err; } - bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks && - !bch2_journal_error(&c->journal), c, - "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)", - inode->v.i_ino, (u64) inode->v.i_blocks, - inode->ei_inode.bi_sectors); + if (unlikely(!inode->v.i_size && inode->v.i_blocks && + !bch2_journal_error(&c->journal))) { + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + prt_printf(&buf, + "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)", + inode->v.i_ino, (u64) inode->v.i_blocks, + inode->ei_inode.bi_sectors); + + bool repeat = false, print = false, suppress = false; + bch2_count_fsck_err(c, vfs_inode_i_blocks_not_zero_at_truncate, buf.buf, + &repeat, &print, &suppress); + if (print) + bch2_print_str(c, buf.buf); + printbuf_exit(&buf); + } ret = bch2_setattr_nonsize(idmap, inode, iattr); err: diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 0f1d61aab90b..b6801861c66f 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -66,6 +66,8 @@ static inline void bch2_inode_flags_to_vfs(struct bch_fs *c, struct bch_inode_in if (bch2_inode_casefold(c, &inode->ei_inode)) inode->v.i_flags |= S_CASEFOLD; + else + inode->v.i_flags &= ~S_CASEFOLD; } void bch2_inode_update_after_write(struct btree_trans *trans, @@ -848,10 +850,8 @@ int __bch2_unlink(struct inode *vdir, struct dentry *dentry, set_nlink(&inode->v, 0); } - if (IS_CASEFOLDED(vdir)) { + if (IS_CASEFOLDED(vdir)) d_invalidate(dentry); - d_prune_aliases(&inode->v); - } err: bch2_trans_put(trans); bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode); @@ -1464,8 +1464,8 @@ static int bch2_next_fiemap_extent(struct btree_trans *trans, unsigned sectors = cur->kbuf.k->k.size; s64 offset_into_extent = 0; enum btree_id data_btree = BTREE_ID_extents; - int ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, - &cur->kbuf); + ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, + &cur->kbuf); if (ret) goto err; @@ -2502,10 +2502,9 @@ static int bch2_fs_get_tree(struct fs_context *fc) bch2_opts_apply(&c->opts, opts); - /* - * need to initialise sb and set c->vfs_sb _before_ starting fs, - * for blk_holder_ops - */ + ret = bch2_fs_start(c); + if (ret) + goto err_stop_fs; sb = sget(fc->fs_type, NULL, bch2_set_super, fc->sb_flags|SB_NOSEC, c); ret = PTR_ERR_OR_ZERO(sb); @@ -2567,9 +2566,10 @@ got_sb: sb->s_shrink->seeks = 0; - ret = bch2_fs_start(c); - if (ret) - goto err_put_super; 
+#ifdef CONFIG_UNICODE + sb->s_encoding = c->cf_encoding; +#endif + generic_set_sb_d_ops(sb); vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM); ret = PTR_ERR_OR_ZERO(vinode); diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index a418fa62f09d..c1237da079ed 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -255,6 +255,27 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, } if (i_sectors_delta) { + s64 bi_sectors = le64_to_cpu(inode->v.bi_sectors); + if (unlikely(bi_sectors + i_sectors_delta < 0)) { + struct bch_fs *c = trans->c; + struct printbuf buf = PRINTBUF; + bch2_log_msg_start(c, &buf); + prt_printf(&buf, "inode %llu i_sectors underflow: %lli + %lli < 0", + extent_iter->pos.inode, bi_sectors, i_sectors_delta); + + bool repeat = false, print = false, suppress = false; + bch2_count_fsck_err(c, inode_i_sectors_underflow, buf.buf, + &repeat, &print, &suppress); + if (print) + bch2_print_str(c, buf.buf); + printbuf_exit(&buf); + + if (i_sectors_delta < 0) + i_sectors_delta = -bi_sectors; + else + i_sectors_delta = 0; + } + le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta); inode_update_flags = 0; } diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 2a54ac79189b..ded18a94ed02 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -19,6 +19,7 @@ #include <linux/ioprio.h> #include <linux/string_choices.h> +#include <linux/sched/sysctl.h> void bch2_journal_pos_from_member_info_set(struct bch_fs *c) { @@ -1262,7 +1263,8 @@ int bch2_journal_read(struct bch_fs *c, degraded = true; } - closure_sync(&jlist.cl); + while (closure_sync_timeout(&jlist.cl, sysctl_hung_task_timeout_secs * HZ / 2)) + ; if (jlist.ret) return jlist.ret; @@ -1782,7 +1784,7 @@ static CLOSURE_CALLBACK(journal_write_submit) struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE); if (!ca) { /* XXX: fix this */ - bch_err(c, "missing device for journal write\n"); + bch_err(c, "missing device %u for journal write", ptr->dev); continue; } diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index ea670c3c43d8..976464d8a695 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -266,10 +266,11 @@ out: static bool should_discard_bucket(struct journal *j, struct journal_device *ja) { - bool ret; - spin_lock(&j->lock); - ret = ja->discard_idx != ja->dirty_idx_ondisk; + unsigned min_free = max(4, ja->nr / 8); + + bool ret = bch2_journal_dev_buckets_available(j, ja, journal_space_discarded) < min_free && + ja->discard_idx != ja->dirty_idx_ondisk; spin_unlock(&j->lock); return ret; diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index fc396b9fa754..dfdbb9259985 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -784,7 +784,8 @@ static int __bch2_move_data_phys(struct moving_context *ctxt, goto err; ret = bch2_btree_write_buffer_tryflush(trans); - bch_err_msg(c, ret, "flushing btree write buffer"); + if (!bch2_err_matches(ret, EROFS)) + bch_err_msg(c, ret, "flushing btree write buffer"); if (ret) goto err; diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c index 46f3c8b100a9..52c58c6d53d2 100644 --- a/fs/bcachefs/namei.c +++ b/fs/bcachefs/namei.c @@ -343,6 +343,9 @@ bool bch2_reinherit_attrs(struct bch_inode_unpacked *dst_u, bool ret = false; for (id = 0; id < Inode_opt_nr; id++) { + if (!S_ISDIR(dst_u->bi_mode) && id == Inode_opt_casefold) + continue; + /* Skip attributes that were explicitly set on this inode */ if (dst_u->bi_fields_set & (1 << id)) continue; 
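
The bcachefs hunks above (btree_split(), bch2_btree_insert_node()) convert helpers that used to panic into helpers that return an int error code, and they chain the fallible calls with GCC's "a ?: b" operator so the first non-zero error short-circuits the rest, as in ret = btree_split_insert_keys(...) ?: btree_split_insert_keys(...). The standalone sketch below shows how that chaining behaves; step_one()/step_two()/step_three() are made-up helpers for illustration only, not functions from the patch.

/*
 * Standalone illustration of the "a ?: b" chaining used in the hunks above:
 * "x ?: y" is a GNU C extension that evaluates to x when x is non-zero and
 * only then falls through to y, so a chain of int-returning calls stops at
 * the first failure and yields 0 only if every step succeeded.
 * step_one/step_two/step_three are hypothetical, not bcachefs functions.
 */
#include <stdio.h>

static int step_one(void)   { return 0; }	/* succeeds */
static int step_two(void)   { return -5; }	/* fails with an errno-style code */
static int step_three(void) { return 0; }	/* not evaluated below */

int main(void)
{
	/* Same shape as: ret = check_topology() ?: insert_keys_interior(); */
	int ret = step_one() ?: step_two() ?: step_three();

	printf("ret = %d\n", ret);	/* prints "ret = -5" */
	return 0;
}

Written without the extension this is just ret = step_one(); if (!ret) ret = step_two(); if (!ret) ret = step_three();, which is why the pattern reads as plain sequential error propagation.
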
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c index acb5d845841e..badd0e17ada5 100644 --- a/fs/bcachefs/sb-downgrade.c +++ b/fs/bcachefs/sb-downgrade.c @@ -20,6 +20,10 @@ * x(version, recovery_passes, errors...) */ #define UPGRADE_TABLE() \ + x(snapshot_2, \ + RECOVERY_PASS_ALL_FSCK, \ + BCH_FSCK_ERR_subvol_root_wrong_bi_subvol, \ + BCH_FSCK_ERR_subvol_not_master_and_not_snapshot) \ x(backpointers, \ RECOVERY_PASS_ALL_FSCK) \ x(inode_v3, \ diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h index dc53d25c7cbb..3b69a924086f 100644 --- a/fs/bcachefs/sb-errors_format.h +++ b/fs/bcachefs/sb-errors_format.h @@ -46,7 +46,7 @@ enum bch_fsck_flags { x(btree_node_unsupported_version, 34, 0) \ x(btree_node_bset_older_than_sb_min, 35, 0) \ x(btree_node_bset_newer_than_sb, 36, 0) \ - x(btree_node_data_missing, 37, 0) \ + x(btree_node_data_missing, 37, FSCK_AUTOFIX) \ x(btree_node_bset_after_end, 38, 0) \ x(btree_node_replicas_sectors_written_mismatch, 39, 0) \ x(btree_node_replicas_data_mismatch, 40, 0) \ @@ -205,9 +205,9 @@ enum bch_fsck_flags { x(snapshot_bad_depth, 184, 0) \ x(snapshot_bad_skiplist, 185, 0) \ x(subvol_pos_bad, 186, 0) \ - x(subvol_not_master_and_not_snapshot, 187, 0) \ + x(subvol_not_master_and_not_snapshot, 187, FSCK_AUTOFIX) \ x(subvol_to_missing_root, 188, 0) \ - x(subvol_root_wrong_bi_subvol, 189, 0) \ + x(subvol_root_wrong_bi_subvol, 189, FSCK_AUTOFIX) \ x(bkey_in_missing_snapshot, 190, 0) \ x(inode_pos_inode_nonzero, 191, 0) \ x(inode_pos_blockdev_range, 192, 0) \ @@ -236,6 +236,9 @@ enum bch_fsck_flags { x(inode_has_child_snapshots_wrong, 287, 0) \ x(inode_unreachable, 210, FSCK_AUTOFIX) \ x(inode_journal_seq_in_future, 299, FSCK_AUTOFIX) \ + x(inode_i_sectors_underflow, 312, FSCK_AUTOFIX) \ + x(vfs_inode_i_blocks_underflow, 311, FSCK_AUTOFIX) \ + x(vfs_inode_i_blocks_not_zero_at_truncate, 313, FSCK_AUTOFIX) \ x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \ x(deleted_inode_missing, 212, FSCK_AUTOFIX) \ x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \ @@ -317,7 +320,9 @@ enum bch_fsck_flags { x(directory_size_mismatch, 303, FSCK_AUTOFIX) \ x(dirent_cf_name_too_big, 304, 0) \ x(dirent_stray_data_after_cf_name, 305, 0) \ - x(MAX, 308, 0) + x(rebalance_work_incorrectly_set, 309, FSCK_AUTOFIX) \ + x(rebalance_work_incorrectly_unset, 310, FSCK_AUTOFIX) \ + x(MAX, 314, 0) enum bch_sb_error_id { #define x(t, n, ...) BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c index 116131f95815..72779912939b 100644 --- a/fs/bcachefs/sb-members.c +++ b/fs/bcachefs/sb-members.c @@ -15,9 +15,11 @@ void bch2_dev_missing(struct bch_fs *c, unsigned dev) bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev); } -void bch2_dev_bucket_missing(struct bch_fs *c, struct bpos bucket) +void bch2_dev_bucket_missing(struct bch_dev *ca, u64 bucket) { - bch2_fs_inconsistent(c, "pointer to nonexistent bucket %llu:%llu", bucket.inode, bucket.offset); + bch2_fs_inconsistent(ca->fs, + "pointer to nonexistent bucket %llu on device %s (valid range %u-%llu)", + bucket, ca->name, ca->mi.first_bucket, ca->mi.nbuckets); } #define x(t, n, ...) 
[n] = #t, diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h index 06bb41a3f360..42786657522c 100644 --- a/fs/bcachefs/sb-members.h +++ b/fs/bcachefs/sb-members.h @@ -249,20 +249,23 @@ static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev) static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket) { struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode); - if (ca && !bucket_valid(ca, bucket.offset)) { + if (ca && unlikely(!bucket_valid(ca, bucket.offset))) { bch2_dev_put(ca); ca = NULL; } return ca; } -void bch2_dev_bucket_missing(struct bch_fs *, struct bpos); +void bch2_dev_bucket_missing(struct bch_dev *, u64); static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket) { - struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket); - if (!ca) - bch2_dev_bucket_missing(c, bucket); + struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode); + if (ca && unlikely(!bucket_valid(ca, bucket.offset))) { + bch2_dev_bucket_missing(ca, bucket.offset); + bch2_dev_put(ca); + ca = NULL; + } return ca; } diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index 5537283d0bea..d0209f7658bb 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -6,6 +6,7 @@ #include "errcode.h" #include "error.h" #include "fs.h" +#include "recovery_passes.h" #include "snapshot.h" #include "subvolume.h" @@ -44,8 +45,8 @@ static int check_subvol(struct btree_trans *trans, ret = bch2_snapshot_lookup(trans, snapid, &snapshot); if (bch2_err_matches(ret, ENOENT)) - bch_err(c, "subvolume %llu points to nonexistent snapshot %u", - k.k->p.offset, snapid); + return bch2_run_explicit_recovery_pass(c, + BCH_RECOVERY_PASS_reconstruct_snapshots) ?: ret; if (ret) return ret; diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index e4ab0595c0ae..84a37d971ffd 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -377,6 +377,11 @@ void bch2_fs_read_only(struct bch_fs *c) bch_verbose(c, "marking filesystem clean"); bch2_fs_mark_clean(c); } else { + /* Make sure error counts/counters are persisted */ + mutex_lock(&c->sb_lock); + bch2_write_super(c); + mutex_unlock(&c->sb_lock); + bch_verbose(c, "done going read-only, filesystem not clean"); } } @@ -531,6 +536,10 @@ static void __bch2_fs_free(struct bch_fs *c) for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++) bch2_time_stats_exit(&c->times[i]); +#ifdef CONFIG_UNICODE + utf8_unload(c->cf_encoding); +#endif + bch2_find_btree_nodes_exit(&c->found_btree_nodes); bch2_free_pending_node_rewrites(c); bch2_free_fsck_errs(c); @@ -823,25 +832,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) if (ret) goto err; -#ifdef CONFIG_UNICODE - /* Default encoding until we can potentially have more as an option. */ - c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING); - if (IS_ERR(c->cf_encoding)) { - printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. Version: %u.%u.%u", - unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING), - unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING), - unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING)); - ret = -EINVAL; - goto err; - } -#else - if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) { - printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n"); - ret = -EINVAL; - goto err; - } -#endif - pr_uuid(&name, c->sb.user_uuid.b); ret = name.allocation_failure ? 
-BCH_ERR_ENOMEM_fs_name_alloc : 0; if (ret) @@ -941,6 +931,29 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) if (ret) goto err; +#ifdef CONFIG_UNICODE + /* Default encoding until we can potentially have more as an option. */ + c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING); + if (IS_ERR(c->cf_encoding)) { + printk(KERN_ERR "Cannot load UTF-8 encoding for filesystem. Version: %u.%u.%u", + unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING), + unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING), + unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING)); + ret = -EINVAL; + goto err; + } + bch_info(c, "Using encoding defined by superblock: utf8-%u.%u.%u", + unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING), + unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING), + unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING)); +#else + if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) { + printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n"); + ret = -EINVAL; + goto err; + } +#endif + for (i = 0; i < c->sb.nr_devices; i++) { if (!bch2_member_exists(c->disk_sb.sb, i)) continue; diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c index dea73bc1cb51..314a24d15d4e 100644 --- a/fs/bcachefs/thread_with_file.c +++ b/fs/bcachefs/thread_with_file.c @@ -455,8 +455,10 @@ ssize_t bch2_stdio_redirect_vprintf(struct stdio_redirect *stdio, bool nonblocki struct stdio_buf *buf = &stdio->output; unsigned long flags; ssize_t ret; - again: + if (stdio->done) + return -EPIPE; + spin_lock_irqsave(&buf->lock, flags); ret = bch2_darray_vprintf(&buf->buf, GFP_NOWAIT, fmt, args); spin_unlock_irqrestore(&buf->lock, flags); diff --git a/fs/bcachefs/xattr_format.h b/fs/bcachefs/xattr_format.h index c7916011ef34..67426e33d04e 100644 --- a/fs/bcachefs/xattr_format.h +++ b/fs/bcachefs/xattr_format.h @@ -13,7 +13,13 @@ struct bch_xattr { __u8 x_type; __u8 x_name_len; __le16 x_val_len; - __u8 x_name[] __counted_by(x_name_len); + /* + * x_name contains the name and value counted by + * x_name_len + x_val_len. The introduction of + * __counted_by(x_name_len) caused a false positive + * detection of an out of bounds write. 
+ */ + __u8 x_name[]; } __packed __aligned(8); #endif /* _BCACHEFS_XATTR_FORMAT_H */ diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index e7f8ee5d48a4..7f11ef559be6 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -606,7 +606,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio) free_extent_map(em); cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE); - cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct page *), GFP_NOFS); + cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS); if (!cb->compressed_folios) { ret = BLK_STS_RESOURCE; goto out_free_bio; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 197f5e51c474..13bdd60da3c7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2047,7 +2047,7 @@ static int submit_eb_subpage(struct folio *folio, struct writeback_control *wbc) subpage->bitmaps)) { spin_unlock_irqrestore(&subpage->lock, flags); spin_unlock(&folio->mapping->i_private_lock); - bit_start++; + bit_start += sectors_per_node; continue; } @@ -3508,8 +3508,8 @@ static void btree_clear_folio_dirty_tag(struct folio *folio) ASSERT(folio_test_locked(folio)); xa_lock_irq(&folio->mapping->i_pages); if (!folio_test_dirty(folio)) - __xa_clear_mark(&folio->mapping->i_pages, - folio_index(folio), PAGECACHE_TAG_DIRTY); + __xa_clear_mark(&folio->mapping->i_pages, folio->index, + PAGECACHE_TAG_DIRTY); xa_unlock_irq(&folio->mapping->i_pages); } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 2e261892c7bc..f5b28b5c4908 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -298,6 +298,8 @@ static inline int __pure num_extent_pages(const struct extent_buffer *eb) */ static inline int __pure num_extent_folios(const struct extent_buffer *eb) { + if (!eb->folios[0]) + return 0; if (folio_order(eb->folios[0])) return 1; return num_extent_pages(eb); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cc67d1a2d611..bdafe4d4c4a5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2129,12 +2129,13 @@ next_slot: /* * If the found extent starts after requested offset, then - * adjust extent_end to be right before this extent begins + * adjust cur_offset to be right before this extent begins. */ if (found_key.offset > cur_offset) { - extent_end = found_key.offset; - extent_type = 0; - goto must_cow; + if (cow_start == (u64)-1) + cow_start = cur_offset; + cur_offset = found_key.offset; + goto next_slot; } /* @@ -5681,8 +5682,10 @@ struct btrfs_inode *btrfs_iget(u64 ino, struct btrfs_root *root) return inode; path = btrfs_alloc_path(); - if (!path) + if (!path) { + iget_failed(&inode->vfs_inode); return ERR_PTR(-ENOMEM); + } ret = btrfs_read_locked_inode(inode, path); btrfs_free_path(path); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2c5edcee9450..c3b2e29e3e01 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1541,8 +1541,8 @@ static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg, u64 extent_gen; int ret; - if (unlikely(!extent_root)) { - btrfs_err(fs_info, "no valid extent root for scrub"); + if (unlikely(!extent_root || !csum_root)) { + btrfs_err(fs_info, "no valid extent or csum root for scrub"); return -EUCLEAN; } memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) * diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c8c21c55be53..8e6b6fed7429 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -733,82 +733,6 @@ const u8 *btrfs_sb_fsid_ptr(const struct btrfs_super_block *sb) return has_metadata_uuid ? 
sb->metadata_uuid : sb->fsid; } -/* - * We can have very weird soft links passed in. - * One example is "/proc/self/fd/<fd>", which can be a soft link to - * a block device. - * - * But it's never a good idea to use those weird names. - * Here we check if the path (not following symlinks) is a good one inside - * "/dev/". - */ -static bool is_good_dev_path(const char *dev_path) -{ - struct path path = { .mnt = NULL, .dentry = NULL }; - char *path_buf = NULL; - char *resolved_path; - bool is_good = false; - int ret; - - if (!dev_path) - goto out; - - path_buf = kmalloc(PATH_MAX, GFP_KERNEL); - if (!path_buf) - goto out; - - /* - * Do not follow soft link, just check if the original path is inside - * "/dev/". - */ - ret = kern_path(dev_path, 0, &path); - if (ret) - goto out; - resolved_path = d_path(&path, path_buf, PATH_MAX); - if (IS_ERR(resolved_path)) - goto out; - if (strncmp(resolved_path, "/dev/", strlen("/dev/"))) - goto out; - is_good = true; -out: - kfree(path_buf); - path_put(&path); - return is_good; -} - -static int get_canonical_dev_path(const char *dev_path, char *canonical) -{ - struct path path = { .mnt = NULL, .dentry = NULL }; - char *path_buf = NULL; - char *resolved_path; - int ret; - - if (!dev_path) { - ret = -EINVAL; - goto out; - } - - path_buf = kmalloc(PATH_MAX, GFP_KERNEL); - if (!path_buf) { - ret = -ENOMEM; - goto out; - } - - ret = kern_path(dev_path, LOOKUP_FOLLOW, &path); - if (ret) - goto out; - resolved_path = d_path(&path, path_buf, PATH_MAX); - if (IS_ERR(resolved_path)) { - ret = PTR_ERR(resolved_path); - goto out; - } - ret = strscpy(canonical, resolved_path, PATH_MAX); -out: - kfree(path_buf); - path_put(&path); - return ret; -} - static bool is_same_device(struct btrfs_device *device, const char *new_path) { struct path old = { .mnt = NULL, .dentry = NULL }; @@ -1513,23 +1437,12 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, bool new_device_added = false; struct btrfs_device *device = NULL; struct file *bdev_file; - char *canonical_path = NULL; u64 bytenr; dev_t devt; int ret; lockdep_assert_held(&uuid_mutex); - if (!is_good_dev_path(path)) { - canonical_path = kmalloc(PATH_MAX, GFP_KERNEL); - if (canonical_path) { - ret = get_canonical_dev_path(path, canonical_path); - if (ret < 0) { - kfree(canonical_path); - canonical_path = NULL; - } - } - } /* * Avoid an exclusive open here, as the systemd-udev may initiate the * device scan which may race with the user's mount or mkfs command, @@ -1574,8 +1487,7 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags, goto free_disk_super; } - device = device_list_add(canonical_path ? 
: path, disk_super, - &new_device_added); + device = device_list_add(path, disk_super, &new_device_added); if (!IS_ERR(device) && new_device_added) btrfs_free_stale_devices(device->devt, device); @@ -1584,7 +1496,6 @@ free_disk_super: error_bdev_put: fput(bdev_file); - kfree(canonical_path); return device; } diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 4fa0a0121288..60c7cc4c105c 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -150,10 +150,10 @@ io_retry: io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9; attached = 0; } - if (!attached++) - erofs_onlinefolio_split(folio); if (!bio_add_folio(&io->rq->bio, folio, len, cur)) goto io_retry; + if (!attached++) + erofs_onlinefolio_split(folio); io->dev.m_pa += len; } cur += len; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index cadec6b1b554..da6ee7c39290 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -357,7 +357,6 @@ static void erofs_default_options(struct erofs_sb_info *sbi) enum { Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum, Opt_device, Opt_fsid, Opt_domain_id, Opt_directio, - Opt_err }; static const struct constant_table erofs_param_cache_strategy[] = { diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 5c061aaeeb45..b8e6b76c23d5 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -79,9 +79,6 @@ struct z_erofs_pcluster { /* L: whether partial decompression or not */ bool partial; - /* L: indicate several pageofs_outs or not */ - bool multibases; - /* L: whether extra buffer allocations are best-effort */ bool besteffort; @@ -1046,8 +1043,6 @@ static int z_erofs_scan_folio(struct z_erofs_frontend *f, break; erofs_onlinefolio_split(folio); - if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) - f->pcl->multibases = true; if (f->pcl->length < offset + end - map->m_la) { f->pcl->length = offset + end - map->m_la; f->pcl->pageofs_out = map->m_la & ~PAGE_MASK; @@ -1093,7 +1088,6 @@ struct z_erofs_backend { struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES]; struct super_block *sb; struct z_erofs_pcluster *pcl; - /* pages with the longest decompressed length for deduplication */ struct page **decompressed_pages; /* pages to keep the compressed data */ @@ -1102,6 +1096,8 @@ struct z_erofs_backend { struct list_head decompressed_secondary_bvecs; struct page **pagepool; unsigned int onstack_used, nr_pages; + /* indicate if temporary copies should be preserved for later use */ + bool keepxcpy; }; struct z_erofs_bvec_item { @@ -1112,18 +1108,20 @@ struct z_erofs_bvec_item { static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be, struct z_erofs_bvec *bvec) { + int poff = bvec->offset + be->pcl->pageofs_out; struct z_erofs_bvec_item *item; - unsigned int pgnr; - - if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) && - (bvec->end == PAGE_SIZE || - bvec->offset + bvec->end == be->pcl->length)) { - pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; - DBG_BUGON(pgnr >= be->nr_pages); - if (!be->decompressed_pages[pgnr]) { - be->decompressed_pages[pgnr] = bvec->page; + struct page **page; + + if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE || + bvec->offset + bvec->end == be->pcl->length)) { + DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages); + page = be->decompressed_pages + (poff >> PAGE_SHIFT); + if (!*page) { + *page = bvec->page; return; } + } else { + be->keepxcpy = true; } /* (cold path) one pcluster is requested multiple times */ @@ -1289,7 +1287,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) .alg = pcl->algorithmformat, 
.inplace_io = overlapped, .partial_decoding = pcl->partial, - .fillgaps = pcl->multibases, + .fillgaps = be->keepxcpy, .gfp = pcl->besteffort ? GFP_KERNEL : GFP_NOWAIT | __GFP_NORETRY }, be->pagepool); @@ -1346,7 +1344,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) pcl->length = 0; pcl->partial = true; - pcl->multibases = false; pcl->besteffort = false; pcl->bvset.nextpage = NULL; pcl->vcnt = 0; diff --git a/fs/namespace.c b/fs/namespace.c index 98a5cd756e9a..1b466c54a357 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -787,15 +787,11 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) return 0; mnt = real_mount(bastard); mnt_add_count(mnt, 1); - smp_mb(); // see mntput_no_expire() + smp_mb(); // see mntput_no_expire() and do_umount() if (likely(!read_seqretry(&mount_lock, seq))) return 0; - if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { - mnt_add_count(mnt, -1); - return 1; - } lock_mount_hash(); - if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { + if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) { mnt_add_count(mnt, -1); unlock_mount_hash(); return 1; @@ -2048,6 +2044,7 @@ static int do_umount(struct mount *mnt, int flags) umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { + smp_mb(); // paired with __legitimize_mnt() shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { @@ -3560,7 +3557,8 @@ static int can_move_mount_beneath(const struct path *from, * @mnt_from itself. This defeats the whole purpose of mounting * @mnt_from beneath @mnt_to. */ - if (propagation_would_overmount(parent_mnt_to, mnt_from, mp)) + if (check_mnt(mnt_from) && + propagation_would_overmount(parent_mnt_to, mnt_from, mp)) return -EINVAL; return 0; @@ -3718,15 +3716,14 @@ static int do_move_mount(struct path *old_path, if (err) goto out; - if (is_anon_ns(ns)) - ns->mntns_flags &= ~MNTNS_PROPAGATING; - /* if the mount is moved, it should no longer be expire * automatically */ list_del_init(&old->mnt_expire); if (attached) put_mountpoint(old_mp); out: + if (is_anon_ns(ns)) + ns->mntns_flags &= ~MNTNS_PROPAGATING; unlock_mount(mp); if (!err) { if (attached) { diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index cb01ea81724d..d0bcf744c553 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -705,8 +705,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) int blocksize; int err; - down_write(&nilfs->ns_sem); - blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); if (!blocksize) { nilfs_err(sb, "unable to set blocksize"); @@ -779,7 +777,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) set_nilfs_init(nilfs); err = 0; out: - up_write(&nilfs->ns_sem); return err; failed_sbh: diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index f2d840ae4ded..87f861e9004f 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1961,12 +1961,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, return -EINVAL; if (mark_cmd == FAN_MARK_FLUSH) { - if (mark_type == FAN_MARK_MOUNT) - fsnotify_clear_vfsmount_marks_by_group(group); - else if (mark_type == FAN_MARK_FILESYSTEM) - fsnotify_clear_sb_marks_by_group(group); - else - fsnotify_clear_inode_marks_by_group(group); + fsnotify_clear_marks_by_group(group, obj_type); return 0; } diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index b8ac85b548c7..821cb7874685 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6918,6 +6918,7 @@ static int 
ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end, if (IS_ERR(folios[numfolios])) { ret = PTR_ERR(folios[numfolios]); mlog_errno(ret); + folios[numfolios] = NULL; goto out; } diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index c7a9729dc9d0..e5f58ff2175f 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -174,7 +174,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) struct ocfs2_recovery_map *rm; mutex_init(&osb->recovery_lock); - osb->disable_recovery = 0; + osb->recovery_state = OCFS2_REC_ENABLED; osb->recovery_thread_task = NULL; init_waitqueue_head(&osb->recovery_event); @@ -190,31 +190,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) return 0; } -/* we can't grab the goofy sem lock from inside wait_event, so we use - * memory barriers to make sure that we'll see the null task before - * being woken up */ static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) { - mb(); return osb->recovery_thread_task != NULL; } -void ocfs2_recovery_exit(struct ocfs2_super *osb) +static void ocfs2_recovery_disable(struct ocfs2_super *osb, + enum ocfs2_recovery_state state) { - struct ocfs2_recovery_map *rm; - - /* disable any new recovery threads and wait for any currently - * running ones to exit. Do this before setting the vol_state. */ mutex_lock(&osb->recovery_lock); - osb->disable_recovery = 1; + /* + * If recovery thread is not running, we can directly transition to + * final state. + */ + if (!ocfs2_recovery_thread_running(osb)) { + osb->recovery_state = state + 1; + goto out_lock; + } + osb->recovery_state = state; + /* Wait for recovery thread to acknowledge state transition */ + wait_event_cmd(osb->recovery_event, + !ocfs2_recovery_thread_running(osb) || + osb->recovery_state >= state + 1, + mutex_unlock(&osb->recovery_lock), + mutex_lock(&osb->recovery_lock)); +out_lock: mutex_unlock(&osb->recovery_lock); - wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); - /* At this point, we know that no more recovery threads can be - * launched, so wait for any recovery completion work to - * complete. */ + /* + * At this point we know that no more recovery work can be queued so + * wait for any recovery completion work to complete. + */ if (osb->ocfs2_wq) flush_workqueue(osb->ocfs2_wq); +} + +void ocfs2_recovery_disable_quota(struct ocfs2_super *osb) +{ + ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE); +} + +void ocfs2_recovery_exit(struct ocfs2_super *osb) +{ + struct ocfs2_recovery_map *rm; + + /* disable any new recovery threads and wait for any currently + * running ones to exit. Do this before setting the vol_state. 
*/ + ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE); /* * Now that recovery is shut down, and the osb is about to be @@ -1472,6 +1494,18 @@ static int __ocfs2_recovery_thread(void *arg) } } restart: + if (quota_enabled) { + mutex_lock(&osb->recovery_lock); + /* Confirm that recovery thread will no longer recover quotas */ + if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) { + osb->recovery_state = OCFS2_REC_QUOTA_DISABLED; + wake_up(&osb->recovery_event); + } + if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED) + quota_enabled = 0; + mutex_unlock(&osb->recovery_lock); + } + status = ocfs2_super_lock(osb, 1); if (status < 0) { mlog_errno(status); @@ -1569,27 +1603,29 @@ bail: ocfs2_free_replay_slots(osb); osb->recovery_thread_task = NULL; - mb(); /* sync with ocfs2_recovery_thread_running */ + if (osb->recovery_state == OCFS2_REC_WANT_DISABLE) + osb->recovery_state = OCFS2_REC_DISABLED; wake_up(&osb->recovery_event); mutex_unlock(&osb->recovery_lock); - if (quota_enabled) - kfree(rm_quota); + kfree(rm_quota); return status; } void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) { + int was_set = -1; + mutex_lock(&osb->recovery_lock); + if (osb->recovery_state < OCFS2_REC_WANT_DISABLE) + was_set = ocfs2_recovery_map_set(osb, node_num); trace_ocfs2_recovery_thread(node_num, osb->node_num, - osb->disable_recovery, osb->recovery_thread_task, - osb->disable_recovery ? - -1 : ocfs2_recovery_map_set(osb, node_num)); + osb->recovery_state, osb->recovery_thread_task, was_set); - if (osb->disable_recovery) + if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE) goto out; if (osb->recovery_thread_task) diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index e3c3a35dc5e0..6397170f302f 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb); int ocfs2_recovery_init(struct ocfs2_super *osb); void ocfs2_recovery_exit(struct ocfs2_super *osb); +void ocfs2_recovery_disable_quota(struct ocfs2_super *osb); int ocfs2_compute_replay_slots(struct ocfs2_super *osb); void ocfs2_free_replay_slots(struct ocfs2_super *osb); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 51c52768132d..6aaa94c554c1 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -308,6 +308,21 @@ enum ocfs2_journal_trigger_type { void ocfs2_initialize_journal_triggers(struct super_block *sb, struct ocfs2_triggers triggers[]); +enum ocfs2_recovery_state { + OCFS2_REC_ENABLED = 0, + OCFS2_REC_QUOTA_WANT_DISABLE, + /* + * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for + * ocfs2_recovery_disable_quota() to work. + */ + OCFS2_REC_QUOTA_DISABLED, + OCFS2_REC_WANT_DISABLE, + /* + * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work + */ + OCFS2_REC_DISABLED, +}; + struct ocfs2_journal; struct ocfs2_slot_info; struct ocfs2_recovery_map; @@ -370,7 +385,7 @@ struct ocfs2_super struct ocfs2_recovery_map *recovery_map; struct ocfs2_replay_map *replay_map; struct task_struct *recovery_thread_task; - int disable_recovery; + enum ocfs2_recovery_state recovery_state; wait_queue_head_t checkpoint_event; struct ocfs2_journal *journal; unsigned long osb_commit_interval; diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 2956d888c131..e272429da3db 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -453,8 +453,7 @@ out: /* Sync changes in local quota file into global quota file and * reinitialize local quota file. 
- * The function expects local quota file to be already locked and - * s_umount locked in shared mode. */ + * The function expects local quota file to be already locked. */ static int ocfs2_recover_local_quota_file(struct inode *lqinode, int type, struct ocfs2_quota_recovery *rec) @@ -588,7 +587,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, { unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, LOCAL_GROUP_QUOTA_SYSTEM_INODE }; - struct super_block *sb = osb->sb; struct ocfs2_local_disk_dqinfo *ldinfo; struct buffer_head *bh; handle_t *handle; @@ -600,7 +598,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " "slot %u\n", osb->dev_str, slot_num); - down_read(&sb->s_umount); for (type = 0; type < OCFS2_MAXQUOTAS; type++) { if (list_empty(&(rec->r_list[type]))) continue; @@ -677,7 +674,6 @@ out_put: break; } out: - up_read(&sb->s_umount); kfree(rec); return status; } @@ -843,8 +839,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); /* - * s_umount held in exclusive mode protects us against racing with - * recovery thread... + * ocfs2_dismount_volume() has already aborted quota recovery... */ if (oinfo->dqi_rec) { ocfs2_free_quota_recovery(oinfo->dqi_rec); diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index f7b483f0de2a..6ac4dcd54588 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -698,10 +698,12 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, bg_bh = ocfs2_block_group_alloc_contig(osb, handle, alloc_inode, ac, cl); - if (PTR_ERR(bg_bh) == -ENOSPC) + if (PTR_ERR(bg_bh) == -ENOSPC) { + ac->ac_which = OCFS2_AC_USE_MAIN_DISCONTIG; bg_bh = ocfs2_block_group_alloc_discontig(handle, alloc_inode, ac, cl); + } if (IS_ERR(bg_bh)) { status = PTR_ERR(bg_bh); bg_bh = NULL; @@ -1794,6 +1796,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, { int status; u16 chain; + u32 contig_bits; u64 next_group; struct inode *alloc_inode = ac->ac_inode; struct buffer_head *group_bh = NULL; @@ -1819,10 +1822,21 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, status = -ENOSPC; /* for now, the chain search is a bit simplistic. We just use * the 1st group with any empty bits. */ - while ((status = ac->ac_group_search(alloc_inode, group_bh, - bits_wanted, min_bits, - ac->ac_max_block, - res)) == -ENOSPC) { + while (1) { + if (ac->ac_which == OCFS2_AC_USE_MAIN_DISCONTIG) { + contig_bits = le16_to_cpu(bg->bg_contig_free_bits); + if (!contig_bits) + contig_bits = ocfs2_find_max_contig_free_bits(bg->bg_bitmap, + le16_to_cpu(bg->bg_bits), 0); + if (bits_wanted > contig_bits && contig_bits >= min_bits) + bits_wanted = contig_bits; + } + + status = ac->ac_group_search(alloc_inode, group_bh, + bits_wanted, min_bits, + ac->ac_max_block, res); + if (status != -ENOSPC) + break; if (!bg->bg_next_group) break; @@ -1982,6 +1996,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, victim = ocfs2_find_victim_chain(cl); ac->ac_chain = victim; +search: status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, res, &bits_left); if (!status) { @@ -2022,6 +2037,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, } } + /* Chains can't supply the bits_wanted contiguous space. + * We should switch to using every single bit when allocating + * from the global bitmap. 
*/ + if (i == le16_to_cpu(cl->cl_next_free_rec) && + status == -ENOSPC && ac->ac_which == OCFS2_AC_USE_MAIN) { + ac->ac_which = OCFS2_AC_USE_MAIN_DISCONTIG; + ac->ac_chain = victim; + goto search; + } + set_hint: if (status != -ENOSPC) { /* If the next search of this group is not likely to @@ -2365,7 +2390,8 @@ int __ocfs2_claim_clusters(handle_t *handle, BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted); BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL - && ac->ac_which != OCFS2_AC_USE_MAIN); + && ac->ac_which != OCFS2_AC_USE_MAIN + && ac->ac_which != OCFS2_AC_USE_MAIN_DISCONTIG); if (ac->ac_which == OCFS2_AC_USE_LOCAL) { WARN_ON(min_clusters > 1); diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index b481b834857d..bcf2ed4a8631 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -29,6 +29,7 @@ struct ocfs2_alloc_context { #define OCFS2_AC_USE_MAIN 2 #define OCFS2_AC_USE_INODE 3 #define OCFS2_AC_USE_META 4 +#define OCFS2_AC_USE_MAIN_DISCONTIG 5 u32 ac_which; /* these are used by the chain search */ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 8bb5022f3082..3d2533950bae 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1812,6 +1812,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) /* Orphan scan should be stopped as early as possible */ ocfs2_orphan_scan_stop(osb); + /* Stop quota recovery so that we can disable quotas */ + ocfs2_recovery_disable_quota(osb); + ocfs2_disable_quotas(osb); /* All dquots should be freed by now */ diff --git a/fs/pnode.c b/fs/pnode.c index 7a062a5de10e..fb77427df39e 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -150,7 +150,7 @@ static struct mount *propagation_next(struct mount *m, struct mount *origin) { /* are there any slaves of this mount? */ - if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list)) + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) return first_slave(m); while (1) { @@ -174,7 +174,7 @@ static struct mount *skip_propagation_subtree(struct mount *m, * Advance m such that propagation_next will not return * the slaves of m. 
*/ - if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list)) + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) m = last_slave(m); return m; @@ -185,7 +185,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin) while (1) { while (1) { struct mount *next; - if (!IS_MNT_PROPAGATED(m) && !list_empty(&m->mnt_slave_list)) + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) return first_slave(m); next = next_peer(m); if (m->mnt_group_id == origin->mnt_group_id) { @@ -226,11 +226,15 @@ static int propagate_one(struct mount *m, struct mountpoint *dest_mp) struct mount *child; int type; /* skip ones added by this propagate_mnt() */ - if (IS_MNT_PROPAGATED(m)) + if (IS_MNT_NEW(m)) return 0; - /* skip if mountpoint isn't covered by it */ + /* skip if mountpoint isn't visible in m */ if (!is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) return 0; + /* skip if m is in the anon_ns we are emptying */ + if (m->mnt_ns->mntns_flags & MNTNS_PROPAGATING) + return 0; + if (peers(m, last_dest)) { type = CL_MAKE_SHARED; } else { @@ -380,9 +384,6 @@ bool propagation_would_overmount(const struct mount *from, if (!IS_MNT_SHARED(from)) return false; - if (IS_MNT_PROPAGATED(to)) - return false; - if (to->mnt.mnt_root != mp->m_dentry) return false; diff --git a/fs/pnode.h b/fs/pnode.h index ddafe0d087ca..34b6247af01d 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -12,7 +12,7 @@ #define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED) #define IS_MNT_SLAVE(m) ((m)->mnt_master) -#define IS_MNT_PROPAGATED(m) (!(m)->mnt_ns || ((m)->mnt_ns->mntns_flags & MNTNS_PROPAGATING)) +#define IS_MNT_NEW(m) (!(m)->mnt_ns) #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED) #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE) #define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED) diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c index fe738623cf1b..240d82c6f908 100644 --- a/fs/smb/client/cached_dir.c +++ b/fs/smb/client/cached_dir.c @@ -29,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, { struct cached_fid *cfid; - spin_lock(&cfids->cfid_list_lock); list_for_each_entry(cfid, &cfids->entries, entry) { if (!strcmp(cfid->path, path)) { /* @@ -38,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, * being deleted due to a lease break. */ if (!cfid->time || !cfid->has_lease) { - spin_unlock(&cfids->cfid_list_lock); return NULL; } kref_get(&cfid->refcount); - spin_unlock(&cfids->cfid_list_lock); return cfid; } } if (lookup_only) { - spin_unlock(&cfids->cfid_list_lock); return NULL; } if (cfids->num_entries >= max_cached_dirs) { - spin_unlock(&cfids->cfid_list_lock); return NULL; } cfid = init_cached_dir(path); if (cfid == NULL) { - spin_unlock(&cfids->cfid_list_lock); return NULL; } cfid->cfids = cfids; @@ -74,7 +68,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, */ cfid->has_lease = true; - spin_unlock(&cfids->cfid_list_lock); return cfid; } @@ -187,8 +180,10 @@ replay_again: if (!utf16_path) return -ENOMEM; + spin_lock(&cfids->cfid_list_lock); cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs); if (cfid == NULL) { + spin_unlock(&cfids->cfid_list_lock); kfree(utf16_path); return -ENOENT; } @@ -197,7 +192,6 @@ replay_again: * Otherwise, it is either a new entry or laundromat worker removed it * from @cfids->entries. Caller will put last reference if the latter. 
*/ - spin_lock(&cfids->cfid_list_lock); if (cfid->has_lease && cfid->time) { spin_unlock(&cfids->cfid_list_lock); *ret_cfid = cfid; diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h index 18d67ab113f0..1b79fe07476f 100644 --- a/fs/smb/client/cifspdu.h +++ b/fs/smb/client/cifspdu.h @@ -1266,10 +1266,9 @@ typedef struct smb_com_query_information_rsp { typedef struct smb_com_setattr_req { struct smb_hdr hdr; /* wct = 8 */ __le16 attr; - __le16 time_low; - __le16 time_high; + __le32 last_write_time; __le16 reserved[5]; /* must be zero */ - __u16 ByteCount; + __le16 ByteCount; __u8 BufferFormat; /* 4 = ASCII */ unsigned char fileName[]; } __attribute__((packed)) SETATTR_REQ; diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 59f6fdfe560e..ecf774a8f1ca 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -395,6 +395,10 @@ extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon); extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *FSData); +extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, + const char *fileName, __le32 attributes, __le64 write_time, + const struct nls_table *nls_codepage, + struct cifs_sb_info *cifs_sb); extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 60cb264a01e5..f55457b4b82e 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -5171,6 +5171,63 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, return rc; } +int +SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon, + const char *fileName, __le32 attributes, __le64 write_time, + const struct nls_table *nls_codepage, + struct cifs_sb_info *cifs_sb) +{ + SETATTR_REQ *pSMB; + SETATTR_RSP *pSMBr; + struct timespec64 ts; + int bytes_returned; + int name_len; + int rc; + + cifs_dbg(FYI, "In %s path %s\n", __func__, fileName); + +retry: + rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB, + (void **) &pSMBr); + if (rc) + return rc; + + if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) { + name_len = + cifsConvertToUTF16((__le16 *) pSMB->fileName, + fileName, PATH_MAX, nls_codepage, + cifs_remap(cifs_sb)); + name_len++; /* trailing null */ + name_len *= 2; + } else { + name_len = copy_path_name(pSMB->fileName, fileName); + } + /* Only few attributes can be set by this command, others are not accepted by Win9x. */ + pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) & + (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE)); + /* Zero write time value (in both NT and SETATTR formats) means to not change it. 
*/ + if (le64_to_cpu(write_time) != 0) { + ts = cifs_NTtimeToUnix(write_time); + pSMB->last_write_time = cpu_to_le32(ts.tv_sec); + } + pSMB->BufferFormat = 0x04; + name_len++; /* account for buffer type byte */ + inc_rfc1001_len(pSMB, (__u16)name_len); + pSMB->ByteCount = cpu_to_le16(name_len); + + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned, 0); + if (rc) + cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc); + + cifs_buf_release(pSMB); + + if (rc == -EAGAIN) + goto retry; + + return rc; +} + /* Some legacy servers such as NT4 require that the file times be set on an open handle, rather than by pathname - this is awkward due to potential access conflicts on the open, but it is unavoidable for these diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index df976ce6aed9..6bf04d9a5491 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -3753,28 +3753,7 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx) } } - /* - * Clamp the rsize/wsize mount arguments if they are too big for the server - * and set the rsize/wsize to the negotiated values if not passed in by - * the user on mount - */ - if ((cifs_sb->ctx->wsize == 0) || - (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) { - cifs_sb->ctx->wsize = - round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE); - /* - * in the very unlikely event that the server sent a max write size under PAGE_SIZE, - * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096 - */ - if (cifs_sb->ctx->wsize == 0) { - cifs_sb->ctx->wsize = PAGE_SIZE; - cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n"); - } - } - if ((cifs_sb->ctx->rsize == 0) || - (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx))) - cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx); - + cifs_negotiate_iosize(server, cifs_sb->ctx, tcon); /* * The cookie is initialized from volume info returned above. * Inside cifs_fscache_get_super_cookie it checks diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 9e8f404b9e56..851b74f557c1 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -160,10 +160,8 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq) server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); rdata->server = server; - if (cifs_sb->ctx->rsize == 0) - cifs_sb->ctx->rsize = - server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink), - cifs_sb->ctx); + cifs_negotiate_rsize(server, cifs_sb->ctx, + tlink_tcon(req->cfile->tlink)); rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &size, &rdata->credits); diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 2980941b9667..a634a34d4086 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -1021,6 +1021,7 @@ static int smb3_reconfigure(struct fs_context *fc) struct dentry *root = fc->root; struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses; + unsigned int rsize = ctx->rsize, wsize = ctx->wsize; char *new_password = NULL, *new_password2 = NULL; bool need_recon = false; int rc; @@ -1103,11 +1104,8 @@ static int smb3_reconfigure(struct fs_context *fc) STEAL_STRING(cifs_sb, ctx, iocharset); /* if rsize or wsize not passed in on remount, use previous values */ - if (ctx->rsize == 0) - ctx->rsize = cifs_sb->ctx->rsize; - if (ctx->wsize == 0) - ctx->wsize = cifs_sb->ctx->wsize; - + ctx->rsize = rsize ? 
CIFS_ALIGN_RSIZE(fc, rsize) : cifs_sb->ctx->rsize; + ctx->wsize = wsize ? CIFS_ALIGN_WSIZE(fc, wsize) : cifs_sb->ctx->wsize; smb3_cleanup_fs_context_contents(cifs_sb->ctx); rc = smb3_fs_context_dup(cifs_sb->ctx, ctx); @@ -1312,7 +1310,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, __func__); goto cifs_parse_mount_err; } - ctx->bsize = result.uint_32; + ctx->bsize = CIFS_ALIGN_BSIZE(fc, result.uint_32); ctx->got_bsize = true; break; case Opt_rasize: @@ -1336,24 +1334,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->rasize = result.uint_32; break; case Opt_rsize: - ctx->rsize = result.uint_32; + ctx->rsize = CIFS_ALIGN_RSIZE(fc, result.uint_32); ctx->got_rsize = true; ctx->vol_rsize = ctx->rsize; break; case Opt_wsize: - ctx->wsize = result.uint_32; + ctx->wsize = CIFS_ALIGN_WSIZE(fc, result.uint_32); ctx->got_wsize = true; - if (ctx->wsize % PAGE_SIZE != 0) { - ctx->wsize = round_down(ctx->wsize, PAGE_SIZE); - if (ctx->wsize == 0) { - ctx->wsize = PAGE_SIZE; - cifs_dbg(VFS, "wsize too small, reset to minimum %ld\n", PAGE_SIZE); - } else { - cifs_dbg(VFS, - "wsize rounded down to %d to multiple of PAGE_SIZE %ld\n", - ctx->wsize, PAGE_SIZE); - } - } ctx->vol_wsize = ctx->wsize; break; case Opt_acregmax: diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h index d1d29249bcdb..9e83302ce4b8 100644 --- a/fs/smb/client/fs_context.h +++ b/fs/smb/client/fs_context.h @@ -20,6 +20,21 @@ cifs_dbg(VFS, fmt, ## __VA_ARGS__); \ } while (0) +static inline size_t cifs_io_align(struct fs_context *fc, + const char *name, size_t size) +{ + if (!size || !IS_ALIGNED(size, PAGE_SIZE)) { + cifs_errorf(fc, "unaligned %s, making it a multiple of %lu bytes\n", + name, PAGE_SIZE); + size = umax(round_down(size, PAGE_SIZE), PAGE_SIZE); + } + return size; +} + +#define CIFS_ALIGN_WSIZE(_fc, _size) cifs_io_align(_fc, "wsize", _size) +#define CIFS_ALIGN_RSIZE(_fc, _size) cifs_io_align(_fc, "rsize", _size) +#define CIFS_ALIGN_BSIZE(_fc, _size) cifs_io_align(_fc, "bsize", _size) + enum smb_version { Smb_1 = 1, Smb_20, @@ -361,4 +376,36 @@ static inline void cifs_mount_unlock(void) mutex_unlock(&cifs_mount_mutex); } +static inline void cifs_negotiate_rsize(struct TCP_Server_Info *server, + struct smb3_fs_context *ctx, + struct cifs_tcon *tcon) +{ + unsigned int size; + + size = umax(server->ops->negotiate_rsize(tcon, ctx), PAGE_SIZE); + if (ctx->rsize) + size = umax(umin(ctx->rsize, size), PAGE_SIZE); + ctx->rsize = round_down(size, PAGE_SIZE); +} + +static inline void cifs_negotiate_wsize(struct TCP_Server_Info *server, + struct smb3_fs_context *ctx, + struct cifs_tcon *tcon) +{ + unsigned int size; + + size = umax(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE); + if (ctx->wsize) + size = umax(umin(ctx->wsize, size), PAGE_SIZE); + ctx->wsize = round_down(size, PAGE_SIZE); +} + +static inline void cifs_negotiate_iosize(struct TCP_Server_Info *server, + struct smb3_fs_context *ctx, + struct cifs_tcon *tcon) +{ + cifs_negotiate_rsize(server, ctx, tcon); + cifs_negotiate_wsize(server, ctx, tcon); +} + #endif diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c index 0adeec652dc1..b27a182629ec 100644 --- a/fs/smb/client/smb1ops.c +++ b/fs/smb/client/smb1ops.c @@ -432,7 +432,7 @@ cifs_negotiate(const unsigned int xid, } static unsigned int -cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) +smb1_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); struct 
TCP_Server_Info *server = tcon->ses->server; @@ -467,7 +467,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) } static unsigned int -cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) +smb1_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); struct TCP_Server_Info *server = tcon->ses->server; @@ -543,24 +543,104 @@ static int cifs_query_path_info(const unsigned int xid, const char *full_path, struct cifs_open_info_data *data) { - int rc; + int rc = -EOPNOTSUPP; FILE_ALL_INFO fi = {}; + struct cifs_search_info search_info = {}; + bool non_unicode_wildcard = false; data->reparse_point = false; data->adjust_tz = false; - /* could do find first instead but this returns more info */ - rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls, - cifs_remap(cifs_sb)); /* - * BB optimize code so we do not make the above call when server claims - * no NT SMB support and the above call failed at least once - set flag - * in tcon or mount. + * First try CIFSSMBQPathInfo() function which returns more info + * (NumberOfLinks) than CIFSFindFirst() fallback function. + * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over + * TRANS2_QUERY_PATH_INFORMATION, but supports it with filehandle over + * TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo(). But SMB + * Open command on non-NT servers works only for files, does not work + * for directories. And moreover Win9x SMB server returns bogus data in + * SMB_QUERY_FILE_ALL_INFO Attributes field. So for non-NT servers, + * do not even use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() function. */ - if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) { + if (tcon->ses->capabilities & CAP_NT_SMBS) + rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, + cifs_sb->local_nls, cifs_remap(cifs_sb)); + + /* + * Non-UNICODE variant of fallback functions below expands wildcards, + * so they cannot be used for querying paths with wildcard characters. + */ + if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?\"><")) + non_unicode_wildcard = true; + + /* + * Then fallback to CIFSFindFirst() which works also with non-NT servers + * but does not does not provide NumberOfLinks. 
+ */ + if ((rc == -EOPNOTSUPP || rc == -EINVAL) && + !non_unicode_wildcard) { + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) + search_info.info_level = SMB_FIND_FILE_INFO_STANDARD; + else + search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO; + rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL, + CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END, + &search_info, false); + if (rc == 0) { + if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) { + FIND_FILE_STANDARD_INFO *di; + int offset = tcon->ses->server->timeAdj; + + di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start; + fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( + di->CreationDate, di->CreationTime, offset))); + fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( + di->LastAccessDate, di->LastAccessTime, offset))); + fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm( + di->LastWriteDate, di->LastWriteTime, offset))); + fi.ChangeTime = fi.LastWriteTime; + fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes)); + fi.AllocationSize = cpu_to_le64(le32_to_cpu(di->AllocationSize)); + fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize)); + } else { + FILE_FULL_DIRECTORY_INFO *di; + + di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start; + fi.CreationTime = di->CreationTime; + fi.LastAccessTime = di->LastAccessTime; + fi.LastWriteTime = di->LastWriteTime; + fi.ChangeTime = di->ChangeTime; + fi.Attributes = di->ExtFileAttributes; + fi.AllocationSize = di->AllocationSize; + fi.EndOfFile = di->EndOfFile; + fi.EASize = di->EaSize; + } + fi.NumberOfLinks = cpu_to_le32(1); + fi.DeletePending = 0; + fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY); + cifs_buf_release(search_info.ntwrk_buf_start); + } else if (!full_path[0]) { + /* + * CIFSFindFirst() does not work on root path if the + * root path was exported on the server from the top + * level path (drive letter). + */ + rc = -EOPNOTSUPP; + } + } + + /* + * If everything failed then fallback to the legacy SMB command + * SMB_COM_QUERY_INFORMATION which works with all servers, but + * provide just few information. + */ + if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) { rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls, cifs_remap(cifs_sb)); data->adjust_tz = true; + } else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) { + /* Path with non-UNICODE wildcard character cannot exist. */ + rc = -ENOENT; } if (!rc) { @@ -639,6 +719,13 @@ static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, int rc; FILE_ALL_INFO fi = {}; + /* + * CIFSSMBQFileInfo() for non-NT servers returns bogus data in + * Attributes fields. So do not use this command for non-NT servers. 
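As background for the CIFSFindFirst() fallback above: SMB_FIND_FILE_INFO_STANDARD entries carry DOS-style 16-bit date and time words, which is what cnvrtDosUnixTm() has to unpack before converting to NT time (plus a server timezone adjustment, omitted here). The following standalone sketch shows only the classic FAT packing; it is an illustration, not the kernel routine.

#include <stdio.h>
#include <stdint.h>

struct dos_tm {
	int year, month, day, hour, min, sec;
};

/* Unpack the classic FAT/DOS 16-bit date and time words. */
static struct dos_tm dos_unpack(uint16_t date, uint16_t time)
{
	struct dos_tm t = {
		.year  = 1980 + (date >> 9),        /* bits 15..9: years since 1980 */
		.month = (date >> 5) & 0x0f,        /* bits  8..5: month 1..12      */
		.day   = date & 0x1f,               /* bits  4..0: day 1..31        */
		.hour  = time >> 11,                /* bits 15..11: hour            */
		.min   = (time >> 5) & 0x3f,        /* bits 10..5: minute           */
		.sec   = (time & 0x1f) * 2,         /* bits  4..0: 2-second units   */
	};
	return t;
}

int main(void)
{
	/* 2024-05-07 13:45:30 packed the DOS way */
	uint16_t date = ((2024 - 1980) << 9) | (5 << 5) | 7;
	uint16_t time = (13 << 11) | (45 << 5) | (30 / 2);
	struct dos_tm t = dos_unpack(date, time);

	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       t.year, t.month, t.day, t.hour, t.min, t.sec);
	return 0;
}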
+ */ + if (!(tcon->ses->capabilities & CAP_NT_SMBS)) + return -EOPNOTSUPP; + if (cfile->symlink_target) { data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL); if (!data->symlink_target) @@ -809,6 +896,9 @@ smb_set_file_info(struct inode *inode, const char *full_path, struct cifs_fid fid; struct cifs_open_parms oparms; struct cifsFileInfo *open_file; + FILE_BASIC_INFO new_buf; + struct cifs_open_info_data query_data; + __le64 write_time = buf->LastWriteTime; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = NULL; @@ -816,20 +906,58 @@ smb_set_file_info(struct inode *inode, const char *full_path, /* if the file is already open for write, just use that fileid */ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY); + if (open_file) { fid.netfid = open_file->fid.netfid; netpid = open_file->pid; tcon = tlink_tcon(open_file->tlink); - goto set_via_filehandle; + } else { + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) { + rc = PTR_ERR(tlink); + tlink = NULL; + goto out; + } + tcon = tlink_tcon(tlink); } - tlink = cifs_sb_tlink(cifs_sb); - if (IS_ERR(tlink)) { - rc = PTR_ERR(tlink); - tlink = NULL; - goto out; + /* + * Non-NT servers interprets zero time value in SMB_SET_FILE_BASIC_INFO + * over TRANS2_SET_FILE_INFORMATION as a valid time value. NT servers + * interprets zero time value as do not change existing value on server. + * API of ->set_file_info() callback expects that zero time value has + * the NT meaning - do not change. Therefore if server is non-NT and + * some time values in "buf" are zero, then fetch missing time values. + */ + if (!(tcon->ses->capabilities & CAP_NT_SMBS) && + (!buf->CreationTime || !buf->LastAccessTime || + !buf->LastWriteTime || !buf->ChangeTime)) { + rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data); + if (rc) { + if (open_file) { + cifsFileInfo_put(open_file); + open_file = NULL; + } + goto out; + } + /* + * Original write_time from buf->LastWriteTime is preserved + * as SMBSetInformation() interprets zero as do not change. + */ + new_buf = *buf; + buf = &new_buf; + if (!buf->CreationTime) + buf->CreationTime = query_data.fi.CreationTime; + if (!buf->LastAccessTime) + buf->LastAccessTime = query_data.fi.LastAccessTime; + if (!buf->LastWriteTime) + buf->LastWriteTime = query_data.fi.LastWriteTime; + if (!buf->ChangeTime) + buf->ChangeTime = query_data.fi.ChangeTime; } - tcon = tlink_tcon(tlink); + + if (open_file) + goto set_via_filehandle; rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, cifs_sb); @@ -850,8 +978,45 @@ smb_set_file_info(struct inode *inode, const char *full_path, .fid = &fid, }; - cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n"); - rc = CIFS_open(xid, &oparms, &oplock, NULL); + if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) { + /* Opening directory path is not possible on non-NT servers. */ + rc = -EOPNOTSUPP; + } else { + /* + * Use cifs_open_file() instead of CIFS_open() as the + * cifs_open_file() selects the correct function which + * works also on non-NT servers. + */ + rc = cifs_open_file(xid, &oparms, &oplock, NULL); + /* + * Opening path for writing on non-NT servers is not + * possible when the read-only attribute is already set. + * Non-NT server in this case returns -EACCES. For those + * servers the only possible way how to clear the read-only + * bit is via SMB_COM_SETATTR command. 
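Stepping back to the zero-time handling described earlier in smb_set_file_info(): for non-NT servers, any timestamp the caller left as zero is filled in from a fresh query so the server never sees a literal zero time. Below is a minimal userspace sketch of that merge rule with hypothetical field and function names; it only restates the convention, it is not the kernel code.

#include <stdio.h>
#include <stdint.h>

struct basic_times {
	uint64_t creation, last_access, last_write, change;
};

/* Any field the caller left as 0 ("do not change") is taken from the
 * currently stored values, so a literal zero never reaches the server. */
static void fill_missing_times(struct basic_times *req, const struct basic_times *cur)
{
	if (!req->creation)    req->creation    = cur->creation;
	if (!req->last_access) req->last_access = cur->last_access;
	if (!req->last_write)  req->last_write  = cur->last_write;
	if (!req->change)      req->change      = cur->change;
}

int main(void)
{
	struct basic_times cur = { 100, 200, 300, 400 };
	struct basic_times req = { 0, 0, 999, 0 };   /* only LastWriteTime requested */

	fill_missing_times(&req, &cur);
	printf("%llu %llu %llu %llu\n",
	       (unsigned long long)req.creation, (unsigned long long)req.last_access,
	       (unsigned long long)req.last_write, (unsigned long long)req.change);
	return 0;
}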
+ */ + if (rc == -EACCES && + (cinode->cifsAttrs & ATTR_READONLY) && + le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */ + !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) && + !(tcon->ses->capabilities & CAP_NT_SMBS)) + rc = -EOPNOTSUPP; + } + + /* Fallback to SMB_COM_SETATTR command when absolutelty needed. */ + if (rc == -EOPNOTSUPP) { + cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n"); + rc = SMBSetInformation(xid, tcon, full_path, + buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs), + write_time, + cifs_sb->local_nls, cifs_sb); + if (rc == 0) + cinode->cifsAttrs = le32_to_cpu(buf->Attributes); + else + rc = -EACCES; + goto out; + } + if (rc != 0) { if (rc == -EIO) rc = -EINVAL; @@ -859,6 +1024,7 @@ smb_set_file_info(struct inode *inode, const char *full_path, } netpid = current->tgid; + cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n"); set_via_filehandle: rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid); @@ -869,6 +1035,21 @@ set_via_filehandle: CIFSSMBClose(xid, tcon, fid.netfid); else cifsFileInfo_put(open_file); + + /* + * Setting the read-only bit is not honered on non-NT servers when done + * via open-semantics. So for setting it, use SMB_COM_SETATTR command. + * This command works only after the file is closed, so use it only when + * operation was called without the filehandle. + */ + if (open_file == NULL && + !(tcon->ses->capabilities & CAP_NT_SMBS) && + le32_to_cpu(buf->Attributes) & ATTR_READONLY) { + SMBSetInformation(xid, tcon, full_path, + buf->Attributes, + 0 /* do not change write time */, + cifs_sb->local_nls, cifs_sb); + } out: if (tlink != NULL) cifs_put_tlink(tlink); @@ -1161,8 +1342,8 @@ struct smb_version_operations smb1_operations = { .check_trans2 = cifs_check_trans2, .need_neg = cifs_need_neg, .negotiate = cifs_negotiate, - .negotiate_wsize = cifs_negotiate_wsize, - .negotiate_rsize = cifs_negotiate_rsize, + .negotiate_wsize = smb1_negotiate_wsize, + .negotiate_rsize = smb1_negotiate_rsize, .sess_setup = CIFS_SessSetup, .logoff = CIFSSMBLogoff, .tree_connect = CIFSTCon, diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index 57d9bfbadd97..2a3e46b8e15a 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -666,6 +666,8 @@ finished: /* smb2_parse_contexts() fills idata->fi.IndexNumber */ rc = smb2_parse_contexts(server, &rsp_iov[0], &oparms->fid->epoch, oparms->fid->lease_key, &oplock, &idata->fi, NULL); + if (rc) + cifs_dbg(VFS, "rc: %d parsing context of compound op\n", rc); } for (i = 0; i < num_cmds; i++) { diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index c4d52bebd37d..0b35816d551f 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -2921,6 +2921,7 @@ replay_again: req->CreateContextsOffset = cpu_to_le32( sizeof(struct smb2_create_req) + iov[1].iov_len); + le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len); pc_buf = iov[n_iov-1].iov_base; } @@ -4092,12 +4093,8 @@ static void cifs_renegotiate_iosize(struct TCP_Server_Info *server, return; spin_lock(&tcon->sb_list_lock); - list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) { - cifs_sb->ctx->rsize = - server->ops->negotiate_rsize(tcon, cifs_sb->ctx); - cifs_sb->ctx->wsize = - server->ops->negotiate_wsize(tcon, cifs_sb->ctx); - } + list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) + cifs_negotiate_iosize(server, cifs_sb->ctx, tcon); 
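The wsize/rsize handling that recurs in these hunks is centralized in the new cifs_io_align() and cifs_negotiate_rsize()/cifs_negotiate_wsize() helpers: clamp to the server-negotiated maximum, never go below one page, and round down to a page multiple. Below is a standalone userspace restatement of that policy, with PAGE_SIZE hardcoded to 4096 for the example; it is a sketch of the rule, not the kernel code.

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Clamp a user-supplied io size into [PAGE_SIZE, negotiated] and round it
 * down to a whole number of pages; 0 means "not set on mount". */
static unsigned int negotiate_size(unsigned int negotiated, unsigned int user)
{
	unsigned int size = max_u(negotiated, PAGE_SIZE);

	if (user)
		size = max_u(min_u(user, size), PAGE_SIZE);
	return size & ~(PAGE_SIZE - 1);
}

int main(void)
{
	printf("%u\n", negotiate_size(65536, 0));      /* 65536: negotiated value used  */
	printf("%u\n", negotiate_size(65536, 10000));  /* 8192: user value rounded down */
	printf("%u\n", negotiate_size(65536, 100));    /* 4096: clamped up to one page  */
	return 0;
}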
spin_unlock(&tcon->sb_list_lock); } diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c index 83caa3849749..b3d121052408 100644 --- a/fs/smb/server/auth.c +++ b/fs/smb/server/auth.c @@ -550,7 +550,19 @@ int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob, retval = -ENOMEM; goto out; } - sess->user = user; + + if (!sess->user) { + /* First successful authentication */ + sess->user = user; + } else { + if (!ksmbd_compare_user(sess->user, user)) { + ksmbd_debug(AUTH, "different user tried to reuse session\n"); + retval = -EPERM; + ksmbd_free_user(user); + goto out; + } + ksmbd_free_user(user); + } memcpy(sess->sess_key, resp->payload, resp->session_key_len); memcpy(out_blob, resp->payload + resp->session_key_len, diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 3f45f28f6f0f..9dec4c2940bc 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -59,10 +59,12 @@ static void ksmbd_session_rpc_clear_list(struct ksmbd_session *sess) struct ksmbd_session_rpc *entry; long index; + down_write(&sess->rpc_lock); xa_for_each(&sess->rpc_handle_list, index, entry) { xa_erase(&sess->rpc_handle_list, index); __session_rpc_close(sess, entry); } + up_write(&sess->rpc_lock); xa_destroy(&sess->rpc_handle_list); } @@ -92,7 +94,7 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) { struct ksmbd_session_rpc *entry, *old; struct ksmbd_rpc_command *resp; - int method; + int method, id; method = __rpc_method(rpc_name); if (!method) @@ -102,26 +104,29 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) if (!entry) return -ENOMEM; + down_read(&sess->rpc_lock); entry->method = method; - entry->id = ksmbd_ipc_id_alloc(); - if (entry->id < 0) + entry->id = id = ksmbd_ipc_id_alloc(); + if (id < 0) goto free_entry; - old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP); + old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP); if (xa_is_err(old)) goto free_id; - resp = ksmbd_rpc_open(sess, entry->id); + resp = ksmbd_rpc_open(sess, id); if (!resp) goto erase_xa; + up_read(&sess->rpc_lock); kvfree(resp); - return entry->id; + return id; erase_xa: xa_erase(&sess->rpc_handle_list, entry->id); free_id: ksmbd_rpc_id_free(entry->id); free_entry: kfree(entry); + up_read(&sess->rpc_lock); return -EINVAL; } @@ -129,9 +134,11 @@ void ksmbd_session_rpc_close(struct ksmbd_session *sess, int id) { struct ksmbd_session_rpc *entry; + down_write(&sess->rpc_lock); entry = xa_erase(&sess->rpc_handle_list, id); if (entry) __session_rpc_close(sess, entry); + up_write(&sess->rpc_lock); } int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id) @@ -439,6 +446,7 @@ static struct ksmbd_session *__session_create(int protocol) sess->sequence_number = 1; rwlock_init(&sess->tree_conns_lock); atomic_set(&sess->refcnt, 2); + init_rwsem(&sess->rpc_lock); ret = __init_smb2_session(sess); if (ret) diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h index f21348381d59..c5749d6ec715 100644 --- a/fs/smb/server/mgmt/user_session.h +++ b/fs/smb/server/mgmt/user_session.h @@ -63,6 +63,7 @@ struct ksmbd_session { rwlock_t tree_conns_lock; atomic_t refcnt; + struct rw_semaphore rpc_lock; }; static inline int test_session_flag(struct ksmbd_session *sess, int bit) diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 81a29857b1e3..03f606afad93 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -1496,7 +1496,7 @@ struct 
lease_ctx_info *parse_lease_state(void *open_req) if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < sizeof(struct create_lease_v2) - 4) - return NULL; + goto err_out; memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; @@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req) if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < sizeof(struct create_lease)) - return NULL; + goto err_out; memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; @@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req) lreq->version = 1; } return lreq; +err_out: + kfree(lreq); + return NULL; } /** diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 57839f9708bb..f2a2be8467c6 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -633,6 +633,11 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls) return name; } + if (*name == '\0') { + kfree(name); + return ERR_PTR(-EINVAL); + } + if (*name == '\\') { pr_err("not allow directory name included leading slash\n"); kfree(name); @@ -1445,7 +1450,7 @@ static int ntlm_authenticate(struct ksmbd_work *work, { struct ksmbd_conn *conn = work->conn; struct ksmbd_session *sess = work->sess; - struct channel *chann = NULL; + struct channel *chann = NULL, *old; struct ksmbd_user *user; u64 prev_id; int sz, rc; @@ -1557,7 +1562,12 @@ binding_session: return -ENOMEM; chann->conn = conn; - xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP); + old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann, + KSMBD_DEFAULT_GFP); + if (xa_is_err(old)) { + kfree(chann); + return xa_err(old); + } } } @@ -1602,11 +1612,6 @@ static int krb5_authenticate(struct ksmbd_work *work, if (prev_sess_id && prev_sess_id != sess->id) destroy_previous_session(conn, sess->user, prev_sess_id); - if (sess->state == SMB2_SESSION_VALID) { - ksmbd_free_user(sess->user); - sess->user = NULL; - } - retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, out_blob, &out_len); if (retval) { @@ -2249,10 +2254,6 @@ int smb2_session_logoff(struct ksmbd_work *work) sess->state = SMB2_SESSION_EXPIRED; up_write(&conn->session_lock); - if (sess->user) { - ksmbd_free_user(sess->user); - sess->user = NULL; - } ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP); rsp->StructureSize = cpu_to_le16(4); diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 391d07da586c..482eba0f4dc1 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -426,6 +426,13 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, goto out; } + if (v_len <= *pos) { + pr_err("stream write position %lld is out of bounds (stream length: %zd)\n", + *pos, v_len); + err = -EINVAL; + goto out; + } + if (v_len < size) { wbuf = kvzalloc(size, KSMBD_DEFAULT_GFP); if (!wbuf) { diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c index 1f8fa3468173..dfed6fce8904 100644 --- a/fs/smb/server/vfs_cache.c +++ b/fs/smb/server/vfs_cache.c @@ -661,21 +661,40 @@ __close_file_table_ids(struct ksmbd_file_table *ft, bool (*skip)(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)) { - unsigned int id; - struct ksmbd_file *fp; - int num = 0; + struct ksmbd_file *fp; + unsigned int id = 0; + int num = 0; + + while (1) { + write_lock(&ft->lock); + fp = idr_get_next(ft->idr, &id); + if (!fp) { + write_unlock(&ft->lock); + break; + } - 
idr_for_each_entry(ft->idr, fp, id) { - if (skip(tcon, fp)) + if (skip(tcon, fp) || + !atomic_dec_and_test(&fp->refcount)) { + id++; + write_unlock(&ft->lock); continue; + } set_close_state_blocked_works(fp); + idr_remove(ft->idr, fp->volatile_id); + fp->volatile_id = KSMBD_NO_FID; + write_unlock(&ft->lock); + + down_write(&fp->f_ci->m_lock); + list_del_init(&fp->node); + up_write(&fp->f_ci->m_lock); - if (!atomic_dec_and_test(&fp->refcount)) - continue; __ksmbd_close_fd(ft, fp); + num++; + id++; } + return num; } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index d80f94346199..22f4bf956ba1 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1585,8 +1585,11 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, user_uffdio_copy = (struct uffdio_copy __user *) arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_copy->copy))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_copy, user_uffdio_copy, @@ -1641,8 +1644,11 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, @@ -1744,8 +1750,11 @@ static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) user_uffdio_continue = (struct uffdio_continue __user *)arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_continue, user_uffdio_continue, @@ -1801,8 +1810,11 @@ static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long user_uffdio_poison = (struct uffdio_poison __user *)arg; ret = -EAGAIN; - if (atomic_read(&ctx->mmap_changing)) + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_poison->updated))) + return -EFAULT; goto out; + } ret = -EFAULT; if (copy_from_user(&uffdio_poison, user_uffdio_poison, @@ -1870,8 +1882,12 @@ static int userfaultfd_move(struct userfaultfd_ctx *ctx, user_uffdio_move = (struct uffdio_move __user *) arg; - if (atomic_read(&ctx->mmap_changing)) - return -EAGAIN; + ret = -EAGAIN; + if (unlikely(atomic_read(&ctx->mmap_changing))) { + if (unlikely(put_user(ret, &user_uffdio_move->move))) + return -EFAULT; + goto out; + } if (copy_from_user(&uffdio_move, user_uffdio_move, /* don't copy "move" last field */ diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h index 24ad120b8827..c33cba111171 100644 --- a/include/drm/ttm/ttm_backup.h +++ b/include/drm/ttm/ttm_backup.h @@ -9,14 +9,12 @@ #include <linux/mm_types.h> #include <linux/shmem_fs.h> -struct ttm_backup; - /** * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer * @handle: The handle to convert. * * Converts an opaque handle received from the - * struct ttm_backoup_ops::backup_page() function to an (invalid) + * ttm_backup_backup_page() function to an (invalid) * struct page pointer suitable for a struct page array. * * Return: An (invalid) struct page pointer. 
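On the ttm_backup helpers documented just above: converting an integer handle into a deliberately invalid struct page pointer is typically done with low-bit tagging, so the value can sit in a struct page array without ever colliding with a real, aligned pointer. The sketch below shows that generic trick with made-up names and a made-up encoding; it is an illustration of the idea, not TTM's actual scheme.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack an integer handle into a pointer-sized slot with the low bit set;
 * real pointers to properly aligned objects always have it clear. */
static void *handle_to_tagged_ptr(unsigned long handle)
{
	return (void *)((handle << 1) | 1);
}

static int is_tagged(const void *p)
{
	return (uintptr_t)p & 1;
}

static unsigned long tagged_ptr_to_handle(const void *p)
{
	assert(is_tagged(p));
	return (uintptr_t)p >> 1;
}

int main(void)
{
	void *slot = handle_to_tagged_ptr(42);

	printf("tagged=%d handle=%lu\n", is_tagged(slot), tagged_ptr_to_handle(slot));
	return 0;
}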
@@ -45,8 +43,8 @@ static inline bool ttm_backup_page_ptr_is_handle(const struct page *page) * * Return: The handle that was previously used in * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable - * for use as argument in the struct ttm_backup_ops drop() or - * copy_backed_up_page() functions. + * for use as argument in the struct ttm_backup_drop() or + * ttm_backup_copy_page() functions. */ static inline unsigned long ttm_backup_page_ptr_to_handle(const struct page *page) @@ -55,20 +53,20 @@ ttm_backup_page_ptr_to_handle(const struct page *page) return (unsigned long)page >> 1; } -void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle); +void ttm_backup_drop(struct file *backup, pgoff_t handle); -int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst, +int ttm_backup_copy_page(struct file *backup, struct page *dst, pgoff_t handle, bool intr); s64 -ttm_backup_backup_page(struct ttm_backup *backup, struct page *page, +ttm_backup_backup_page(struct file *backup, struct page *page, bool writeback, pgoff_t idx, gfp_t page_gfp, gfp_t alloc_gfp); -void ttm_backup_fini(struct ttm_backup *backup); +void ttm_backup_fini(struct file *backup); u64 ttm_backup_bytes_avail(void); -struct ttm_backup *ttm_backup_shmem_create(loff_t size); +struct file *ttm_backup_shmem_create(loff_t size); #endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 13cf47f3322f..406437ad674b 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -118,7 +118,7 @@ struct ttm_tt { * ttm_tt_create() callback is responsible for assigning * this field. */ - struct ttm_backup *backup; + struct file *backup; /** * @caching: The current caching state of the pages, see enum * ttm_caching. diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h index 39f203256c4f..6d1ce7f5da51 100644 --- a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h +++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h @@ -139,6 +139,7 @@ #define DISPLAY_PORT_RX_5 133 #define DISPLAY_PORT_RX_6 134 #define DISPLAY_PORT_RX_7 135 +#define USB_RX 136 #define LPASS_CLK_ID_PRI_MI2S_IBIT 1 #define LPASS_CLK_ID_PRI_MI2S_EBIT 2 diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 400fee6427a5..7a5b391dcc01 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -776,8 +776,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation); + unsigned int target_freq, unsigned int min, + unsigned int max, unsigned int relation); int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq); @@ -840,12 +840,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, return best; } -/* Works only on sorted freq-tables */ -static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, - unsigned int target_freq, - bool efficiencies) +static inline int find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int min, unsigned int max, + bool efficiencies) { - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) return cpufreq_table_find_index_al(policy, target_freq, @@ -855,6 +855,14 @@ 
static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, efficiencies); } +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq, + bool efficiencies) +{ + return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies); +} + /* Find highest freq at or below target in a table in ascending order */ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, unsigned int target_freq, @@ -908,12 +916,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, return best; } -/* Works only on sorted freq-tables */ -static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, - unsigned int target_freq, - bool efficiencies) +static inline int find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int min, unsigned int max, + bool efficiencies) { - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) return cpufreq_table_find_index_ah(policy, target_freq, @@ -923,6 +931,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, efficiencies); } +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq, + bool efficiencies) +{ + return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies); +} + /* Find closest freq to target in a table in ascending order */ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, unsigned int target_freq, @@ -993,12 +1009,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, return best; } -/* Works only on sorted freq-tables */ -static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, - unsigned int target_freq, - bool efficiencies) +static inline int find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int min, unsigned int max, + bool efficiencies) { - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) return cpufreq_table_find_index_ac(policy, target_freq, @@ -1008,7 +1024,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, efficiencies); } -static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq, + bool efficiencies) +{ + return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies); +} + +static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, + unsigned int min, unsigned int max, + int idx) { unsigned int freq; @@ -1017,11 +1043,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) freq = policy->freq_table[idx].frequency; - return freq == clamp_val(freq, policy->min, policy->max); + return freq == clamp_val(freq, min, max); } static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int target_freq, + unsigned int min, + unsigned int max, unsigned int relation) { bool efficiencies = policy->efficiencies_available && @@ -1032,29 +1060,26 @@ static inline int 
cpufreq_frequency_table_target(struct cpufreq_policy *policy, relation &= ~CPUFREQ_RELATION_E; if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) - return cpufreq_table_index_unsorted(policy, target_freq, - relation); + return cpufreq_table_index_unsorted(policy, target_freq, min, + max, relation); retry: switch (relation) { case CPUFREQ_RELATION_L: - idx = cpufreq_table_find_index_l(policy, target_freq, - efficiencies); + idx = find_index_l(policy, target_freq, min, max, efficiencies); break; case CPUFREQ_RELATION_H: - idx = cpufreq_table_find_index_h(policy, target_freq, - efficiencies); + idx = find_index_h(policy, target_freq, min, max, efficiencies); break; case CPUFREQ_RELATION_C: - idx = cpufreq_table_find_index_c(policy, target_freq, - efficiencies); + idx = find_index_c(policy, target_freq, min, max, efficiencies); break; default: WARN_ON_ONCE(1); return 0; } - /* Limit frequency index to honor policy->min/max */ - if (!cpufreq_is_in_limits(policy, idx) && efficiencies) { + /* Limit frequency index to honor min and max */ + if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) { efficiencies = false; goto retry; } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 6cd8d1d28b8b..fc27b53c58c2 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -907,21 +907,6 @@ extern void fsnotify_wait_marks_destroyed(void); /* Clear all of the marks of a group attached to a given object type */ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int obj_type); -/* run all the marks in a group, and clear all of the vfsmount marks */ -static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) -{ - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT); -} -/* run all the marks in a group, and clear all of the inode marks */ -static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) -{ - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE); -} -/* run all the marks in a group, and clear all of the sn marks */ -static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) -{ - fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB); -} extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 675959fb97ba..d6ffe01962c2 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1002,6 +1002,12 @@ struct vmbus_channel { /* The max size of a packet on this channel */ u32 max_pkt_size; + + /* function to mmap ring buffer memory to the channel's sysfs ring attribute */ + int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma); + + /* boolean to control visibility of sysfs for ring buffer */ + bool ring_sysfs_visible; }; #define lock_requestor(channel, flags) \ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 508d466de1cc..457b4fba88bd 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1526,7 +1526,7 @@ struct ieee80211_mgmt { struct { u8 action_code; u8 dialog_token; - u8 status_code; + __le16 status_code; u8 variable[]; } __packed ttlm_res; struct { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index ccce8a751e2a..3a8d35d41fda 100644 --- a/include/linux/iommu.h +++ 
b/include/linux/iommu.h @@ -440,10 +440,10 @@ static inline int __iommu_copy_struct_from_user( void *dst_data, const struct iommu_user_data *src_data, unsigned int data_type, size_t data_len, size_t min_len) { - if (src_data->type != data_type) - return -EINVAL; if (WARN_ON(!dst_data || !src_data)) return -EINVAL; + if (src_data->type != data_type) + return -EINVAL; if (src_data->len < min_len || data_len < src_data->len) return -EINVAL; return copy_struct_from_user(dst_data, data_len, src_data->uptr, @@ -456,8 +456,8 @@ static inline int __iommu_copy_struct_from_user( * include/uapi/linux/iommufd.h * @user_data: Pointer to a struct iommu_user_data for user space data info * @data_type: The data type of the @kdst. Must match with @user_data->type - * @min_last: The last memember of the data structure @kdst points in the - * initial version. + * @min_last: The last member of the data structure @kdst points in the initial + * version. * Return 0 for success, otherwise -error. */ #define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index bd7e60c0b72f..dde836875deb 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -340,7 +340,7 @@ struct pcmcia_device_id { #define INPUT_DEVICE_ID_LED_MAX 0x0f #define INPUT_DEVICE_ID_SND_MAX 0x07 #define INPUT_DEVICE_ID_FF_MAX 0x7f -#define INPUT_DEVICE_ID_SW_MAX 0x10 +#define INPUT_DEVICE_ID_SW_MAX 0x11 #define INPUT_DEVICE_ID_PROP_MAX 0x1f #define INPUT_DEVICE_ID_MATCH_BUS 1 diff --git a/include/linux/module.h b/include/linux/module.h index d94b196d5a34..b3329110d668 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -162,6 +162,8 @@ extern void cleanup_module(void); #define __INITRODATA_OR_MODULE __INITRODATA #endif /*CONFIG_MODULES*/ +struct module_kobject *lookup_or_create_module_kobject(const char *name); + /* Generic info of form tag = "info" */ #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2d11d013cabe..7ea022750e4e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4972,6 +4972,7 @@ static inline void __dev_mc_unsync(struct net_device *dev, /* Functions used for secondary unicast and multicast support */ void dev_set_rx_mode(struct net_device *dev); +int netif_set_promiscuity(struct net_device *dev, int inc); int dev_set_promiscuity(struct net_device *dev, int inc); int netif_set_allmulti(struct net_device *dev, int inc, bool notify); int dev_set_allmulti(struct net_device *dev, int inc); diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 7d902d8c054b..75247486616b 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -11,6 +11,13 @@ #ifndef THUNDERBOLT_H_ #define THUNDERBOLT_H_ +#include <linux/types.h> + +struct fwnode_handle; +struct device; + +#if IS_REACHABLE(CONFIG_USB4) + #include <linux/device.h> #include <linux/idr.h> #include <linux/list.h> @@ -674,4 +681,15 @@ static inline struct device *tb_ring_dma_device(struct tb_ring *ring) return &ring->nhi->pdev->dev; } +bool usb4_usb3_port_match(struct device *usb4_port_dev, + const struct fwnode_handle *usb3_port_fwnode); + +#else /* CONFIG_USB4 */ +static inline bool usb4_usb3_port_match(struct device *usb4_port_dev, + const struct fwnode_handle *usb3_port_fwnode) +{ + return false; +} +#endif /* CONFIG_USB4 */ + #endif /* THUNDERBOLT_H_ */ diff --git 
a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index e39d4d563b19..785048a3b3e6 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -51,7 +51,7 @@ struct tk_read_base { * @offs_real: Offset clock monotonic -> clock realtime * @offs_boot: Offset clock monotonic -> clock boottime * @offs_tai: Offset clock monotonic -> clock tai - * @tai_offset: The current UTC to TAI offset in seconds + * @coarse_nsec: The nanoseconds part for coarse time getters * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds * @clock_was_set_seq: The sequence number of clock was set events @@ -76,6 +76,7 @@ struct tk_read_base { * ntp shifted nano seconds. * @ntp_err_mult: Multiplication factor for scaled math conversion * @skip_second_overflow: Flag used to avoid updating NTP twice with same second + * @tai_offset: The current UTC to TAI offset in seconds * * Note: For timespec(64) based interfaces wall_to_monotonic is what * we need to add to xtime (or xtime corrected for sub jiffy times) @@ -100,7 +101,7 @@ struct tk_read_base { * which results in the following cacheline layout: * * 0: seqcount, tkr_mono - * 1: xtime_sec ... tai_offset + * 1: xtime_sec ... coarse_nsec * 2: tkr_raw, raw_sec * 3,4: Internal variables * @@ -121,7 +122,7 @@ struct timekeeper { ktime_t offs_real; ktime_t offs_boot; ktime_t offs_tai; - s32 tai_offset; + u32 coarse_nsec; /* Cacheline 2: */ struct tk_read_base tkr_raw; @@ -144,6 +145,7 @@ struct timekeeper { u32 ntp_error_shift; u32 ntp_err_mult; u32 skip_second_overflow; + s32 tai_offset; }; #ifdef CONFIG_GENERIC_TIME_VSYSCALL diff --git a/include/linux/usb.h b/include/linux/usb.h index b46738701f8d..1b2545b4363b 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -815,10 +815,10 @@ static inline void usb_mark_last_busy(struct usb_device *udev) #else -static inline int usb_enable_autosuspend(struct usb_device *udev) -{ return 0; } -static inline int usb_disable_autosuspend(struct usb_device *udev) -{ return 0; } +static inline void usb_enable_autosuspend(struct usb_device *udev) +{ } +static inline void usb_disable_autosuspend(struct usb_device *udev) +{ } static inline int usb_autopm_get_interface(struct usb_interface *intf) { return 0; } diff --git a/include/linux/usb/xhci-sideband.h b/include/linux/usb/xhci-sideband.h new file mode 100644 index 000000000000..45288c392f6e --- /dev/null +++ b/include/linux/usb/xhci-sideband.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * xHCI host controller sideband support + * + * Copyright (c) 2023-2025, Intel Corporation. + * + * Author: Mathias Nyman <mathias.nyman@linux.intel.com> + */ +#ifndef __LINUX_XHCI_SIDEBAND_H +#define __LINUX_XHCI_SIDEBAND_H + +#include <linux/scatterlist.h> +#include <linux/usb.h> + +#define EP_CTX_PER_DEV 31 /* FIXME defined twice, from xhci.h */ + +struct xhci_sideband; + +enum xhci_sideband_type { + XHCI_SIDEBAND_AUDIO, + XHCI_SIDEBAND_VENDOR, +}; + +enum xhci_sideband_notify_type { + XHCI_SIDEBAND_XFER_RING_FREE, +}; + +/** + * struct xhci_sideband_event - sideband event + * @type: notifier type + * @evt_data: event data + */ +struct xhci_sideband_event { + enum xhci_sideband_notify_type type; + void *evt_data; +}; + +/** + * struct xhci_sideband - representation of a sideband accessed usb device. 
+ * @xhci: The xhci host controller the usb device is connected to + * @vdev: the usb device accessed via sideband + * @eps: array of endpoints controlled via sideband + * @ir: event handling and buffer for sideband accessed device + * @type: xHCI sideband type + * @mutex: mutex for sideband operations + * @intf: USB sideband client interface + * @notify_client: callback for xHCI sideband sequences + * + * FIXME usb device accessed via sideband Keeping track of sideband accessed usb devices. + */ +struct xhci_sideband { + struct xhci_hcd *xhci; + struct xhci_virt_device *vdev; + struct xhci_virt_ep *eps[EP_CTX_PER_DEV]; + struct xhci_interrupter *ir; + enum xhci_sideband_type type; + + /* Synchronizing xHCI sideband operations with client drivers operations */ + struct mutex mutex; + + struct usb_interface *intf; + int (*notify_client)(struct usb_interface *intf, + struct xhci_sideband_event *evt); +}; + +struct xhci_sideband * +xhci_sideband_register(struct usb_interface *intf, enum xhci_sideband_type type, + int (*notify_client)(struct usb_interface *intf, + struct xhci_sideband_event *evt)); +void +xhci_sideband_unregister(struct xhci_sideband *sb); +int +xhci_sideband_add_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep); +int +xhci_sideband_remove_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep); +int +xhci_sideband_stop_endpoint(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep); +struct sg_table * +xhci_sideband_get_endpoint_buffer(struct xhci_sideband *sb, + struct usb_host_endpoint *host_ep); +struct sg_table * +xhci_sideband_get_event_buffer(struct xhci_sideband *sb); +int +xhci_sideband_create_interrupter(struct xhci_sideband *sb, int num_seg, + bool ip_autoclear, u32 imod_interval, int intr_num); +void +xhci_sideband_remove_interrupter(struct xhci_sideband *sb); +int +xhci_sideband_interrupter_id(struct xhci_sideband *sb); + +#if IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND) +void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb, + unsigned int ep_index); +#else +static inline void xhci_sideband_notify_ep_ring_free(struct xhci_sideband *sb, + unsigned int ep_index) +{ } +#endif /* IS_ENABLED(CONFIG_USB_XHCI_SIDEBAND) */ +#endif /* __LINUX_XHCI_SIDEBAND_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 31e9ffd936e3..5ca8d4dd149d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -61,6 +61,7 @@ struct vm_struct { unsigned int nr_pages; phys_addr_t phys_addr; const void *caller; + unsigned long requested_size; }; struct vmap_area { diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index a8586c3058c7..797992019f9e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1931,6 +1931,8 @@ struct hci_cp_le_pa_create_sync { __u8 sync_cte_type; } __packed; +#define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045 + #define HCI_OP_LE_PA_TERM_SYNC 0x2046 struct hci_cp_le_pa_term_sync { __le16 handle; @@ -2830,7 +2832,7 @@ struct hci_evt_le_create_big_complete { __le16 bis_handle[]; } __packed; -#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d +#define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d struct hci_evt_le_big_sync_estabilished { __u8 status; __u8 handle; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 5115da34f881..522d837a23fa 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1113,10 +1113,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, return 
NULL; } -static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev, - __u8 sid, - bdaddr_t *dst, - __u8 dst_type) +static inline struct hci_conn * +hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -1124,8 +1122,10 @@ static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (c->type != ISO_LINK || bacmp(&c->dst, dst) || - c->dst_type != dst_type || c->sid != sid) + if (c->type != ISO_LINK) + continue; + + if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags)) continue; rcu_read_unlock(); @@ -1524,8 +1524,6 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); bool hci_iso_setup_path(struct hci_conn *conn); int hci_le_create_cis_pending(struct hci_dev *hdev); -int hci_pa_create_sync_pending(struct hci_dev *hdev); -int hci_le_big_create_sync_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, @@ -1566,9 +1564,9 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 data_len, __u8 *data); struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); -int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, - struct bt_iso_qos *qos, - __u16 sync_handle, __u8 num_bis, __u8 bis[]); +int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, + struct bt_iso_qos *qos, __u16 sync_handle, + __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 7e2cf0cca939..72558c826aa1 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -185,3 +185,6 @@ int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn); int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn); int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, struct hci_conn_params *params); + +int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn); +int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn); diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index 825141d675e5..d01c82983b4d 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -103,6 +103,12 @@ struct netdev_stat_ops { struct netdev_queue_stats_tx *tx); }; +void netdev_stat_queue_sum(struct net_device *netdev, + int rx_start, int rx_end, + struct netdev_queue_stats_rx *rx_sum, + int tx_start, int tx_end, + struct netdev_queue_stats_tx *tx_sum); + /** * struct netdev_queue_mgmt_ops - netdev ops for queue management * diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index a58ae7589d12..e8bd6ddb7b12 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -71,9 +71,6 @@ struct xdp_sock { */ u32 tx_budget_spent; - /* Protects generic receive. 
*/ - spinlock_t rx_lock; - /* Statistics */ u64 rx_dropped; u64 rx_queue_full; diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 1dcd4d71468a..cac56e6b0869 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -53,6 +53,8 @@ struct xsk_buff_pool { refcount_t users; struct xdp_umem *umem; struct work_struct work; + /* Protects generic receive in shared and non-shared umem mode. */ + spinlock_t rx_lock; struct list_head free_list; struct list_head xskb_list; u32 heads_cnt; @@ -238,8 +240,8 @@ static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb, return orig_addr; offset = xskb->xdp.data - xskb->xdp.data_hard_start; - orig_addr -= offset; offset += pool->headroom; + orig_addr -= offset; return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); } diff --git a/include/sound/jack.h b/include/sound/jack.h index 1ed90e2109e9..bd3f62281c97 100644 --- a/include/sound/jack.h +++ b/include/sound/jack.h @@ -22,6 +22,7 @@ struct input_dev; * @SND_JACK_VIDEOOUT: Video out * @SND_JACK_AVOUT: AV (Audio Video) out * @SND_JACK_LINEIN: Line in + * @SND_JACK_USB: USB audio device * @SND_JACK_BTN_0: Button 0 * @SND_JACK_BTN_1: Button 1 * @SND_JACK_BTN_2: Button 2 @@ -43,6 +44,7 @@ enum snd_jack_types { SND_JACK_VIDEOOUT = 0x0010, SND_JACK_AVOUT = SND_JACK_LINEOUT | SND_JACK_VIDEOOUT, SND_JACK_LINEIN = 0x0020, + SND_JACK_USB = 0x0040, /* Kept separate from switches to facilitate implementation */ SND_JACK_BTN_0 = 0x4000, @@ -54,7 +56,7 @@ enum snd_jack_types { }; /* Keep in sync with definitions above */ -#define SND_JACK_SWITCH_TYPES 6 +#define SND_JACK_SWITCH_TYPES 7 struct snd_jack { struct list_head kctl_list; diff --git a/include/sound/q6usboffload.h b/include/sound/q6usboffload.h new file mode 100644 index 000000000000..f7e2449fe1b3 --- /dev/null +++ b/include/sound/q6usboffload.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * sound/q6usboffload.h -- QDSP6 USB offload + * + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +/** + * struct q6usb_offload - USB backend DAI link offload parameters + * @dev: dev handle to usb be + * @domain: allocated iommu domain + * @intr_num: usb interrupter number + * @sid: streamID for iommu + **/ +struct q6usb_offload { + struct device *dev; + struct iommu_domain *domain; + u16 intr_num; + u8 sid; +}; diff --git a/include/sound/soc-usb.h b/include/sound/soc-usb.h new file mode 100644 index 000000000000..124acb1939e5 --- /dev/null +++ b/include/sound/soc-usb.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
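Returning to the xp_get_handle() hunk a little earlier: after the reordering, orig_addr is reduced by the data offset plus the pool headroom before the offset is packed into the upper bits. The standalone sketch below only demonstrates the arithmetic difference between the two orderings with made-up numbers; the shift constant is a stand-in for XSK_UNALIGNED_BUF_OFFSET_SHIFT and the masking just strips the packed offset back off.

#include <stdio.h>
#include <stdint.h>

#define OFFSET_SHIFT 48   /* stand-in for XSK_UNALIGNED_BUF_OFFSET_SHIFT */

/* Old ordering: subtract the data offset first, then fold in headroom. */
static uint64_t encode_old(uint64_t orig, uint64_t data_off, uint64_t headroom)
{
	uint64_t offset = data_off;

	orig -= offset;
	offset += headroom;
	return orig + (offset << OFFSET_SHIFT);
}

/* New ordering: fold in headroom first, then subtract the full offset. */
static uint64_t encode_new(uint64_t orig, uint64_t data_off, uint64_t headroom)
{
	uint64_t offset = data_off + headroom;

	orig -= offset;
	return orig + (offset << OFFSET_SHIFT);
}

int main(void)
{
	uint64_t orig = 0x10000, data_off = 64, headroom = 256;
	uint64_t mask = 0xffffffffffffULL;   /* low 48 bits: the base address part */

	/* The two bases differ by exactly the headroom (256 here). */
	printf("old base: %#llx\n", (unsigned long long)(encode_old(orig, data_off, headroom) & mask));
	printf("new base: %#llx\n", (unsigned long long)(encode_new(orig, data_off, headroom) & mask));
	return 0;
}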
+ */ + +#ifndef __LINUX_SND_SOC_USB_H +#define __LINUX_SND_SOC_USB_H + +#include <sound/soc.h> + +enum snd_soc_usb_kctl { + SND_SOC_USB_KCTL_CARD_ROUTE, + SND_SOC_USB_KCTL_PCM_ROUTE, +}; + +/** + * struct snd_soc_usb_device - SoC USB representation of a USB sound device + * @card_idx: sound card index associated with USB device + * @chip_idx: USB sound chip array index + * @cpcm_idx: capture PCM index array associated with USB device + * @ppcm_idx: playback PCM index array associated with USB device + * @num_capture: number of capture streams + * @num_playback: number of playback streams + * @list: list head for SoC USB devices + **/ +struct snd_soc_usb_device { + int card_idx; + int chip_idx; + + /* PCM index arrays */ + unsigned int *cpcm_idx; /* TODO: capture path is not tested yet */ + unsigned int *ppcm_idx; + int num_capture; /* TODO: capture path is not tested yet */ + int num_playback; + + struct list_head list; +}; + +/** + * struct snd_soc_usb - representation of a SoC USB backend entity + * @list: list head for SND SOC struct list + * @component: reference to ASoC component + * @connection_status_cb: callback to notify connection events + * @update_offload_route_info: callback to fetch mapped ASoC card and pcm + * device pair. This is unrelated to the concept + * of DAPM route. The "route" argument carries + * an array used for a kcontrol output for either + * the card or pcm index. "path" determines the + * which entry to look for. (ie mapped card or pcm) + * @priv_data: driver data + **/ +struct snd_soc_usb { + struct list_head list; + struct snd_soc_component *component; + int (*connection_status_cb)(struct snd_soc_usb *usb, + struct snd_soc_usb_device *sdev, + bool connected); + int (*update_offload_route_info)(struct snd_soc_component *component, + int card, int pcm, int direction, + enum snd_soc_usb_kctl path, + long *route); + void *priv_data; +}; + +#if IS_ENABLED(CONFIG_SND_SOC_USB) +int snd_soc_usb_find_supported_format(int card_idx, + struct snd_pcm_hw_params *params, + int direction); + +int snd_soc_usb_connect(struct device *usbdev, struct snd_soc_usb_device *sdev); +int snd_soc_usb_disconnect(struct device *usbdev, struct snd_soc_usb_device *sdev); +void *snd_soc_usb_find_priv_data(struct device *usbdev); + +int snd_soc_usb_setup_offload_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack); +int snd_soc_usb_update_offload_route(struct device *dev, int card, int pcm, + int direction, enum snd_soc_usb_kctl path, + long *route); + +struct snd_soc_usb *snd_soc_usb_allocate_port(struct snd_soc_component *component, + void *data); +void snd_soc_usb_free_port(struct snd_soc_usb *usb); +void snd_soc_usb_add_port(struct snd_soc_usb *usb); +void snd_soc_usb_remove_port(struct snd_soc_usb *usb); +#else +static inline int +snd_soc_usb_find_supported_format(int card_idx, struct snd_pcm_hw_params *params, + int direction) +{ + return -EINVAL; +} + +static inline int snd_soc_usb_connect(struct device *usbdev, + struct snd_soc_usb_device *sdev) +{ + return -ENODEV; +} + +static inline int snd_soc_usb_disconnect(struct device *usbdev, + struct snd_soc_usb_device *sdev) +{ + return -EINVAL; +} + +static inline void *snd_soc_usb_find_priv_data(struct device *usbdev) +{ + return NULL; +} + +static inline int snd_soc_usb_setup_offload_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack) +{ + return 0; +} + +static int snd_soc_usb_update_offload_route(struct device *dev, int card, int pcm, + int direction, enum snd_soc_usb_kctl path, + long *route) +{ 
+ return -ENODEV; +} + +static inline struct snd_soc_usb * +snd_soc_usb_allocate_port(struct snd_soc_component *component, void *data) +{ + return ERR_PTR(-ENOMEM); +} + +static inline void snd_soc_usb_free_port(struct snd_soc_usb *usb) +{ } + +static inline void snd_soc_usb_add_port(struct snd_soc_usb *usb) +{ } + +static inline void snd_soc_usb_remove_port(struct snd_soc_usb *usb) +{ } +#endif /* IS_ENABLED(CONFIG_SND_SOC_USB) */ +#endif /*__LINUX_SND_SOC_USB_H */ diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h index 36a4a1e1d8ca..d8bd5d37131a 100644 --- a/include/sound/soc_sdw_utils.h +++ b/include/sound/soc_sdw_utils.h @@ -226,6 +226,7 @@ int asoc_sdw_cs_amp_init(struct snd_soc_card *card, bool playback); int asoc_sdw_cs_spk_feedback_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); +int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix); /* MAXIM codec support */ int asoc_sdw_maxim_init(struct snd_soc_card *card, diff --git a/include/sound/ump_convert.h b/include/sound/ump_convert.h index d099ae27f849..682499b871ea 100644 --- a/include/sound/ump_convert.h +++ b/include/sound/ump_convert.h @@ -19,7 +19,7 @@ struct ump_cvt_to_ump_bank { /* context for converting from MIDI1 byte stream to UMP packet */ struct ump_cvt_to_ump { /* MIDI1 intermediate buffer */ - unsigned char buf[4]; + unsigned char buf[6]; /* up to 6 bytes for SysEx */ int len; int cmd_bytes; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 549ab3b41961..3efc00cc1bcd 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1928,7 +1928,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, const struct prelim_ref *oldref, const struct prelim_ref *newref, u64 tree_size), - TP_ARGS(fs_info, newref, oldref, tree_size), + TP_ARGS(fs_info, oldref, newref, tree_size), TP_STRUCT__entry_btrfs( __field( u64, root_id ) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 28705ae67784..fd404729b115 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4968,6 +4968,9 @@ union bpf_attr { * the netns switch takes place from ingress to ingress without * going through the CPU's backlog queue. * + * *skb*\ **->mark** and *skb*\ **->tstamp** are not cleared during + * the netns switch. + * * The *flags* argument is reserved and must be 0. The helper is * currently only supported for tc BPF program types at the * ingress hook and for veth and netkit target device types. 
The diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h index fe24c3459ac0..30c8dad6214e 100644 --- a/include/uapi/linux/ethtool_netlink_generated.h +++ b/include/uapi/linux/ethtool_netlink_generated.h @@ -31,11 +31,6 @@ enum ethtool_header_flags { ETHTOOL_FLAG_STATS = 4, }; -enum { - ETHTOOL_PHY_UPSTREAM_TYPE_MAC, - ETHTOOL_PHY_UPSTREAM_TYPE_PHY, -}; - enum ethtool_tcp_data_split { ETHTOOL_TCP_DATA_SPLIT_UNKNOWN, ETHTOOL_TCP_DATA_SPLIT_DISABLED, diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 5a199f3d4a26..3b2524e4b667 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -925,7 +925,8 @@ #define SW_MUTE_DEVICE 0x0e /* set = device disabled */ #define SW_PEN_INSERTED 0x0f /* set = pen inserted */ #define SW_MACHINE_COVER 0x10 /* set = cover closed */ -#define SW_MAX 0x10 +#define SW_USB_INSERT 0x11 /* set = USB audio device connected */ +#define SW_MAX 0x11 #define SW_CNT (SW_MAX+1) /* diff --git a/init/Kconfig b/init/Kconfig index 63f5974b9fa6..4cdd1049283c 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -140,6 +140,9 @@ config LD_CAN_USE_KEEP_IN_OVERLAY config RUSTC_HAS_COERCE_POINTEE def_bool RUSTC_VERSION >= 108400 +config RUSTC_HAS_UNNECESSARY_TRANSMUTES + def_bool RUSTC_VERSION >= 108800 + config PAHOLE_VERSION int default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE)) diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index f60d0a9d505e..9414ca6d101c 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -123,11 +123,11 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *file) seq_printf(m, "SqMask:\t0x%x\n", sq_mask); seq_printf(m, "SqHead:\t%u\n", sq_head); seq_printf(m, "SqTail:\t%u\n", sq_tail); - seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); + seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head)); seq_printf(m, "CqMask:\t0x%x\n", cq_mask); seq_printf(m, "CqHead:\t%u\n", cq_head); seq_printf(m, "CqTail:\t%u\n", cq_tail); - seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); + seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail)); seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head); sq_entries = min(sq_tail - sq_head, ctx->sq_entries); for (i = 0; i < sq_entries; i++) { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index a2b256e96d5d..541e65a1eebf 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -448,24 +448,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) return req->link; } -static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) -{ - if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) - return NULL; - return __io_prep_linked_timeout(req); -} - -static noinline void __io_arm_ltimeout(struct io_kiocb *req) -{ - io_queue_linked_timeout(__io_prep_linked_timeout(req)); -} - -static inline void io_arm_ltimeout(struct io_kiocb *req) -{ - if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT)) - __io_arm_ltimeout(req); -} - static void io_prep_async_work(struct io_kiocb *req) { const struct io_issue_def *def = &io_issue_defs[req->opcode]; @@ -518,7 +500,6 @@ static void io_prep_async_link(struct io_kiocb *req) static void io_queue_iowq(struct io_kiocb *req) { - struct io_kiocb *link = io_prep_linked_timeout(req); struct io_uring_task *tctx = req->tctx; BUG_ON(!tctx); @@ -543,8 +524,6 @@ static void io_queue_iowq(struct io_kiocb *req) trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work)); 
io_wq_enqueue(tctx->io_wq, &req->work); - if (link) - io_queue_linked_timeout(link); } static void io_req_queue_iowq_tw(struct io_kiocb *req, io_tw_token_t tw) @@ -869,6 +848,14 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags) struct io_ring_ctx *ctx = req->ctx; bool posted; + /* + * If multishot has already posted deferred completions, ensure that + * those are flushed first before posting this one. If not, CQEs + * could get reordered. + */ + if (!wq_list_empty(&ctx->submit_state.compl_reqs)) + __io_submit_flush_completions(ctx); + lockdep_assert(!io_wq_current_is_worker()); lockdep_assert_held(&ctx->uring_lock); @@ -1724,15 +1711,22 @@ static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def, return !!req->file; } +#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT) + static inline int __io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags, const struct io_issue_def *def) { const struct cred *creds = NULL; + struct io_kiocb *link = NULL; int ret; - if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) - creds = override_creds(req->creds); + if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) { + if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) + creds = override_creds(req->creds); + if (req->flags & REQ_F_ARM_LTIMEOUT) + link = __io_prep_linked_timeout(req); + } if (!def->audit_skip) audit_uring_entry(req->opcode); @@ -1742,8 +1736,12 @@ static inline int __io_issue_sqe(struct io_kiocb *req, if (!def->audit_skip) audit_uring_exit(!ret, ret); - if (creds) - revert_creds(creds); + if (unlikely(creds || link)) { + if (creds) + revert_creds(creds); + if (link) + io_queue_linked_timeout(link); + } return ret; } @@ -1769,7 +1767,6 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) if (ret == IOU_ISSUE_SKIP_COMPLETE) { ret = 0; - io_arm_ltimeout(req); /* If the op doesn't have a file, we're not polling for it */ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue) @@ -1824,8 +1821,6 @@ void io_wq_submit_work(struct io_wq_work *work) else req_ref_get(req); - io_arm_ltimeout(req); - /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) { fail: @@ -1941,15 +1936,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd) static void io_queue_async(struct io_kiocb *req, int ret) __must_hold(&req->ctx->uring_lock) { - struct io_kiocb *linked_timeout; - if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { io_req_defer_failed(req, ret); return; } - linked_timeout = io_prep_linked_timeout(req); - switch (io_arm_poll_handler(req, 0)) { case IO_APOLL_READY: io_kbuf_recycle(req, 0); @@ -1962,9 +1953,6 @@ static void io_queue_async(struct io_kiocb *req, int ret) case IO_APOLL_OK: break; } - - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); } static inline void io_queue_sqe(struct io_kiocb *req) diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index d037cc68e9d3..03c699493b5a 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -20,7 +20,7 @@ #include "sqpoll.h" #define IORING_SQPOLL_CAP_ENTRIES_VALUE 8 -#define IORING_TW_CAP_ENTRIES_VALUE 8 +#define IORING_TW_CAP_ENTRIES_VALUE 32 enum { IO_SQ_THREAD_SHOULD_STOP = 0, diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 5c8d43cdb0a3..c05ba7ca00fa 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -761,7 +761,7 @@ static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fw static void 
msi_domain_debug_show(struct seq_file *m, struct irq_domain *d, struct irq_data *irqd, int ind) { - struct msi_desc *desc = irq_data_get_msi_desc(irqd); + struct msi_desc *desc = irqd ? irq_data_get_msi_desc(irqd) : NULL; if (!desc) return; diff --git a/kernel/params.c b/kernel/params.c index 2509f216c9f3..b92d64161b75 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -760,38 +760,35 @@ void destroy_params(const struct kernel_param *params, unsigned num) params[i].ops->free(params[i].arg); } -static struct module_kobject * __init locate_module_kobject(const char *name) +struct module_kobject __modinit * lookup_or_create_module_kobject(const char *name) { struct module_kobject *mk; struct kobject *kobj; int err; kobj = kset_find_obj(module_kset, name); - if (kobj) { - mk = to_module_kobject(kobj); - } else { - mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); - BUG_ON(!mk); - - mk->mod = THIS_MODULE; - mk->kobj.kset = module_kset; - err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, - "%s", name); -#ifdef CONFIG_MODULES - if (!err) - err = sysfs_create_file(&mk->kobj, &module_uevent.attr); -#endif - if (err) { - kobject_put(&mk->kobj); - pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n", - name, err); - return NULL; - } + if (kobj) + return to_module_kobject(kobj); - /* So that we hold reference in both cases. */ - kobject_get(&mk->kobj); + mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); + if (!mk) + return NULL; + + mk->mod = THIS_MODULE; + mk->kobj.kset = module_kset; + err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name); + if (IS_ENABLED(CONFIG_MODULES) && !err) + err = sysfs_create_file(&mk->kobj, &module_uevent.attr); + if (err) { + kobject_put(&mk->kobj); + pr_crit("Adding module '%s' to sysfs failed (%d), the system may be unstable.\n", + name, err); + return NULL; } + /* So that we hold reference in both cases. */ + kobject_get(&mk->kobj); + return mk; } @@ -802,7 +799,7 @@ static void __init kernel_add_sysfs_param(const char *name, struct module_kobject *mk; int err; - mk = locate_module_kobject(name); + mk = lookup_or_create_module_kobject(name); if (!mk) return; @@ -873,7 +870,7 @@ static void __init version_sysfs_builtin(void) int err; for (vattr = __start___modver; vattr < __stop___modver; vattr++) { - mk = locate_module_kobject(vattr->module_name); + mk = lookup_or_create_module_kobject(vattr->module_name); if (mk) { err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr); WARN_ON_ONCE(err); @@ -946,7 +943,9 @@ struct kset *module_kset; static void module_kobj_release(struct kobject *kobj) { struct module_kobject *mk = to_module_kobject(kobj); - complete(mk->kobj_completion); + + if (mk->kobj_completion) + complete(mk->kobj_completion); } const struct kobj_type module_ktype = { diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 1e67d076f195..a009c91f7b05 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -164,10 +164,34 @@ static inline struct timespec64 tk_xtime(const struct timekeeper *tk) return ts; } +static inline struct timespec64 tk_xtime_coarse(const struct timekeeper *tk) +{ + struct timespec64 ts; + + ts.tv_sec = tk->xtime_sec; + ts.tv_nsec = tk->coarse_nsec; + return ts; +} + +/* + * Update the nanoseconds part for the coarse time keepers. 
They can't rely + * on xtime_nsec because xtime_nsec could be adjusted by a small negative + * amount when the multiplication factor of the clock is adjusted, which + * could cause the coarse clocks to go slightly backwards. See + * timekeeping_apply_adjustment(). Thus we keep a separate copy for the coarse + * clockids which only is updated when the clock has been set or we have + * accumulated time. + */ +static inline void tk_update_coarse_nsecs(struct timekeeper *tk) +{ + tk->coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; +} + static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts) { tk->xtime_sec = ts->tv_sec; tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; + tk_update_coarse_nsecs(tk); } static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts) @@ -175,6 +199,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts) tk->xtime_sec += ts->tv_sec; tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; tk_normalize_xtime(tk); + tk_update_coarse_nsecs(tk); } static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) @@ -708,6 +733,7 @@ static void timekeeping_forward_now(struct timekeeper *tk) tk_normalize_xtime(tk); delta -= incr; } + tk_update_coarse_nsecs(tk); } /** @@ -804,8 +830,8 @@ EXPORT_SYMBOL_GPL(ktime_get_with_offset); ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned int seq; ktime_t base, *offset = offsets[offs]; + unsigned int seq; u64 nsecs; WARN_ON(timekeeping_suspended); @@ -813,7 +839,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs) do { seq = read_seqcount_begin(&tk_core.seq); base = ktime_add(tk->tkr_mono.base, *offset); - nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; + nsecs = tk->coarse_nsec; } while (read_seqcount_retry(&tk_core.seq, seq)); @@ -2161,7 +2187,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode) struct timekeeper *real_tk = &tk_core.timekeeper; unsigned int clock_set = 0; int shift = 0, maxshift; - u64 offset; + u64 offset, orig_offset; guard(raw_spinlock_irqsave)(&tk_core.lock); @@ -2172,7 +2198,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode) offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), tk->tkr_mono.cycle_last, tk->tkr_mono.mask, tk->tkr_mono.clock->max_raw_delta); - + orig_offset = offset; /* Check if there's really nothing to do */ if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK) return false; @@ -2205,6 +2231,14 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode) */ clock_set |= accumulate_nsecs_to_secs(tk); + /* + * To avoid inconsistencies caused adjtimex TK_ADV_FREQ calls + * making small negative adjustments to the base xtime_nsec + * value, only update the coarse clocks if we accumulated time + */ + if (orig_offset != offset) + tk_update_coarse_nsecs(tk); + timekeeping_update_from_shadow(&tk_core, clock_set); return !!clock_set; @@ -2248,7 +2282,7 @@ void ktime_get_coarse_real_ts64(struct timespec64 *ts) do { seq = read_seqcount_begin(&tk_core.seq); - *ts = tk_xtime(tk); + *ts = tk_xtime_coarse(tk); } while (read_seqcount_retry(&tk_core.seq, seq)); } EXPORT_SYMBOL(ktime_get_coarse_real_ts64); @@ -2271,7 +2305,7 @@ void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts) do { seq = read_seqcount_begin(&tk_core.seq); - *ts = tk_xtime(tk); + *ts = tk_xtime_coarse(tk); offset = tk_core.timekeeper.offs_real; } while (read_seqcount_retry(&tk_core.seq, seq)); 
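[Editor's illustration] The separate coarse_nsec copy introduced above exists so that the coarse clockids are never seen moving backwards when the clocksource multiplier is adjusted. A small userspace probe, illustrative only and not part of this patch, checks the externally visible property: successive CLOCK_MONOTONIC_COARSE reads must be non-decreasing, and a coarse read must not be ahead of a CLOCK_MONOTONIC read taken immediately afterwards.

/* Illustrative userspace probe, not part of this patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

static long long ns(const struct timespec *ts)
{
	return ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

int main(void)
{
	long long prev = 0;

	for (int i = 0; i < 1000000; i++) {
		struct timespec coarse, fine;

		clock_gettime(CLOCK_MONOTONIC_COARSE, &coarse);
		clock_gettime(CLOCK_MONOTONIC, &fine);

		/* coarse must never go backwards, nor run ahead of the fine clock */
		if (ns(&coarse) < prev || ns(&coarse) > ns(&fine)) {
			fprintf(stderr, "coarse clock inconsistency at iteration %d\n", i);
			return 1;
		}
		prev = ns(&coarse);
	}
	printf("coarse clock stayed consistent over 1000000 reads\n");
	return 0;
}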
@@ -2350,12 +2384,12 @@ void ktime_get_coarse_ts64(struct timespec64 *ts) do { seq = read_seqcount_begin(&tk_core.seq); - now = tk_xtime(tk); + now = tk_xtime_coarse(tk); mono = tk->wall_to_monotonic; } while (read_seqcount_retry(&tk_core.seq, seq)); set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec, - now.tv_nsec + mono.tv_nsec); + now.tv_nsec + mono.tv_nsec); } EXPORT_SYMBOL(ktime_get_coarse_ts64); diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c index 01c2ab1e8971..32ef27c71b57 100644 --- a/kernel/time/vsyscall.c +++ b/kernel/time/vsyscall.c @@ -98,12 +98,12 @@ void update_vsyscall(struct timekeeper *tk) /* CLOCK_REALTIME_COARSE */ vdso_ts = &vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE]; vdso_ts->sec = tk->xtime_sec; - vdso_ts->nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; + vdso_ts->nsec = tk->coarse_nsec; /* CLOCK_MONOTONIC_COARSE */ vdso_ts = &vc[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE]; vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; + nsec = tk->coarse_nsec; nsec = nsec + tk->wall_to_monotonic.tv_nsec; vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 61130bb34d6c..6981830c3128 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3436,7 +3436,7 @@ static int add_next_hash(struct ftrace_hash **filter_hash, struct ftrace_hash ** /* Copy the subops hash */ *filter_hash = alloc_and_copy_ftrace_hash(size_bits, subops_hash->filter_hash); - if (!filter_hash) + if (!*filter_hash) return -ENOMEM; /* Remove any notrace functions from the copy */ remove_hash(*filter_hash, subops_hash->notrace_hash); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8ddf6b17215c..5b8db27fb6ef 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6043,8 +6043,10 @@ unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr) tscratch = tr->scratch; /* if there is no tscrach, module_delta must be NULL. */ module_delta = READ_ONCE(tr->module_delta); - if (!module_delta || tscratch->entries[0].mod_addr > addr) + if (!module_delta || !tscratch->nr_entries || + tscratch->entries[0].mod_addr > addr) { return addr + tr->text_delta; + } /* Note that entries must be sorted. */ nr_entries = tscratch->nr_entries; @@ -6821,13 +6823,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, /* Copy the data into the page, so we can start over. 
*/ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), - trace_seq_used(&iter->seq)); + min((size_t)trace_seq_used(&iter->seq), + PAGE_SIZE)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; - spd.partial[i].len = trace_seq_used(&iter->seq); + spd.partial[i].len = ret; trace_seq_init(&iter->seq); } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index fee40ffbd490..b9ab06c99543 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -1042,11 +1042,12 @@ enum print_line_t print_event_fields(struct trace_iterator *iter, struct trace_event_call *call; struct list_head *head; + lockdep_assert_held_read(&trace_event_sem); + /* ftrace defined events have separate call structures */ if (event->type <= __TRACE_LAST_TYPE) { bool found = false; - down_read(&trace_event_sem); list_for_each_entry(call, &ftrace_events, list) { if (call->event.type == event->type) { found = true; @@ -1056,7 +1057,6 @@ enum print_line_t print_event_fields(struct trace_iterator *iter, if (call->event.type > __TRACE_LAST_TYPE) break; } - up_read(&trace_event_sem); if (!found) { trace_seq_printf(&iter->seq, "UNKNOWN TYPE %d\n", event->type); goto out; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2a47682d1ab7..47d76d03ce30 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3075,6 +3075,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, bool freeze, struct folio *folio) { + bool pmd_migration = is_pmd_migration_entry(*pmd); + VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio)); VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE)); VM_WARN_ON_ONCE(folio && !folio_test_locked(folio)); @@ -3085,9 +3087,12 @@ void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address, * require a folio to check the PMD against. Otherwise, there * is a risk of replacing the wrong folio. */ - if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || - is_pmd_migration_entry(*pmd)) { - if (folio && folio != pmd_folio(*pmd)) + if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || pmd_migration) { + /* + * Do not apply pmd_folio() to a migration entry; and folio lock + * guarantees that it must be of the wrong folio anyway. 
+ */ + if (folio && (pmd_migration || folio != pmd_folio(*pmd))) return; __split_huge_pmd_locked(vma, pmd, address, freeze); } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e3e6ac991b9c..6ea1be71aa42 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4034,10 +4034,13 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, list_for_each_entry_safe(folio, next, src_list, lru) { int i; + bool cma; if (folio_test_hugetlb_vmemmap_optimized(folio)) continue; + cma = folio_test_hugetlb_cma(folio); + list_del(&folio->lru); split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst)); @@ -4053,6 +4056,9 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, new_folio->mapping = NULL; init_new_hugetlb_folio(dst, new_folio); + /* Copy the CMA flag so that it is freed correctly */ + if (cma) + folio_set_hugetlb_cma(new_folio); list_add(&new_folio->lru, &dst_list); } } diff --git a/mm/internal.h b/mm/internal.h index e9695baa5922..25a29872c634 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -248,11 +248,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, bool *any_writable, bool *any_young, bool *any_dirty) { - unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); - const pte_t *end_ptep = start_ptep + max_nr; pte_t expected_pte, *ptep; bool writable, young, dirty; - int nr; + int nr, cur_nr; if (any_writable) *any_writable = false; @@ -265,11 +263,15 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio); + /* Limit max_nr to the actual remaining PFNs in the folio we could batch. */ + max_nr = min_t(unsigned long, max_nr, + folio_pfn(folio) + folio_nr_pages(folio) - pte_pfn(pte)); + nr = pte_batch_hint(start_ptep, pte); expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags); ptep = start_ptep + nr; - while (ptep < end_ptep) { + while (nr < max_nr) { pte = ptep_get(ptep); if (any_writable) writable = !!pte_write(pte); @@ -282,14 +284,6 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, if (!pte_same(pte, expected_pte)) break; - /* - * Stop immediately once we reached the end of the folio. In - * corner cases the next PFN might fall into a different - * folio. - */ - if (pte_pfn(pte) >= folio_end_pfn) - break; - if (any_writable) *any_writable |= writable; if (any_young) @@ -297,12 +291,13 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, if (any_dirty) *any_dirty |= dirty; - nr = pte_batch_hint(ptep, pte); - expected_pte = pte_advance_pfn(expected_pte, nr); - ptep += nr; + cur_nr = pte_batch_hint(ptep, pte); + expected_pte = pte_advance_pfn(expected_pte, cur_nr); + ptep += cur_nr; + nr += cur_nr; } - return min(ptep - start_ptep, max_nr); + return min(nr, max_nr); } /** diff --git a/mm/memblock.c b/mm/memblock.c index 0a53db4d9f7b..0e9ebb8aa7fe 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -457,7 +457,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type, min(new_area_start, memblock.current_limit), new_alloc_size, PAGE_SIZE); - new_array = addr ? __va(addr) : NULL; + if (addr) { + /* The memory may not have been accepted, yet. 
*/ + accept_memory(addr, new_alloc_size); + + new_array = __va(addr); + } else { + new_array = NULL; + } } if (!addr) { pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", @@ -2183,11 +2190,14 @@ static void __init memmap_init_reserved_pages(void) struct memblock_region *region; phys_addr_t start, end; int nid; + unsigned long max_reserved; /* * set nid on all reserved pages and also treat struct * pages for the NOMAP regions as PageReserved */ +repeat: + max_reserved = memblock.reserved.max; for_each_mem_region(region) { nid = memblock_get_region_node(region); start = region->base; @@ -2196,8 +2206,15 @@ static void __init memmap_init_reserved_pages(void) if (memblock_is_nomap(region)) reserve_bootmem_region(start, end, nid); - memblock_set_node(start, end, &memblock.reserved, nid); + memblock_set_node(start, region->size, &memblock.reserved, nid); } + /* + * 'max' is changed means memblock.reserved has been doubled its + * array, which may result a new reserved region before current + * 'start'. Now we should repeat the procedure to set its node id. + */ + if (max_reserved != memblock.reserved.max) + goto repeat; /* * initialize struct pages for reserved regions that don't have diff --git a/mm/mm_init.c b/mm/mm_init.c index 9659689b8ace..327764ca0ee4 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1786,7 +1786,7 @@ static bool arch_has_descending_max_zone_pfns(void) return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); } -static void set_high_memory(void) +static void __init set_high_memory(void) { phys_addr_t highmem = memblock_end_of_DRAM(); diff --git a/mm/slub.c b/mm/slub.c index dc9e729e1d26..be8b09e09d30 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, return 0; } -/* Should be called only if mem_alloc_profiling_enabled() */ -static noinline void free_slab_obj_exts(struct slab *slab) +static inline void free_slab_obj_exts(struct slab *slab) { struct slabobj_ext *obj_exts; @@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab) slab->obj_exts = 0; } -static inline bool need_slab_obj_ext(void) -{ - if (mem_alloc_profiling_enabled()) - return true; - - /* - * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally - * inside memcg_slab_post_alloc_hook. No other users for now. - */ - return false; -} - #else /* CONFIG_SLAB_OBJ_EXT */ static inline void init_slab_obj_exts(struct slab *slab) @@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab) { } -static inline bool need_slab_obj_ext(void) -{ - return false; -} - #endif /* CONFIG_SLAB_OBJ_EXT */ #ifdef CONFIG_MEM_ALLOC_PROFILING @@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) static inline void alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) { - if (need_slab_obj_ext()) + if (mem_alloc_profiling_enabled()) __alloc_tagging_slab_alloc_hook(s, object, flags); } @@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order, static __always_inline void unaccount_slab(struct slab *slab, int order, struct kmem_cache *s) { - if (memcg_kmem_online() || need_slab_obj_ext()) - free_slab_obj_exts(slab); + /* + * The slab object extensions should now be freed regardless of + * whether mem_alloc_profiling_enabled() or not because profiling + * might have been disabled after slab->obj_exts got allocated. 
+ */ + free_slab_obj_exts(slab); mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), -(PAGE_SIZE << order)); diff --git a/mm/swapfile.c b/mm/swapfile.c index 2eff8b51a945..f214843612dc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1272,13 +1272,22 @@ int folio_alloc_swap(struct folio *folio, gfp_t gfp) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); - /* - * Should not even be attempting large allocations when huge - * page swap is disabled. Warn and fail the allocation. - */ - if (order && (!IS_ENABLED(CONFIG_THP_SWAP) || size > SWAPFILE_CLUSTER)) { - VM_WARN_ON_ONCE(1); - return -EINVAL; + if (order) { + /* + * Reject large allocation when THP_SWAP is disabled, + * the caller should split the folio and try again. + */ + if (!IS_ENABLED(CONFIG_THP_SWAP)) + return -EAGAIN; + + /* + * Allocation size should never exceed cluster size + * (HPAGE_PMD_SIZE). + */ + if (size > SWAPFILE_CLUSTER) { + VM_WARN_ON_ONCE(1); + return -EINVAL; + } } local_lock(&percpu_swap_cluster.lock); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 3ed720a787ec..2d7511654831 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1940,7 +1940,7 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm, { vm->flags = flags; vm->addr = (void *)va->va_start; - vm->size = va_size(va); + vm->size = vm->requested_size = va_size(va); vm->caller = caller; va->vm = vm; } @@ -3133,6 +3133,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size, area->flags = flags; area->caller = caller; + area->requested_size = requested_size; va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area); if (IS_ERR(va)) { @@ -4063,6 +4064,8 @@ EXPORT_SYMBOL(vzalloc_node_noprof); */ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) { + struct vm_struct *vm = NULL; + size_t alloced_size = 0; size_t old_size = 0; void *n; @@ -4072,15 +4075,17 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) } if (p) { - struct vm_struct *vm; - vm = find_vm_area(p); if (unlikely(!vm)) { WARN(1, "Trying to vrealloc() nonexistent vm area (%p)\n", p); return NULL; } - old_size = get_vm_area_size(vm); + alloced_size = get_vm_area_size(vm); + old_size = vm->requested_size; + if (WARN(alloced_size < old_size, + "vrealloc() has mismatched area vs requested sizes (%p)\n", p)) + return NULL; } /* @@ -4088,14 +4093,26 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags) * would be a good heuristic for when to shrink the vm_area? */ if (size <= old_size) { - /* Zero out spare memory. */ - if (want_init_on_alloc(flags)) + /* Zero out "freed" memory. */ + if (want_init_on_free()) memset((void *)p + size, 0, old_size - size); + vm->requested_size = size; kasan_poison_vmalloc(p + size, old_size - size); - kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL); return (void *)p; } + /* + * We already have the bytes available in the allocation; use them. + */ + if (size <= alloced_size) { + kasan_unpoison_vmalloc(p + old_size, size - old_size, + KASAN_VMALLOC_PROT_NORMAL); + /* Zero out "alloced" memory. */ + if (want_init_on_alloc(flags)) + memset((void *)p + old_size, 0, size - old_size); + vm->requested_size = size; + } + /* TODO: Grow the vm_area, i.e. allocate and map additional pages. 
*/ n = __vmalloc_noprof(size, flags); if (!n) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 7e1b53857648..6533e281ada3 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -2064,95 +2064,6 @@ static int create_big_sync(struct hci_dev *hdev, void *data) return hci_le_create_big(conn, &conn->iso_qos); } -static void create_pa_complete(struct hci_dev *hdev, void *data, int err) -{ - bt_dev_dbg(hdev, ""); - - if (err) - bt_dev_err(hdev, "Unable to create PA: %d", err); -} - -static bool hci_conn_check_create_pa_sync(struct hci_conn *conn) -{ - if (conn->type != ISO_LINK || conn->sid == HCI_SID_INVALID) - return false; - - return true; -} - -static int create_pa_sync(struct hci_dev *hdev, void *data) -{ - struct hci_cp_le_pa_create_sync cp = {0}; - struct hci_conn *conn; - int err = 0; - - hci_dev_lock(hdev); - - rcu_read_lock(); - - /* The spec allows only one pending LE Periodic Advertising Create - * Sync command at a time. If the command is pending now, don't do - * anything. We check for pending connections after each PA Sync - * Established event. - * - * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E - * page 2493: - * - * If the Host issues this command when another HCI_LE_Periodic_ - * Advertising_Create_Sync command is pending, the Controller shall - * return the error code Command Disallowed (0x0C). - */ - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - if (test_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags)) - goto unlock; - } - - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - if (hci_conn_check_create_pa_sync(conn)) { - struct bt_iso_qos *qos = &conn->iso_qos; - - cp.options = qos->bcast.options; - cp.sid = conn->sid; - cp.addr_type = conn->dst_type; - bacpy(&cp.addr, &conn->dst); - cp.skip = cpu_to_le16(qos->bcast.skip); - cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); - cp.sync_cte_type = qos->bcast.sync_cte_type; - - break; - } - } - -unlock: - rcu_read_unlock(); - - hci_dev_unlock(hdev); - - if (bacmp(&cp.addr, BDADDR_ANY)) { - hci_dev_set_flag(hdev, HCI_PA_SYNC); - set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); - - err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC, - sizeof(cp), &cp, HCI_CMD_TIMEOUT); - if (!err) - err = hci_update_passive_scan_sync(hdev); - - if (err) { - hci_dev_clear_flag(hdev, HCI_PA_SYNC); - clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); - } - } - - return err; -} - -int hci_pa_create_sync_pending(struct hci_dev *hdev) -{ - /* Queue start pa_create_sync and scan */ - return hci_cmd_sync_queue(hdev, create_pa_sync, - NULL, create_pa_complete); -} - struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, __u8 sid, struct bt_iso_qos *qos) @@ -2167,97 +2078,18 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, conn->dst_type = dst_type; conn->sid = sid; conn->state = BT_LISTEN; + conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10); hci_conn_hold(conn); - hci_pa_create_sync_pending(hdev); + hci_connect_pa_sync(hdev, conn); return conn; } -static bool hci_conn_check_create_big_sync(struct hci_conn *conn) -{ - if (!conn->num_bis) - return false; - - return true; -} - -static void big_create_sync_complete(struct hci_dev *hdev, void *data, int err) -{ - bt_dev_dbg(hdev, ""); - - if (err) - bt_dev_err(hdev, "Unable to create BIG sync: %d", err); -} - -static int big_create_sync(struct hci_dev *hdev, void *data) -{ - DEFINE_FLEX(struct hci_cp_le_big_create_sync, pdu, bis, num_bis, 0x11); - 
struct hci_conn *conn; - - rcu_read_lock(); - - pdu->num_bis = 0; - - /* The spec allows only one pending LE BIG Create Sync command at - * a time. If the command is pending now, don't do anything. We - * check for pending connections after each BIG Sync Established - * event. - * - * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E - * page 2586: - * - * If the Host sends this command when the Controller is in the - * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ - * Established event has not been generated, the Controller shall - * return the error code Command Disallowed (0x0C). - */ - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - if (test_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags)) - goto unlock; - } - - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - if (hci_conn_check_create_big_sync(conn)) { - struct bt_iso_qos *qos = &conn->iso_qos; - - set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); - - pdu->handle = qos->bcast.big; - pdu->sync_handle = cpu_to_le16(conn->sync_handle); - pdu->encryption = qos->bcast.encryption; - memcpy(pdu->bcode, qos->bcast.bcode, - sizeof(pdu->bcode)); - pdu->mse = qos->bcast.mse; - pdu->timeout = cpu_to_le16(qos->bcast.timeout); - pdu->num_bis = conn->num_bis; - memcpy(pdu->bis, conn->bis, conn->num_bis); - - break; - } - } - -unlock: - rcu_read_unlock(); - - if (!pdu->num_bis) - return 0; - - return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC, - struct_size(pdu, bis, pdu->num_bis), pdu); -} - -int hci_le_big_create_sync_pending(struct hci_dev *hdev) -{ - /* Queue big_create_sync */ - return hci_cmd_sync_queue_once(hdev, big_create_sync, - NULL, big_create_sync_complete); -} - -int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, - struct bt_iso_qos *qos, - __u16 sync_handle, __u8 num_bis, __u8 bis[]) +int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, + struct bt_iso_qos *qos, __u16 sync_handle, + __u8 num_bis, __u8 bis[]) { int err; @@ -2274,9 +2106,10 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, hcon->num_bis = num_bis; memcpy(hcon->bis, bis, num_bis); + hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10); } - return hci_le_big_create_sync_pending(hdev); + return hci_connect_big_sync(hdev, hcon); } static void create_big_complete(struct hci_dev *hdev, void *data, int err) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 5f808f0b0e9a..6d6061111ac5 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -6378,8 +6378,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, hci_dev_clear_flag(hdev, HCI_PA_SYNC); - conn = hci_conn_hash_lookup_sid(hdev, ev->sid, &ev->bdaddr, - ev->bdaddr_type); + conn = hci_conn_hash_lookup_create_pa_sync(hdev); if (!conn) { bt_dev_err(hdev, "Unable to find connection for dst %pMR sid 0x%2.2x", @@ -6418,9 +6417,6 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, } unlock: - /* Handle any other pending PA sync command */ - hci_pa_create_sync_pending(hdev); - hci_dev_unlock(hdev); } @@ -6932,7 +6928,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); - if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED, + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABLISHED, flex_array_size(ev, bis, ev->num_bis))) return; @@ -7003,9 +6999,6 @@ static void hci_le_big_sync_established_evt(struct hci_dev 
*hdev, void *data, } unlock: - /* Handle any other pending BIG sync command */ - hci_le_big_create_sync_pending(hdev); - hci_dev_unlock(hdev); } @@ -7127,8 +7120,8 @@ static const struct hci_le_ev { hci_le_create_big_complete_evt, sizeof(struct hci_evt_le_create_big_complete), HCI_MAX_EVENT_SIZE), - /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ - HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, + /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABLISHED] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABLISHED, hci_le_big_sync_established_evt, sizeof(struct hci_evt_le_big_sync_estabilished), HCI_MAX_EVENT_SIZE), diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 609b035e5c90..e56b1cbedab9 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -2693,16 +2693,16 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev) /* Force address filtering if PA Sync is in progress */ if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { - struct hci_cp_le_pa_create_sync *sent; + struct hci_conn *conn; - sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC); - if (sent) { + conn = hci_conn_hash_lookup_create_pa_sync(hdev); + if (conn) { struct conn_params pa; memset(&pa, 0, sizeof(pa)); - bacpy(&pa.addr, &sent->addr); - pa.addr_type = sent->addr_type; + bacpy(&pa.addr, &conn->dst); + pa.addr_type = conn->dst_type; /* Clear first since there could be addresses left * behind. @@ -6895,3 +6895,143 @@ int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp, HCI_CMD_TIMEOUT); } + +static void create_pa_complete(struct hci_dev *hdev, void *data, int err) +{ + bt_dev_dbg(hdev, "err %d", err); + + if (!err) + return; + + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + if (err == -ECANCELED) + return; + + hci_dev_lock(hdev); + + hci_update_passive_scan_sync(hdev); + + hci_dev_unlock(hdev); +} + +static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data) +{ + struct hci_cp_le_pa_create_sync cp; + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC)) + return -EBUSY; + + /* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync can + * program the address in the allow list so PA advertisements can be + * received. + */ + set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags); + + hci_update_passive_scan_sync(hdev); + + memset(&cp, 0, sizeof(cp)); + cp.options = qos->bcast.options; + cp.sid = conn->sid; + cp.addr_type = conn->dst_type; + bacpy(&cp.addr, &conn->dst); + cp.skip = cpu_to_le16(qos->bcast.skip); + cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); + cp.sync_cte_type = qos->bcast.sync_cte_type; + + /* The spec allows only one pending LE Periodic Advertising Create + * Sync command at a time so we forcefully wait for PA Sync Established + * event since cmd_work can only schedule one command at a time. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2493: + * + * If the Host issues this command when another HCI_LE_Periodic_ + * Advertising_Create_Sync command is pending, the Controller shall + * return the error code Command Disallowed (0x0C). 
+ */ + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC, + sizeof(cp), &cp, + HCI_EV_LE_PA_SYNC_ESTABLISHED, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); + + return err; +} + +int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn, + create_pa_complete); +} + +static void create_big_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + + if (hci_conn_valid(hdev, conn)) + clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); +} + +static int hci_le_big_create_sync(struct hci_dev *hdev, void *data) +{ + DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11); + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + int err; + + if (!hci_conn_valid(hdev, conn)) + return -ECANCELED; + + set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags); + + memset(cp, 0, sizeof(*cp)); + cp->handle = qos->bcast.big; + cp->sync_handle = cpu_to_le16(conn->sync_handle); + cp->encryption = qos->bcast.encryption; + memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode)); + cp->mse = qos->bcast.mse; + cp->timeout = cpu_to_le16(qos->bcast.timeout); + cp->num_bis = conn->num_bis; + memcpy(cp->bis, conn->bis, conn->num_bis); + + /* The spec allows only one pending LE BIG Create Sync command at + * a time, so we forcefully wait for BIG Sync Established event since + * cmd_work can only schedule one command at a time. + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2586: + * + * If the Host sends this command when the Controller is in the + * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_ + * Established event has not been generated, the Controller shall + * return the error code Command Disallowed (0x0C). 
+ */ + err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC, + struct_size(cp, bis, cp->num_bis), cp, + HCI_EVT_LE_BIG_SYNC_ESTABLISHED, + conn->conn_timeout, NULL); + if (err == -ETIMEDOUT) + hci_le_big_terminate_sync(hdev, cp->handle); + + return err; +} + +int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn, + create_big_complete); +} diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 3501a991f1c6..2819cda616bc 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -1462,14 +1462,13 @@ static void iso_conn_big_sync(struct sock *sk) lock_sock(sk); if (!test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { - err = hci_le_big_create_sync(hdev, iso_pi(sk)->conn->hcon, - &iso_pi(sk)->qos, - iso_pi(sk)->sync_handle, - iso_pi(sk)->bc_num_bis, - iso_pi(sk)->bc_bis); + err = hci_conn_big_create_sync(hdev, iso_pi(sk)->conn->hcon, + &iso_pi(sk)->qos, + iso_pi(sk)->sync_handle, + iso_pi(sk)->bc_num_bis, + iso_pi(sk)->bc_bis); if (err) - bt_dev_err(hdev, "hci_le_big_create_sync: %d", - err); + bt_dev_err(hdev, "hci_big_create_sync: %d", err); } release_sock(sk); @@ -1922,7 +1921,7 @@ static void iso_conn_ready(struct iso_conn *conn) hcon); } else if (test_bit(HCI_CONN_BIG_SYNC_FAILED, &hcon->flags)) { ev = hci_recv_event_data(hcon->hdev, - HCI_EVT_LE_BIG_SYNC_ESTABILISHED); + HCI_EVT_LE_BIG_SYNC_ESTABLISHED); /* Get reference to PA sync parent socket, if it exists */ parent = iso_get_sock(&hcon->src, &hcon->dst, @@ -2113,12 +2112,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && !test_and_set_bit(BT_SK_BIG_SYNC, &iso_pi(sk)->flags)) { - err = hci_le_big_create_sync(hdev, - hcon, - &iso_pi(sk)->qos, - iso_pi(sk)->sync_handle, - iso_pi(sk)->bc_num_bis, - iso_pi(sk)->bc_bis); + err = hci_conn_big_create_sync(hdev, hcon, + &iso_pi(sk)->qos, + iso_pi(sk)->sync_handle, + iso_pi(sk)->bc_num_bis, + iso_pi(sk)->bc_bis); if (err) { bt_dev_err(hdev, "hci_le_big_create_sync: %d", err); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 5ca7ac43c58d..73472756618a 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7415,6 +7415,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, return -ENOMEM; /* Init rx_len */ conn->rx_len = len; + + skb_set_delivery_time(conn->rx_skb, skb->tstamp, + skb->tstamp_type); } /* Copy as much as the rx_skb can hold */ diff --git a/net/can/gw.c b/net/can/gw.c index ef93293c1fae..55eccb1c7620 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -130,7 +130,7 @@ struct cgw_job { u32 handled_frames; u32 dropped_frames; u32 deleted_frames; - struct cf_mod mod; + struct cf_mod __rcu *cf_mod; union { /* CAN frame data source */ struct net_device *dev; @@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) struct cgw_job *gwj = (struct cgw_job *)data; struct canfd_frame *cf; struct sk_buff *nskb; + struct cf_mod *mod; int modidx = 0; /* process strictly Classic CAN or CAN FD frames */ @@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) * When there is at least one modification function activated, * we need to copy the skb as we want to modify skb->data. 
*/ - if (gwj->mod.modfunc[0]) + mod = rcu_dereference(gwj->cf_mod); + if (mod->modfunc[0]) nskb = skb_copy(skb, GFP_ATOMIC); else nskb = skb_clone(skb, GFP_ATOMIC); @@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) cf = (struct canfd_frame *)nskb->data; /* perform preprocessed modification functions if there are any */ - while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) - (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); + while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx]) + (*mod->modfunc[modidx++])(cf, mod); /* Has the CAN frame been modified? */ if (modidx) { @@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) } /* check for checksum updates */ - if (gwj->mod.csumfunc.crc8) - (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); + if (mod->csumfunc.crc8) + (*mod->csumfunc.crc8)(cf, &mod->csum.crc8); - if (gwj->mod.csumfunc.xor) - (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); + if (mod->csumfunc.xor) + (*mod->csumfunc.xor)(cf, &mod->csum.xor); } /* clear the skb timestamp if not configured the other way */ @@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head) { struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); + /* cgw_job::cf_mod is always accessed from the same cgw_job object within + * the same RCU read section. Once cgw_job is scheduled for removal, + * cf_mod can also be removed without mandating an additional grace period. + */ + kfree(rcu_access_pointer(gwj->cf_mod)); kmem_cache_free(cgw_cache, gwj); } +/* Return cgw_job::cf_mod with RTNL protected section */ +static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj) +{ + return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked()); +} + static int cgw_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { @@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, { struct rtcanmsg *rtcan; struct nlmsghdr *nlh; + struct cf_mod *mod; nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); if (!nlh) @@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, goto cancel; } + mod = cgw_job_cf_mod(gwj); if (gwj->flags & CGW_FLAGS_CAN_FD) { struct cgw_fdframe_mod mb; - if (gwj->mod.modtype.and) { - memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.and; + if (mod->modtype.and) { + memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); + mb.modtype = mod->modtype.and; if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.or) { - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.or; + if (mod->modtype.or) { + memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); + mb.modtype = mod->modtype.or; if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.xor) { - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.xor; + if (mod->modtype.xor) { + memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); + mb.modtype = mod->modtype.xor; if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.set) { - memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.set; + if (mod->modtype.set) { + memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); + mb.modtype = mod->modtype.set; if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) goto cancel; } } else { struct cgw_frame_mod mb; - if (gwj->mod.modtype.and) { - 
memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.and; + if (mod->modtype.and) { + memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); + mb.modtype = mod->modtype.and; if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.or) { - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.or; + if (mod->modtype.or) { + memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); + mb.modtype = mod->modtype.or; if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.xor) { - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.xor; + if (mod->modtype.xor) { + memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); + mb.modtype = mod->modtype.xor; if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.set) { - memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.set; + if (mod->modtype.set) { + memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); + mb.modtype = mod->modtype.set; if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) goto cancel; } } - if (gwj->mod.uid) { - if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) + if (mod->uid) { + if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0) goto cancel; } - if (gwj->mod.csumfunc.crc8) { + if (mod->csumfunc.crc8) { if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, - &gwj->mod.csum.crc8) < 0) + &mod->csum.crc8) < 0) goto cancel; } - if (gwj->mod.csumfunc.xor) { + if (mod->csumfunc.xor) { if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, - &gwj->mod.csum.xor) < 0) + &mod->csum.xor) < 0) goto cancel; } @@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, struct net *net = sock_net(skb->sk); struct rtcanmsg *r; struct cgw_job *gwj; - struct cf_mod mod; + struct cf_mod *mod; struct can_can_gw ccgw; u8 limhops = 0; int err = 0; @@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, if (r->gwtype != CGW_TYPE_CAN_CAN) return -EINVAL; - err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); + mod = kmalloc(sizeof(*mod), GFP_KERNEL); + if (!mod) + return -ENOMEM; + + err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); if (err < 0) - return err; + goto out_free_cf; - if (mod.uid) { + if (mod->uid) { ASSERT_RTNL(); /* check for updating an existing job with identical uid */ hlist_for_each_entry(gwj, &net->can.cgw_list, list) { - if (gwj->mod.uid != mod.uid) + struct cf_mod *old_cf; + + old_cf = cgw_job_cf_mod(gwj); + if (old_cf->uid != mod->uid) continue; /* interfaces & filters must be identical */ - if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) - return -EINVAL; + if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) { + err = -EINVAL; + goto out_free_cf; + } - /* update modifications with disabled softirq & quit */ - local_bh_disable(); - memcpy(&gwj->mod, &mod, sizeof(mod)); - local_bh_enable(); + rcu_assign_pointer(gwj->cf_mod, mod); + kfree_rcu_mightsleep(old_cf); return 0; } } /* ifindex == 0 is not allowed for job creation */ - if (!ccgw.src_idx || !ccgw.dst_idx) - return -ENODEV; + if (!ccgw.src_idx || !ccgw.dst_idx) { + err = -ENODEV; + goto out_free_cf; + } gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); - if (!gwj) - return -ENOMEM; + if (!gwj) { + err = -ENOMEM; + goto out_free_cf; + } gwj->handled_frames = 0; gwj->dropped_frames = 0; @@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, gwj->limit_hops = 
limhops; /* insert already parsed information */ - memcpy(&gwj->mod, &mod, sizeof(mod)); + RCU_INIT_POINTER(gwj->cf_mod, mod); memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); err = -ENODEV; @@ -1152,9 +1178,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, if (!err) hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); out: - if (err) + if (err) { kmem_cache_free(cgw_cache, gwj); - +out_free_cf: + kfree(mod); + } return err; } @@ -1214,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, /* remove only the first matching entry */ hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { + struct cf_mod *cf_mod; + if (gwj->flags != r->flags) continue; if (gwj->limit_hops != limhops) continue; + cf_mod = cgw_job_cf_mod(gwj); /* we have a match when uid is enabled and identical */ - if (gwj->mod.uid || mod.uid) { - if (gwj->mod.uid != mod.uid) + if (cf_mod->uid || mod.uid) { + if (cf_mod->uid != mod.uid) continue; } else { /* no uid => check for identical modifications */ - if (memcmp(&gwj->mod, &mod, sizeof(mod))) + if (memcmp(cf_mod, &mod, sizeof(mod))) continue; } diff --git a/net/core/dev.c b/net/core/dev.c index 1be7cb73a602..11da1e272ec2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -9193,18 +9193,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify) return 0; } -/** - * dev_set_promiscuity - update promiscuity count on a device - * @dev: device - * @inc: modifier - * - * Add or remove promiscuity from a device. While the count in the device - * remains above zero the interface remains promiscuous. Once it hits zero - * the device reverts back to normal filtering operation. A negative inc - * value is used to drop promiscuity on the device. - * Return 0 if successful or a negative errno code on error. - */ -int dev_set_promiscuity(struct net_device *dev, int inc) +int netif_set_promiscuity(struct net_device *dev, int inc) { unsigned int old_flags = dev->flags; int err; @@ -9216,7 +9205,6 @@ int dev_set_promiscuity(struct net_device *dev, int inc) dev_set_rx_mode(dev); return err; } -EXPORT_SYMBOL(dev_set_promiscuity); int netif_set_allmulti(struct net_device *dev, int inc, bool notify) { @@ -11966,9 +11954,9 @@ void unregister_netdevice_many_notify(struct list_head *head, struct sk_buff *skb = NULL; /* Shutdown queueing discipline. */ + netdev_lock_ops(dev); dev_shutdown(dev); dev_tcx_uninstall(dev); - netdev_lock_ops(dev); dev_xdp_uninstall(dev); dev_memory_provider_uninstall(dev); netdev_unlock_ops(dev); @@ -12161,7 +12149,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, synchronize_net(); /* Shutdown queueing discipline. */ + netdev_lock_ops(dev); dev_shutdown(dev); + netdev_unlock_ops(dev); /* Notify protocols, that we are about to destroy * this device. They should clean all the things. diff --git a/net/core/dev_api.c b/net/core/dev_api.c index 90898cd540ce..f9a160ab596f 100644 --- a/net/core/dev_api.c +++ b/net/core/dev_api.c @@ -268,6 +268,29 @@ void dev_disable_lro(struct net_device *dev) EXPORT_SYMBOL(dev_disable_lro); /** + * dev_set_promiscuity() - update promiscuity count on a device + * @dev: device + * @inc: modifier + * + * Add or remove promiscuity from a device. While the count in the device + * remains above zero the interface remains promiscuous. Once it hits zero + * the device reverts back to normal filtering operation. A negative inc + * value is used to drop promiscuity on the device. + * Return 0 if successful or a negative errno code on error. 
+ */ +int dev_set_promiscuity(struct net_device *dev, int inc) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_set_promiscuity(dev, inc); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_set_promiscuity); + +/** * dev_set_allmulti() - update allmulti count on a device * @dev: device * @inc: modifier diff --git a/net/core/filter.c b/net/core/filter.c index 79cab4d78dc3..577a4504e26f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2509,6 +2509,7 @@ int skb_do_redirect(struct sk_buff *skb) goto out_drop; skb->dev = dev; dev_sw_netstats_rx_add(dev, skb->len); + skb_scrub_packet(skb, false); return -EAGAIN; } return flags & BPF_F_NEIGH ? diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index 230743bdbb14..dae9f0d432fb 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -708,25 +708,66 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, return 0; } +/** + * netdev_stat_queue_sum() - add up queue stats from range of queues + * @netdev: net_device + * @rx_start: index of the first Rx queue to query + * @rx_end: index after the last Rx queue (first *not* to query) + * @rx_sum: output Rx stats, should be already initialized + * @tx_start: index of the first Tx queue to query + * @tx_end: index after the last Tx queue (first *not* to query) + * @tx_sum: output Tx stats, should be already initialized + * + * Add stats from [start, end) range of queue IDs to *x_sum structs. + * The sum structs must be already initialized. Usually this + * helper is invoked from the .get_base_stats callbacks of drivers + * to account for stats of disabled queues. In that case the ranges + * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues). + */ +void netdev_stat_queue_sum(struct net_device *netdev, + int rx_start, int rx_end, + struct netdev_queue_stats_rx *rx_sum, + int tx_start, int tx_end, + struct netdev_queue_stats_tx *tx_sum) +{ + const struct netdev_stat_ops *ops; + struct netdev_queue_stats_rx rx; + struct netdev_queue_stats_tx tx; + int i; + + ops = netdev->stat_ops; + + for (i = rx_start; i < rx_end; i++) { + memset(&rx, 0xff, sizeof(rx)); + if (ops->get_queue_stats_rx) + ops->get_queue_stats_rx(netdev, i, &rx); + netdev_nl_stats_add(rx_sum, &rx, sizeof(rx)); + } + for (i = tx_start; i < tx_end; i++) { + memset(&tx, 0xff, sizeof(tx)); + if (ops->get_queue_stats_tx) + ops->get_queue_stats_tx(netdev, i, &tx); + netdev_nl_stats_add(tx_sum, &tx, sizeof(tx)); + } +} +EXPORT_SYMBOL(netdev_stat_queue_sum); + static int netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { - struct netdev_queue_stats_rx rx_sum, rx; - struct netdev_queue_stats_tx tx_sum, tx; - const struct netdev_stat_ops *ops; + struct netdev_queue_stats_rx rx_sum; + struct netdev_queue_stats_tx tx_sum; void *hdr; - int i; - ops = netdev->stat_ops; /* Netdev can't guarantee any complete counters */ - if (!ops->get_base_stats) + if (!netdev->stat_ops->get_base_stats) return 0; memset(&rx_sum, 0xff, sizeof(rx_sum)); memset(&tx_sum, 0xff, sizeof(tx_sum)); - ops->get_base_stats(netdev, &rx_sum, &tx_sum); + netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum); /* The op was there, but nothing reported, don't bother */ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) && @@ -739,18 +780,8 @@ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) goto nla_put_failure; - for (i = 0; i < netdev->real_num_rx_queues; i++) { - 
memset(&rx, 0xff, sizeof(rx)); - if (ops->get_queue_stats_rx) - ops->get_queue_stats_rx(netdev, i, &rx); - netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx)); - } - for (i = 0; i < netdev->real_num_tx_queues; i++) { - memset(&tx, 0xff, sizeof(tx)); - if (ops->get_queue_stats_tx) - ops->get_queue_stats_tx(netdev, i, &tx); - netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx)); - } + netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum, + 0, netdev->real_num_tx_queues, &tx_sum); if (netdev_nl_stats_write_rx(rsp, &rx_sum) || netdev_nl_stats_write_tx(rsp, &tx_sum)) diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 934f777f29d3..d293087b426d 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -439,7 +439,7 @@ static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, iif, sdif); NAPI_GRO_CB(skb)->is_flist = !sk; if (sk) - sock_put(sk); + sock_gen_put(sk); } INDIRECT_CALLABLE_SCOPE diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 2c0725583be3..9a8142ccbabe 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -247,6 +247,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs) return segs; } +static void __udpv6_gso_segment_csum(struct sk_buff *seg, + struct in6_addr *oldip, + const struct in6_addr *newip, + __be16 *oldport, __be16 newport) +{ + struct udphdr *uh = udp_hdr(seg); + + if (ipv6_addr_equal(oldip, newip) && *oldport == newport) + return; + + if (uh->check) { + inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32, + newip->s6_addr32, true); + + inet_proto_csum_replace2(&uh->check, seg, *oldport, newport, + false); + if (!uh->check) + uh->check = CSUM_MANGLED_0; + } + + *oldip = *newip; + *oldport = newport; +} + +static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs) +{ + const struct ipv6hdr *iph; + const struct udphdr *uh; + struct ipv6hdr *iph2; + struct sk_buff *seg; + struct udphdr *uh2; + + seg = segs; + uh = udp_hdr(seg); + iph = ipv6_hdr(seg); + uh2 = udp_hdr(seg->next); + iph2 = ipv6_hdr(seg->next); + + if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) && + ipv6_addr_equal(&iph->saddr, &iph2->saddr) && + ipv6_addr_equal(&iph->daddr, &iph2->daddr)) + return segs; + + while ((seg = seg->next)) { + uh2 = udp_hdr(seg); + iph2 = ipv6_hdr(seg); + + __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr, + &uh2->source, uh->source); + __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr, + &uh2->dest, uh->dest); + } + + return segs; +} + static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, netdev_features_t features, bool is_ipv6) @@ -259,7 +315,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss); - return is_ipv6 ? 
skb : __udpv4_gso_segment_list_csum(skb); + if (is_ipv6) + return __udpv6_gso_segment_list_csum(skb); + else + return __udpv4_gso_segment_list_csum(skb); } struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 9ba83f0c9928..c6b22170dc49 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3214,16 +3214,13 @@ static void add_v4_addrs(struct inet6_dev *idev) struct in6_addr addr; struct net_device *dev; struct net *net = dev_net(idev->dev); - int scope, plen, offset = 0; + int scope, plen; u32 pflags = 0; ASSERT_RTNL(); memset(&addr, 0, sizeof(struct in6_addr)); - /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */ - if (idev->dev->addr_len == sizeof(struct in6_addr)) - offset = sizeof(struct in6_addr) - 4; - memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); + memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4); if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) { scope = IPV6_ADDR_COMPATv4; @@ -3534,7 +3531,13 @@ static void addrconf_gre_config(struct net_device *dev) return; } - if (dev->type == ARPHRD_ETHER) { + /* Generate the IPv6 link-local address using addrconf_addr_gen(), + * unless we have an IPv4 GRE device not bound to an IP address and + * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this + * case). Such devices fall back to add_v4_addrs() instead. + */ + if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 && + idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) { addrconf_addr_gen(idev, true); return; } diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index d9b11fe41bf0..a8a04f441e78 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -42,7 +42,7 @@ static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, iif, sdif); NAPI_GRO_CB(skb)->is_flist = !sk; if (sk) - sock_put(sk); + sock_gen_put(sk); #endif /* IS_ENABLED(CONFIG_IPV6) */ } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 5d1f2d6d09ad..35eaf0812c5b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -7675,6 +7675,7 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res); int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 + 2 * 2 * IEEE80211_TTLM_NUM_TIDS; + u16 status_code; skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len); if (!skb) @@ -7697,19 +7698,18 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, WARN_ON(1); fallthrough; case NEG_TTLM_RES_REJECT: - mgmt->u.action.u.ttlm_res.status_code = - WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING; + status_code = WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING; break; case NEG_TTLM_RES_ACCEPT: - mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS; + status_code = WLAN_STATUS_SUCCESS; break; case NEG_TTLM_RES_SUGGEST_PREFERRED: - mgmt->u.action.u.ttlm_res.status_code = - WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED; + status_code = WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED; ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm); break; } + mgmt->u.action.u.ttlm_res.status_code = cpu_to_le16(status_code); ieee80211_tx_skb(sdata, skb); } @@ -7875,7 +7875,7 @@ void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, * This can be better implemented in the future, to handle request * rejections. 
*/ - if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS) + if (le16_to_cpu(mgmt->u.action.u.ttlm_res.status_code) != WLAN_STATUS_SUCCESS) __ieee80211_disconnect(sdata); } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index b17b3cc7fb90..a362254b310c 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -1085,7 +1085,13 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp); - if (status->free_list) + /* + * This is a bit racy but we can avoid a lot of work + * with this test... + */ + if (local->tx_mntrs) + ieee80211_tx_monitor(local, skb, retry_count, status); + else if (status->free_list) list_add_tail(&skb->list, status->free_list); else dev_kfree_skb(skb); diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index cf3ce72c3de6..5251524b96af 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -64,7 +64,7 @@ struct hbucket { #define ahash_sizeof_regions(htable_bits) \ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region)) #define ahash_region(n, htable_bits) \ - ((n) % ahash_numof_locks(htable_bits)) + ((n) / jhash_size(HTABLE_REGION_BITS)) #define ahash_bucket_start(h, htable_bits) \ ((htable_bits) < HTABLE_REGION_BITS ? 0 \ : (h) * jhash_size(HTABLE_REGION_BITS)) diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 3313bceb6cc9..014f07740369 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) return false; } -/* Get route to daddr, update *saddr, optionally bind route to saddr */ +/* Get route to daddr, optionally bind route to saddr */ static struct rtable *do_output_route4(struct net *net, __be32 daddr, - int rt_mode, __be32 *saddr) + int rt_mode, __be32 *ret_saddr) { struct flowi4 fl4; struct rtable *rt; - bool loop = false; memset(&fl4, 0, sizeof(fl4)); fl4.daddr = daddr; @@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, retry: rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) { - /* Invalid saddr ? */ - if (PTR_ERR(rt) == -EINVAL && *saddr && - rt_mode & IP_VS_RT_MODE_CONNECT && !loop) { - *saddr = 0; - flowi4_update_output(&fl4, 0, daddr, 0); - goto retry; - } IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr); return NULL; - } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { + } + if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { ip_rt_put(rt); - *saddr = fl4.saddr; flowi4_update_output(&fl4, 0, daddr, fl4.saddr); - loop = true; + rt_mode = 0; goto retry; } - *saddr = fl4.saddr; + if (ret_saddr) + *ret_saddr = fl4.saddr; return rt; } @@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, if (ret_saddr) *ret_saddr = dest_dst->dst_saddr.ip; } else { - __be32 saddr = htonl(INADDR_ANY); - noref = 0; /* For such unconfigured boxes avoid many route lookups * for performance reasons because we do not remember saddr */ rt_mode &= ~IP_VS_RT_MODE_CONNECT; - rt = do_output_route4(net, daddr, rt_mode, &saddr); + rt = do_output_route4(net, daddr, rt_mode, ret_saddr); if (!rt) goto err_unreach; - if (ret_saddr) - *ret_saddr = saddr; } local = (rt->rt_flags & RTCF_LOCAL) ? 
1 : 0; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 61fea7baae5d..2f22ca59586f 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -975,8 +975,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, upcall.cmd = OVS_PACKET_CMD_ACTION; upcall.mru = OVS_CB(skb)->mru; - for (a = nla_data(attr), rem = nla_len(attr); rem > 0; - a = nla_next(a, &rem)) { + nla_for_each_nested(a, attr, rem) { switch (nla_type(a)) { case OVS_USERSPACE_ATTR_USERDATA: upcall.userdata = a; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index e0a81d313aa7..9b6d79bd8737 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -35,6 +35,11 @@ struct drr_sched { struct Qdisc_class_hash clhash; }; +static bool cl_is_active(struct drr_class *cl) +{ + return !list_empty(&cl->alist); +} + static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) { struct drr_sched *q = qdisc_priv(sch); @@ -337,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; int err = 0; - bool first; cl = drr_classify(skb, sch, &err); if (cl == NULL) { @@ -347,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { @@ -357,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - if (first) { + if (!cl_is_active(cl)) { list_add_tail(&cl->alist, &q->active); cl->deficit = cl->quantum; } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index c3bdeb14185b..2c069f0181c6 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = { [TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 }, }; +static bool cl_is_active(struct ets_class *cl) +{ + return !list_empty(&cl->alist); +} + static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr, unsigned int *quantum, struct netlink_ext_ack *extack) @@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct ets_sched *q = qdisc_priv(sch); struct ets_class *cl; int err = 0; - bool first; cl = ets_classify(skb, sch, &err); if (!cl) { @@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { @@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - if (first && !ets_class_is_strict(q, cl)) { + if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) { list_add_tail(&cl->alist, &q->active); cl->deficit = cl->quantum; } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 6c8ef826cec0..cb8c525ea20e 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1569,7 +1569,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; } - if (first) { + if (first && !cl->cl_nactive) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); if (cl->cl_flags & HFSC_FSC) diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 4b9a639b642e..14bf71f57057 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q, */ static inline void htb_next_rb_node(struct rb_node 
**n) { - *n = rb_next(*n); + if (*n) + *n = rb_next(*n); } /** @@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) */ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) { - WARN_ON(!cl->prio_activity); - + if (!cl->prio_activity) + return; htb_deactivate_prios(q, cl); cl->prio_activity = 0; } @@ -1485,8 +1486,6 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct htb_class *cl = (struct htb_class *)arg; - if (!cl->prio_activity) - return; htb_deactivate(qdisc_priv(sch), cl); } @@ -1740,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg, if (cl->parent) cl->parent->children--; - if (cl->prio_activity) - htb_deactivate(q, cl); + htb_deactivate(q, cl); if (cl->cmode != HTB_CAN_SEND) htb_safe_rb_erase(&cl->pq_node, @@ -1949,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, /* turn parent into inner node */ qdisc_purge_queue(parent->leaf.q); parent_qdisc = parent->leaf.q; - if (parent->prio_activity) - htb_deactivate(q, parent); + htb_deactivate(q, parent); /* remove from evt list because of level change */ if (parent->cmode != HTB_CAN_SEND) { diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 687a932eb9b2..bf1282cb22eb 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -202,6 +202,11 @@ struct qfq_sched { */ enum update_reason {enqueue, requeue}; +static bool cl_is_active(struct qfq_class *cl) +{ + return !list_empty(&cl->alist); +} + static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) { struct qfq_sched *q = qdisc_priv(sch); @@ -1215,7 +1220,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct qfq_class *cl; struct qfq_aggregate *agg; int err = 0; - bool first; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { @@ -1237,7 +1241,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, } gso_segs = skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; - first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); @@ -1253,8 +1256,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, ++sch->q.qlen; agg = cl->agg; - /* if the queue was not empty, then done here */ - if (!first) { + /* if the class is active, then done here */ + if (cl_is_active(cl)) { if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && list_first_entry(&agg->active, struct qfq_class, alist) == cl && cl->deficit < len) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 9865f305275d..ddd3a97f6609 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2681,7 +2681,7 @@ cfg80211_defrag_mle(const struct element *mle, const u8 *ie, size_t ielen, /* Required length for first defragmentation */ buf_len = mle->datalen - 1; for_each_element(elem, mle->data + mle->datalen, - ielen - sizeof(*mle) + mle->datalen) { + ie + ielen - mle->data - mle->datalen) { if (elem->id != WLAN_EID_FRAGMENT) break; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 5696af45bcf7..4abc81f33d3e 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -338,13 +338,14 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) u32 len = xdp_get_buff_len(xdp); int err; - spin_lock_bh(&xs->rx_lock); err = xsk_rcv_check(xs, xdp, len); if (!err) { + spin_lock_bh(&xs->pool->rx_lock); err = __xsk_rcv(xs, xdp, len); xsk_flush(xs); + spin_unlock_bh(&xs->pool->rx_lock); } - spin_unlock_bh(&xs->rx_lock); + return err; } @@ -1734,7 +1735,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, xs = xdp_sk(sk); xs->state = XSK_READY; mutex_init(&xs->mutex); - spin_lock_init(&xs->rx_lock); INIT_LIST_HEAD(&xs->map_list); spin_lock_init(&xs->map_list_lock); diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 25a76c5ce0f1..c5181a9044ad 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -89,6 +89,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, pool->addrs = umem->addrs; pool->tx_metadata_len = umem->tx_metadata_len; pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM; + spin_lock_init(&pool->rx_lock); INIT_LIST_HEAD(&pool->free_list); INIT_LIST_HEAD(&pool->xskb_list); INIT_LIST_HEAD(&pool->xsk_tx_list); diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs index 014af0d1fc70..a08eb5518cac 100644 --- a/rust/bindings/lib.rs +++ b/rust/bindings/lib.rs @@ -26,6 +26,7 @@ #[allow(dead_code)] #[allow(clippy::undocumented_unsafe_blocks)] +#[cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))] mod bindings_raw { // Manual definition for blocklisted types. type __kernel_size_t = usize; diff --git a/rust/kernel/alloc/kvec.rs b/rust/kernel/alloc/kvec.rs index ae9d072741ce..87a71fd40c3c 100644 --- a/rust/kernel/alloc/kvec.rs +++ b/rust/kernel/alloc/kvec.rs @@ -2,6 +2,9 @@ //! Implementation of [`Vec`]. +// May not be needed in Rust 1.87.0 (pending beta backport). +#![allow(clippy::ptr_eq)] + use super::{ allocator::{KVmalloc, Kmalloc, Vmalloc}, layout::ArrayLayout, diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs index a335c3b1ff5e..2054682c5724 100644 --- a/rust/kernel/list.rs +++ b/rust/kernel/list.rs @@ -4,6 +4,9 @@ //! A linked list implementation. +// May not be needed in Rust 1.87.0 (pending beta backport). 
+#![allow(clippy::ptr_eq)] + use crate::sync::ArcBorrow; use crate::types::Opaque; use core::iter::{DoubleEndedIterator, FusedIterator}; diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs index 878111cb77bc..fb61ce81ea28 100644 --- a/rust/kernel/str.rs +++ b/rust/kernel/str.rs @@ -73,7 +73,7 @@ impl fmt::Display for BStr { b'\r' => f.write_str("\\r")?, // Printable characters. 0x20..=0x7e => f.write_char(b as char)?, - _ => write!(f, "\\x{:02x}", b)?, + _ => write!(f, "\\x{b:02x}")?, } } Ok(()) @@ -109,7 +109,7 @@ impl fmt::Debug for BStr { b'\\' => f.write_str("\\\\")?, // Printable characters. 0x20..=0x7e => f.write_char(b as char)?, - _ => write!(f, "\\x{:02x}", b)?, + _ => write!(f, "\\x{b:02x}")?, } } f.write_char('"') @@ -447,7 +447,7 @@ impl fmt::Display for CStr { // Printable character. f.write_char(c as char)?; } else { - write!(f, "\\x{:02x}", c)?; + write!(f, "\\x{c:02x}")?; } } Ok(()) @@ -479,7 +479,7 @@ impl fmt::Debug for CStr { // Printable characters. b'\"' => f.write_str("\\\"")?, 0x20..=0x7e => f.write_char(c as char)?, - _ => write!(f, "\\x{:02x}", c)?, + _ => write!(f, "\\x{c:02x}")?, } } f.write_str("\"") @@ -641,13 +641,13 @@ mod tests { #[test] fn test_cstr_display() { let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap(); - assert_eq!(format!("{}", hello_world), "hello, world!"); + assert_eq!(format!("{hello_world}"), "hello, world!"); let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap(); - assert_eq!(format!("{}", non_printables), "\\x01\\x09\\x0a"); + assert_eq!(format!("{non_printables}"), "\\x01\\x09\\x0a"); let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap(); - assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu"); + assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu"); let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap(); - assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80"); + assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80"); } #[test] @@ -658,47 +658,47 @@ mod tests { bytes[i as usize] = i.wrapping_add(1); } let cstr = CStr::from_bytes_with_nul(&bytes).unwrap(); - assert_eq!(format!("{}", cstr), ALL_ASCII_CHARS); + assert_eq!(format!("{cstr}"), ALL_ASCII_CHARS); } #[test] fn test_cstr_debug() { let hello_world = CStr::from_bytes_with_nul(b"hello, world!\0").unwrap(); - assert_eq!(format!("{:?}", hello_world), "\"hello, world!\""); + assert_eq!(format!("{hello_world:?}"), "\"hello, world!\""); let non_printables = CStr::from_bytes_with_nul(b"\x01\x09\x0a\0").unwrap(); - assert_eq!(format!("{:?}", non_printables), "\"\\x01\\x09\\x0a\""); + assert_eq!(format!("{non_printables:?}"), "\"\\x01\\x09\\x0a\""); let non_ascii = CStr::from_bytes_with_nul(b"d\xe9j\xe0 vu\0").unwrap(); - assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\""); + assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\""); let good_bytes = CStr::from_bytes_with_nul(b"\xf0\x9f\xa6\x80\0").unwrap(); - assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\""); + assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\""); } #[test] fn test_bstr_display() { let hello_world = BStr::from_bytes(b"hello, world!"); - assert_eq!(format!("{}", hello_world), "hello, world!"); + assert_eq!(format!("{hello_world}"), "hello, world!"); let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_"); - assert_eq!(format!("{}", escapes), "_\\t_\\n_\\r_\\_'_\"_"); + assert_eq!(format!("{escapes}"), "_\\t_\\n_\\r_\\_'_\"_"); let others = BStr::from_bytes(b"\x01"); - 
assert_eq!(format!("{}", others), "\\x01"); + assert_eq!(format!("{others}"), "\\x01"); let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu"); - assert_eq!(format!("{}", non_ascii), "d\\xe9j\\xe0 vu"); + assert_eq!(format!("{non_ascii}"), "d\\xe9j\\xe0 vu"); let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80"); - assert_eq!(format!("{}", good_bytes), "\\xf0\\x9f\\xa6\\x80"); + assert_eq!(format!("{good_bytes}"), "\\xf0\\x9f\\xa6\\x80"); } #[test] fn test_bstr_debug() { let hello_world = BStr::from_bytes(b"hello, world!"); - assert_eq!(format!("{:?}", hello_world), "\"hello, world!\""); + assert_eq!(format!("{hello_world:?}"), "\"hello, world!\""); let escapes = BStr::from_bytes(b"_\t_\n_\r_\\_\'_\"_"); - assert_eq!(format!("{:?}", escapes), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\""); + assert_eq!(format!("{escapes:?}"), "\"_\\t_\\n_\\r_\\\\_'_\\\"_\""); let others = BStr::from_bytes(b"\x01"); - assert_eq!(format!("{:?}", others), "\"\\x01\""); + assert_eq!(format!("{others:?}"), "\"\\x01\""); let non_ascii = BStr::from_bytes(b"d\xe9j\xe0 vu"); - assert_eq!(format!("{:?}", non_ascii), "\"d\\xe9j\\xe0 vu\""); + assert_eq!(format!("{non_ascii:?}"), "\"d\\xe9j\\xe0 vu\""); let good_bytes = BStr::from_bytes(b"\xf0\x9f\xa6\x80"); - assert_eq!(format!("{:?}", good_bytes), "\"\\xf0\\x9f\\xa6\\x80\""); + assert_eq!(format!("{good_bytes:?}"), "\"\\xf0\\x9f\\xa6\\x80\""); } } diff --git a/rust/macros/kunit.rs b/rust/macros/kunit.rs index 4f553ecf40c0..99ccac82edde 100644 --- a/rust/macros/kunit.rs +++ b/rust/macros/kunit.rs @@ -15,10 +15,7 @@ pub(crate) fn kunit_tests(attr: TokenStream, ts: TokenStream) -> TokenStream { } if attr.len() > 255 { - panic!( - "The test suite name `{}` exceeds the maximum length of 255 bytes", - attr - ) + panic!("The test suite name `{attr}` exceeds the maximum length of 255 bytes") } let mut tokens: Vec<_> = ts.into_iter().collect(); @@ -102,16 +99,14 @@ pub(crate) fn kunit_tests(attr: TokenStream, ts: TokenStream) -> TokenStream { let mut kunit_macros = "".to_owned(); let mut test_cases = "".to_owned(); for test in &tests { - let kunit_wrapper_fn_name = format!("kunit_rust_wrapper_{}", test); + let kunit_wrapper_fn_name = format!("kunit_rust_wrapper_{test}"); let kunit_wrapper = format!( - "unsafe extern \"C\" fn {}(_test: *mut kernel::bindings::kunit) {{ {}(); }}", - kunit_wrapper_fn_name, test + "unsafe extern \"C\" fn {kunit_wrapper_fn_name}(_test: *mut kernel::bindings::kunit) {{ {test}(); }}" ); writeln!(kunit_macros, "{kunit_wrapper}").unwrap(); writeln!( test_cases, - " kernel::kunit::kunit_case(kernel::c_str!(\"{}\"), {}),", - test, kunit_wrapper_fn_name + " kernel::kunit::kunit_case(kernel::c_str!(\"{test}\"), {kunit_wrapper_fn_name})," ) .unwrap(); } diff --git a/rust/macros/module.rs b/rust/macros/module.rs index a9418fbc9b44..2f66107847f7 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -48,7 +48,7 @@ impl<'a> ModInfoBuilder<'a> { ) } else { // Loadable modules' modinfo strings go as-is. - format!("{field}={content}\0", field = field, content = content) + format!("{field}={content}\0") }; write!( @@ -126,10 +126,7 @@ impl ModuleInfo { }; if seen_keys.contains(&key) { - panic!( - "Duplicated key \"{}\". Keys can only be specified once.", - key - ); + panic!("Duplicated key \"{key}\". 
Keys can only be specified once."); } assert_eq!(expect_punct(it), ':'); @@ -143,10 +140,7 @@ impl ModuleInfo { "license" => info.license = expect_string_ascii(it), "alias" => info.alias = Some(expect_string_array(it)), "firmware" => info.firmware = Some(expect_string_array(it)), - _ => panic!( - "Unknown key \"{}\". Valid keys are: {:?}.", - key, EXPECTED_KEYS - ), + _ => panic!("Unknown key \"{key}\". Valid keys are: {EXPECTED_KEYS:?}."), } assert_eq!(expect_punct(it), ','); @@ -158,7 +152,7 @@ impl ModuleInfo { for key in REQUIRED_KEYS { if !seen_keys.iter().any(|e| e == key) { - panic!("Missing required key \"{}\".", key); + panic!("Missing required key \"{key}\"."); } } @@ -170,10 +164,7 @@ impl ModuleInfo { } if seen_keys != ordered_keys { - panic!( - "Keys are not ordered as expected. Order them like: {:?}.", - ordered_keys - ); + panic!("Keys are not ordered as expected. Order them like: {ordered_keys:?}."); } info diff --git a/rust/macros/paste.rs b/rust/macros/paste.rs index 6529a387673f..cce712d19855 100644 --- a/rust/macros/paste.rs +++ b/rust/macros/paste.rs @@ -50,7 +50,7 @@ fn concat_helper(tokens: &[TokenTree]) -> Vec<(String, Span)> { let tokens = group.stream().into_iter().collect::<Vec<TokenTree>>(); segments.append(&mut concat_helper(tokens.as_slice())); } - token => panic!("unexpected token in paste segments: {:?}", token), + token => panic!("unexpected token in paste segments: {token:?}"), }; } diff --git a/rust/pin-init/internal/src/pinned_drop.rs b/rust/pin-init/internal/src/pinned_drop.rs index c824dd8b436d..c4ca7a70b726 100644 --- a/rust/pin-init/internal/src/pinned_drop.rs +++ b/rust/pin-init/internal/src/pinned_drop.rs @@ -28,8 +28,7 @@ pub(crate) fn pinned_drop(_args: TokenStream, input: TokenStream) -> TokenStream // Found the end of the generics, this should be `PinnedDrop`. assert!( matches!(tt, TokenTree::Ident(i) if i.to_string() == "PinnedDrop"), - "expected 'PinnedDrop', found: '{:?}'", - tt + "expected 'PinnedDrop', found: '{tt:?}'" ); pinned_drop_idx = Some(i); break; diff --git a/rust/uapi/lib.rs b/rust/uapi/lib.rs index 13495910271f..c98d7a8cde77 100644 --- a/rust/uapi/lib.rs +++ b/rust/uapi/lib.rs @@ -24,6 +24,7 @@ unreachable_pub, unsafe_op_in_unsafe_fn )] +#![cfg_attr(CONFIG_RUSTC_HAS_UNNECESSARY_TRANSMUTES, allow(unnecessary_transmutes))] // Manual definition of blocklisted types. type __kernel_size_t = usize; diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 2d6e59561c9d..d88acdf40855 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -8,6 +8,7 @@ # Default set of warnings, always enabled KBUILD_CFLAGS += -Wall +KBUILD_CFLAGS += -Wextra KBUILD_CFLAGS += -Wundef KBUILD_CFLAGS += -Werror=implicit-function-declaration KBUILD_CFLAGS += -Werror=implicit-int @@ -56,6 +57,13 @@ KBUILD_CFLAGS += -Wno-pointer-sign # globally built with -Wcast-function-type. KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type) +# Currently, disable -Wstringop-overflow for GCC 11, globally. +KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-disable-warning, stringop-overflow) +KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow) + +# Currently, disable -Wunterminated-string-initialization as broken +KBUILD_CFLAGS += $(call cc-disable-warning, unterminated-string-initialization) + # The allocators already balk at large sizes, so silence the compiler # warnings for bounds checks involving those possible values. 
While # -Wno-alloc-size-larger-than would normally be used here, earlier versions @@ -82,7 +90,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) # Warn if there is an enum types mismatch KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion) -KBUILD_CFLAGS += -Wextra KBUILD_CFLAGS += -Wunused # diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux index b0a6cd5b818c..85d60d986401 100644 --- a/scripts/Makefile.vmlinux +++ b/scripts/Makefile.vmlinux @@ -13,7 +13,7 @@ ifdef CONFIG_ARCH_VMLINUX_NEEDS_RELOCS vmlinux-final := vmlinux.unstripped quiet_cmd_strip_relocs = RSTRIP $@ - cmd_strip_relocs = $(OBJCOPY) --remove-section='.rel*' $< $@ + cmd_strip_relocs = $(OBJCOPY) --remove-section='.rel*' --remove-section=!'.rel*.dyn' $< $@ vmlinux: $(vmlinux-final) FORCE $(call if_changed,strip_relocs) diff --git a/sound/core/jack.c b/sound/core/jack.c index e4bcecdf89b7..de7c603e92b7 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -34,6 +34,7 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = { SW_JACK_PHYSICAL_INSERT, SW_VIDEOOUT_INSERT, SW_LINEIN_INSERT, + SW_USB_INSERT, }; #endif /* CONFIG_SND_JACK_INPUT_DEV */ @@ -241,8 +242,9 @@ static ssize_t jack_kctl_id_read(struct file *file, static const char * const jack_events_name[] = { "HEADPHONE(0x0001)", "MICROPHONE(0x0002)", "LINEOUT(0x0004)", "MECHANICAL(0x0008)", "VIDEOOUT(0x0010)", "LINEIN(0x0020)", - "", "", "", "BTN_5(0x0200)", "BTN_4(0x0400)", "BTN_3(0x0800)", - "BTN_2(0x1000)", "BTN_1(0x2000)", "BTN_0(0x4000)", "", + "USB(0x0040)", "", "", "BTN_5(0x0200)", "BTN_4(0x0400)", + "BTN_3(0x0800)", "BTN_2(0x1000)", "BTN_1(0x2000)", "BTN_0(0x4000)", + "", }; /* the recommended buffer size is 256 */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 877137cb09ac..8a2b09e4a7d5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -441,6 +441,10 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); fallthrough; case 0x10ec0215: + case 0x10ec0236: + case 0x10ec0245: + case 0x10ec0256: + case 0x10ec0257: case 0x10ec0285: case 0x10ec0289: alc_update_coef_idx(codec, 0x36, 1<<13, 0); @@ -448,12 +452,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0233: case 0x10ec0235: - case 0x10ec0236: - case 0x10ec0245: case 0x10ec0255: - case 0x10ec0256: case 0x19e58326: - case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: case 0x10ec0286: @@ -6742,6 +6742,25 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec, codec->power_save_node = 0; } +/* avoid DAC 0x06 for speaker switch 0x17; it has no volume control */ +static void alc274_fixup_hp_aio_bind_dacs(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */ + /* The speaker is routed to the Node 0x06 by a mistake, thus the + * speaker's volume can't be adjusted since the node doesn't have + * Amp-out capability. Assure the speaker and lineout pin to be + * coupled with DAC NID 0x02. 
+ */ + static const hda_nid_t preferred_pairs[] = { + 0x16, 0x02, 0x17, 0x02, 0x21, 0x03, 0 + }; + struct alc_spec *spec = codec->spec; + + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + spec->gen.preferred_dacs = preferred_pairs; +} + /* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */ static void alc289_fixup_asus_ga401(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -6963,6 +6982,41 @@ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec, } } +/* GPIO1 = amplifier on/off */ +static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct alc_spec *spec = codec->spec; + static const hda_nid_t conn[] = { 0x02 }; + static const struct hda_pintbl pincfgs[] = { + { 0x14, 0x90170110 }, /* front/high speakers */ + { 0x17, 0x90170130 }, /* back/bass speakers */ + { } + }; + + // enable mute led + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + /* needed for amp of back speakers */ + spec->gpio_mask |= 0x01; + spec->gpio_dir |= 0x01; + snd_hda_apply_pincfgs(codec, pincfgs); + /* share DAC to have unified volume control */ + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + break; + case HDA_FIXUP_ACT_INIT: + /* need to toggle GPIO to enable the amp of back speakers */ + alc_update_gpio_data(codec, 0x01, true); + msleep(100); + alc_update_gpio_data(codec, 0x01, false); + break; + } +} + static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -7761,6 +7815,7 @@ enum { ALC280_FIXUP_HP_9480M, ALC245_FIXUP_HP_X360_AMP, ALC285_FIXUP_HP_SPECTRE_X360_EB1, + ALC285_FIXUP_HP_SPECTRE_X360_DF1, ALC285_FIXUP_HP_ENVY_X360, ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -7970,6 +8025,7 @@ enum { ALC294_FIXUP_BASS_SPEAKER_15, ALC283_FIXUP_DELL_HP_RESUME, ALC294_FIXUP_ASUS_CS35L41_SPI_2, + ALC274_FIXUP_HP_AIO_BIND_DACS, }; /* A special fixup for Lenovo C940 and Yoga Duet 7; @@ -9837,6 +9893,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_eb1 }, + [ALC285_FIXUP_HP_SPECTRE_X360_DF1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_spectre_x360_df1 + }, [ALC285_FIXUP_HP_ENVY_X360] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_envy_x360, @@ -10340,6 +10400,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC, }, + [ALC274_FIXUP_HP_AIO_BIND_DACS] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc274_fixup_hp_aio_bind_dacs, + }, }; static const struct hda_quirk alc269_fixup_tbl[] = { @@ -10564,6 +10628,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1), SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -10768,12 +10833,13 
@@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8caf, "HP Elite mt645 G8 Mobile Thin Client", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8cbd, "HP Pavilion Aero Laptop 13-bg0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), - SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), - SND_PCI_QUIRK(0x103c, 0x8cde, "HP Spectre", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8cdd, "HP Spectre", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), + SND_PCI_QUIRK(0x103c, 0x8cde, "HP OmniBook Ultra Flip Laptop 14t", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX), SND_PCI_QUIRK(0x103c, 0x8cdf, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ce0, "HP SnowWhite", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d18, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS), SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d85, "HP EliteBook 14 G12", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8d86, "HP Elite X360 14 G12", ALC285_FIXUP_HP_GPIO_LED), @@ -10793,11 +10859,15 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8da1, "HP 16 Clipper OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8da7, "HP 14 Enstrom OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x103c, 0x8da8, "HP 16 Piston OmniBook X", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x103c, 0x8dd4, "HP EliteStudio 8 AIO", ALC274_FIXUP_HP_AIO_BIND_DACS), SND_PCI_QUIRK(0x103c, 0x8de8, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2), SND_PCI_QUIRK(0x103c, 0x8de9, "HP Gemtree", ALC245_FIXUP_TAS2781_SPI_2), SND_PCI_QUIRK(0x103c, 0x8dec, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ded, "HP EliteBook 640 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8dee, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8def, "HP EliteBook 660 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8df0, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8df1, "HP EliteBook 630 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8dfc, "HP EliteBook 645 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8dfe, "HP EliteBook 665 G12", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8e11, "HP Trekker", ALC287_FIXUP_CS35L41_I2C_2), @@ -10843,10 +10913,10 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM), SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC294_FIXUP_ASUS_CS35L41_SPI_2), - SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), - SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), 
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), @@ -10900,7 +10970,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JU/JV/JI", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JY/JZ/JI/JG", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), - SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1ccf, "ASUS G814JU/JV/JI", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1cdf, "ASUS G814JY/JZ/JG", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1cef, "ASUS G834JY/JZ/JI/JG", ALC285_FIXUP_ASUS_HEADSET_MIC), @@ -11494,6 +11564,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, + {.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"}, {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"}, diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 8b7d51266f81..1b983c7006f1 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig @@ -91,6 +91,16 @@ config SND_SOC_OPS_KUNIT_TEST config SND_SOC_ACPI tristate +config SND_SOC_USB + tristate "SoC based USB audio offloading" + depends on SND_USB_AUDIO + help + Enable this option if an ASoC platform card has support to handle + USB audio offloading. This enables the SoC USB layer, which will + notify the ASoC USB DPCM backend DAI link about available USB audio + devices. Based on the notifications, sequences to enable the audio + stream can be taken based on the design. 
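As a rough sketch of the notification flow this option describes — the SoC USB layer telling an ASoC DPCM backend which USB audio devices are available for offload — the self-contained C mock-up below may help. Every name in it is illustrative only and is not the actual soc-usb kernel API; in the kernel the consumer of such notifications is the Q6USB backend added elsewhere in this diff.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins only; the real kernel types and functions differ. */
struct usb_audio_dev {
	int card_idx;	/* USB SND card number */
	int pcm_idx;	/* PCM device on that card */
};

typedef void (*usb_conn_cb)(struct usb_audio_dev *dev, bool connected);

static usb_conn_cb backend_cb;

/* A DPCM backend registers interest in USB device events. */
static void soc_usb_register(usb_conn_cb cb)
{
	backend_cb = cb;
}

/* The USB SND side reports a device appearing or disappearing. */
static void soc_usb_notify(struct usb_audio_dev *dev, bool connected)
{
	if (backend_cb)
		backend_cb(dev, connected);
}

static void backend_handler(struct usb_audio_dev *dev, bool connected)
{
	printf("offload device card %d pcm %d %s\n",
	       dev->card_idx, dev->pcm_idx,
	       connected ? "available" : "gone");
}

int main(void)
{
	struct usb_audio_dev dev = { .card_idx = 1, .pcm_idx = 0 };

	soc_usb_register(backend_handler);
	soc_usb_notify(&dev, true);	/* USB headset plugged in */
	soc_usb_notify(&dev, false);	/* USB headset removed */
	return 0;
}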
+ # All the supported SoCs source "sound/soc/adi/Kconfig" source "sound/soc/amd/Kconfig" diff --git a/sound/soc/Makefile b/sound/soc/Makefile index 358e227c5ab6..462322c38aa4 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile @@ -39,6 +39,8 @@ endif obj-$(CONFIG_SND_SOC_ACPI) += snd-soc-acpi.o +obj-$(CONFIG_SND_SOC_USB) += soc-usb.o + obj-$(CONFIG_SND_SOC) += snd-soc-core.o obj-$(CONFIG_SND_SOC) += codecs/ obj-$(CONFIG_SND_SOC) += generic/ diff --git a/sound/soc/amd/acp/acp-i2s.c b/sound/soc/amd/acp/acp-i2s.c index a38409dd1d34..70fa54d568ef 100644 --- a/sound/soc/amd/acp/acp-i2s.c +++ b/sound/soc/amd/acp/acp-i2s.c @@ -97,7 +97,7 @@ static int acp_i2s_set_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mas struct acp_stream *stream; int slot_len, no_of_slots; - chip = dev_get_platdata(dev); + chip = dev_get_drvdata(dev->parent); switch (slot_width) { case SLOT_WIDTH_8: slot_len = 8; diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c index b4d68484e06d..ba8db0851daa 100644 --- a/sound/soc/amd/acp/acp-legacy-common.c +++ b/sound/soc/amd/acp/acp-legacy-common.c @@ -450,7 +450,7 @@ int acp_machine_select(struct acp_chip_info *chip) struct snd_soc_acpi_mach *mach; int size, platform; - if (chip->flag == FLAG_AMD_LEGACY_ONLY_DMIC) { + if (chip->flag == FLAG_AMD_LEGACY_ONLY_DMIC && chip->is_pdm_dev) { platform = chip->acp_rev; chip->mach_dev = platform_device_register_data(chip->dev, "acp-pdm-mach", PLATFORM_DEVID_NONE, &platform, diff --git a/sound/soc/amd/acp/acp-rembrandt.c b/sound/soc/amd/acp/acp-rembrandt.c index 746b6ed72029..cccdd10c345e 100644 --- a/sound/soc/amd/acp/acp-rembrandt.c +++ b/sound/soc/amd/acp/acp-rembrandt.c @@ -199,7 +199,7 @@ static void rembrandt_audio_remove(struct platform_device *pdev) static int rmb_pcm_resume(struct device *dev) { - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_stream *stream; struct snd_pcm_substream *substream; snd_pcm_uframes_t buf_in_frames; diff --git a/sound/soc/amd/acp/acp-renoir.c b/sound/soc/amd/acp/acp-renoir.c index ebf0106fc737..04f6d70b6a92 100644 --- a/sound/soc/amd/acp/acp-renoir.c +++ b/sound/soc/amd/acp/acp-renoir.c @@ -146,7 +146,7 @@ static void renoir_audio_remove(struct platform_device *pdev) static int rn_pcm_resume(struct device *dev) { - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_stream *stream; struct snd_pcm_substream *substream; snd_pcm_uframes_t buf_in_frames; diff --git a/sound/soc/amd/acp/acp63.c b/sound/soc/amd/acp/acp63.c index 52d895e624c7..1f15c96a9b94 100644 --- a/sound/soc/amd/acp/acp63.c +++ b/sound/soc/amd/acp/acp63.c @@ -250,7 +250,7 @@ static void acp63_audio_remove(struct platform_device *pdev) static int acp63_pcm_resume(struct device *dev) { - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_stream *stream; struct snd_pcm_substream *substream; snd_pcm_uframes_t buf_in_frames; diff --git a/sound/soc/amd/acp/acp70.c b/sound/soc/amd/acp/acp70.c index 6d5f5ade075c..217b717e9beb 100644 --- a/sound/soc/amd/acp/acp70.c +++ b/sound/soc/amd/acp/acp70.c @@ -182,7 +182,7 @@ static void acp_acp70_audio_remove(struct platform_device *pdev) static int acp70_pcm_resume(struct device *dev) { - struct acp_chip_info *chip = dev_get_platdata(dev); + struct acp_chip_info *chip = dev_get_drvdata(dev->parent); struct acp_stream *stream; struct 
snd_pcm_substream *substream; snd_pcm_uframes_t buf_in_frames; diff --git a/sound/soc/amd/ps/pci-ps.c b/sound/soc/amd/ps/pci-ps.c index 8e57f31ef7f7..7936b3173632 100644 --- a/sound/soc/amd/ps/pci-ps.c +++ b/sound/soc/amd/ps/pci-ps.c @@ -193,6 +193,7 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id) struct amd_sdw_manager *amd_manager; u32 ext_intr_stat, ext_intr_stat1; u16 irq_flag = 0; + u16 wake_irq_flag = 0; u16 sdw_dma_irq_flag = 0; adata = dev_id; @@ -231,7 +232,7 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id) } if (adata->acp_rev >= ACP70_PCI_REV) - irq_flag = check_and_handle_acp70_sdw_wake_irq(adata); + wake_irq_flag = check_and_handle_acp70_sdw_wake_irq(adata); if (ext_intr_stat & BIT(PDM_DMA_STAT)) { ps_pdm_data = dev_get_drvdata(&adata->pdm_dev->dev); @@ -245,7 +246,7 @@ static irqreturn_t acp63_irq_handler(int irq, void *dev_id) if (sdw_dma_irq_flag) return IRQ_WAKE_THREAD; - if (irq_flag) + if (irq_flag | wake_irq_flag) return IRQ_HANDLED; else return IRQ_NONE; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 40bb7a1d44bc..20f99cbee29b 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -776,10 +776,9 @@ config SND_SOC_CS_AMP_LIB tristate config SND_SOC_CS_AMP_LIB_TEST - tristate "KUnit test for Cirrus Logic cs-amp-lib" - depends on KUNIT + tristate "KUnit test for Cirrus Logic cs-amp-lib" if !KUNIT_ALL_TESTS + depends on SND_SOC_CS_AMP_LIB && KUNIT default KUNIT_ALL_TESTS - select SND_SOC_CS_AMP_LIB help This builds KUnit tests for the Cirrus Logic common amplifier library. diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c index 20e6ab6f0d4a..6165ac16c3a9 100644 --- a/sound/soc/codecs/cs42l43-jack.c +++ b/sound/soc/codecs/cs42l43-jack.c @@ -654,6 +654,10 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv) reinit_completion(&priv->type_detect); + regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK); + cs42l43_start_hs_bias(priv, true); regmap_update_bits(cs42l43->regmap, CS42L43_HS2, CS42L43_HSDET_MODE_MASK, 0x3 << CS42L43_HSDET_MODE_SHIFT); @@ -665,6 +669,9 @@ static int cs42l43_run_type_detect(struct cs42l43_codec *priv) CS42L43_HSDET_MODE_MASK, 0x2 << CS42L43_HSDET_MODE_SHIFT); cs42l43_stop_hs_bias(priv); + regmap_update_bits(cs42l43->regmap, CS42L43_STEREO_MIC_CLAMP_CTRL, + CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK, 0); + if (!time_left) return -ETIMEDOUT; diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index 3686d468506b..45e000f61ecc 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -544,7 +544,7 @@ static int imx_card_parse_of(struct imx_card_data *data) if (!card->dai_link) return -ENOMEM; - data->link_data = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL); + data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL); if (!data->link_data) return -ENOMEM; diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index a1ccc300e68c..3ae2a212a2e3 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -1174,9 +1174,9 @@ void graph_util_parse_link_direction(struct device_node *np, bool is_playback_only = of_property_read_bool(np, "playback-only"); bool is_capture_only = of_property_read_bool(np, "capture-only"); - if (is_playback_only) + if (playback_only) *playback_only = is_playback_only; - if (is_capture_only) + if 
(capture_only) *capture_only = is_capture_only; } EXPORT_SYMBOL_GPL(graph_util_parse_link_direction); diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 6446cda0f857..0f3b8f44e701 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -576,6 +576,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF2 | BYT_RT5640_MCLK_EN), }, + { /* Acer Aspire SW3-013 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c index 5993819cc58a..008a20a2acbd 100644 --- a/sound/soc/intel/catpt/dsp.c +++ b/sound/soc/intel/catpt/dsp.c @@ -156,7 +156,7 @@ static void catpt_dsp_set_srampge(struct catpt_dev *cdev, struct resource *sram, { unsigned long old; u32 off = sram->start; - u32 b = __ffs(mask); + unsigned long b = __ffs(mask); old = catpt_readl_pci(cdev, VDRTCTL0) & mask; dev_dbg(cdev->dev, "SRAMPGE [0x%08lx] 0x%08lx -> 0x%08lx", diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c index 6603d8de501c..c599eb43eeb1 100644 --- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c @@ -431,7 +431,8 @@ static const struct snd_soc_acpi_link_adr ptl_cs42l43_l3[] = { .mask = BIT(3), .num_adr = ARRAY_SIZE(cs42l43_3_adr), .adr_d = cs42l43_3_adr, - } + }, + {} }; static const struct snd_soc_acpi_link_adr ptl_rt722_only[] = { diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig index ca7a30ebd26a..e86b4a03dd61 100644 --- a/sound/soc/qcom/Kconfig +++ b/sound/soc/qcom/Kconfig @@ -118,6 +118,22 @@ config SND_SOC_QDSP6_PRM tristate select SND_SOC_QDSP6_PRM_LPASS_CLOCKS +config SND_SOC_QCOM_OFFLOAD_UTILS + tristate + +config SND_SOC_QDSP6_USB + tristate "SoC ALSA USB offloading backing for QDSP6" + depends on SND_SOC_USB + select AUXILIARY_BUS + select SND_SOC_QCOM_OFFLOAD_UTILS + + help + Adds support for USB offloading for QDSP6 ASoC + based platform sound cards. This will enable the + Q6USB DPCM backend DAI link, which will interact + with the SoC USB framework to initialize a session + with active USB SND devices. 
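The Q6USB backend ultimately has to tell the DSP which USB card and PCM device to offload; the afe_port_send_usb_dev_param() hunk further down in this diff packs that pair into a device token as (cardidx << 16) | (pcmidx << 8). A minimal stand-alone illustration of that encoding follows; the helper and decode expressions below are ours for demonstration, not kernel functions.

#include <stdio.h>
#include <stdint.h>

/*
 * Mirrors the packing used by afe_port_send_usb_dev_param() below:
 *   usb_dev.dev_token = (cardidx << 16) | (pcmidx << 8);
 */
static uint32_t pack_dev_token(unsigned int cardidx, unsigned int pcmidx)
{
	return (cardidx << 16) | (pcmidx << 8);
}

int main(void)
{
	uint32_t tok = pack_dev_token(1, 0);	/* USB SND card 1, PCM device 0 */

	printf("dev_token          : 0x%08x\n", tok);
	printf("decoded card index : %u\n", (tok >> 16) & 0xffff);
	printf("decoded pcm index  : %u\n", (tok >> 8) & 0xff);
	return 0;
}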
+ config SND_SOC_QDSP6 tristate "SoC ALSA audio driver for QDSP6" depends on QCOM_APR diff --git a/sound/soc/qcom/Makefile b/sound/soc/qcom/Makefile index 16db7b53ddac..985ce2ae286b 100644 --- a/sound/soc/qcom/Makefile +++ b/sound/soc/qcom/Makefile @@ -30,6 +30,7 @@ snd-soc-sc8280xp-y := sc8280xp.o snd-soc-qcom-common-y := common.o snd-soc-qcom-sdw-y := sdw.o snd-soc-x1e80100-y := x1e80100.o +snd-soc-qcom-offload-utils-objs := usb_offload_utils.o obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o @@ -42,6 +43,7 @@ obj-$(CONFIG_SND_SOC_SM8250) += snd-soc-sm8250.o obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o obj-$(CONFIG_SND_SOC_QCOM_SDW) += snd-soc-qcom-sdw.o obj-$(CONFIG_SND_SOC_X1E80100) += snd-soc-x1e80100.o +obj-$(CONFIG_SND_SOC_QCOM_OFFLOAD_UTILS) += snd-soc-qcom-offload-utils.o #DSP lib obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/ diff --git a/sound/soc/qcom/qdsp6/Makefile b/sound/soc/qcom/qdsp6/Makefile index 26b7c55c9c11..67267304e7e9 100644 --- a/sound/soc/qcom/qdsp6/Makefile +++ b/sound/soc/qcom/qdsp6/Makefile @@ -17,3 +17,4 @@ obj-$(CONFIG_SND_SOC_QDSP6_APM_DAI) += q6apm-dai.o obj-$(CONFIG_SND_SOC_QDSP6_APM_LPASS_DAI) += q6apm-lpass-dais.o obj-$(CONFIG_SND_SOC_QDSP6_PRM) += q6prm.o obj-$(CONFIG_SND_SOC_QDSP6_PRM_LPASS_CLOCKS) += q6prm-clocks.o +obj-$(CONFIG_SND_SOC_QDSP6_USB) += q6usb.o diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index 7d9628cda875..0f47aadaabe1 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -92,6 +92,39 @@ static int q6hdmi_hw_params(struct snd_pcm_substream *substream, return 0; } +static int q6afe_usb_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct q6afe_dai_data *dai_data = dev_get_drvdata(dai->dev); + int channels = params_channels(params); + int rate = params_rate(params); + struct q6afe_usb_cfg *usb = &dai_data->port_config[dai->id].usb_audio; + + usb->sample_rate = rate; + usb->num_channels = channels; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_U16_LE: + case SNDRV_PCM_FORMAT_S16_LE: + usb->bit_width = 16; + break; + case SNDRV_PCM_FORMAT_S24_LE: + case SNDRV_PCM_FORMAT_S24_3LE: + usb->bit_width = 24; + break; + case SNDRV_PCM_FORMAT_S32_LE: + usb->bit_width = 32; + break; + default: + dev_err(dai->dev, "%s: invalid format %d\n", + __func__, params_format(params)); + return -EINVAL; + } + + return 0; +} + static int q6i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) @@ -394,6 +427,10 @@ static int q6afe_dai_prepare(struct snd_pcm_substream *substream, q6afe_cdc_dma_port_prepare(dai_data->port[dai->id], &dai_data->port_config[dai->id].dma_cfg); break; + case USB_RX: + q6afe_usb_port_prepare(dai_data->port[dai->id], + &dai_data->port_config[dai->id].usb_audio); + break; default: return -EINVAL; } @@ -622,6 +659,9 @@ static const struct snd_soc_dapm_route q6afe_dapm_routes[] = { {"TX_CODEC_DMA_TX_5", NULL, "TX_CODEC_DMA_TX_5 Capture"}, {"RX_CODEC_DMA_RX_6 Playback", NULL, "RX_CODEC_DMA_RX_6"}, {"RX_CODEC_DMA_RX_7 Playback", NULL, "RX_CODEC_DMA_RX_7"}, + + /* USB playback AFE port receives data for playback, hence use the RX port */ + {"USB Playback", NULL, "USB_RX"}, }; static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai) @@ -649,6 +689,23 @@ static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai) return 0; } +static const struct snd_soc_dai_ops q6afe_usb_ops = { + 
.probe = msm_dai_q6_dai_probe, + .prepare = q6afe_dai_prepare, + .hw_params = q6afe_usb_hw_params, + /* + * Shutdown callback required to stop the USB AFE port, which is enabled + * by the prepare() stage. This stops the audio traffic on the USB AFE + * port on the Q6DSP. + */ + .shutdown = q6afe_dai_shutdown, + /* + * Startup callback not needed, as AFE port start command passes the PCM + * parameters within the AFE command, which is provided by the PCM core + * during the prepare() stage. + */ +}; + static const struct snd_soc_dai_ops q6hdmi_ops = { .probe = msm_dai_q6_dai_probe, .remove = msm_dai_q6_dai_remove, @@ -947,6 +1004,8 @@ static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = { 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("RX_CODEC_DMA_RX_7", "NULL", 0, SND_SOC_NOPM, 0, 0), + + SND_SOC_DAPM_AIF_IN("USB_RX", NULL, 0, SND_SOC_NOPM, 0, 0), }; static const struct snd_soc_component_driver q6afe_dai_component = { @@ -1061,6 +1120,7 @@ static int q6afe_dai_dev_probe(struct platform_device *pdev) cfg.q6i2s_ops = &q6i2s_ops; cfg.q6tdm_ops = &q6tdm_ops; cfg.q6dma_ops = &q6dma_ops; + cfg.q6usb_ops = &q6afe_usb_ops; dais = q6dsp_audio_ports_set_config(dev, &cfg, &num_dais); return devm_snd_soc_register_component(dev, &q6afe_dai_component, dais, num_dais); diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c index ef7557be5d66..7b59d514b432 100644 --- a/sound/soc/qcom/qdsp6/q6afe.c +++ b/sound/soc/qcom/qdsp6/q6afe.c @@ -35,6 +35,8 @@ #define AFE_MODULE_TDM 0x0001028A #define AFE_PARAM_ID_CDC_SLIMBUS_SLAVE_CFG 0x00010235 +#define AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS 0x000102A5 +#define AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT 0x000102AA #define AFE_PARAM_ID_LPAIF_CLK_CONFIG 0x00010238 #define AFE_PARAM_ID_INT_DIGITAL_CDC_CLK_CONFIG 0x00010239 @@ -44,6 +46,7 @@ #define AFE_PARAM_ID_TDM_CONFIG 0x0001029D #define AFE_PARAM_ID_PORT_SLOT_MAPPING_CONFIG 0x00010297 #define AFE_PARAM_ID_CODEC_DMA_CONFIG 0x000102B8 +#define AFE_PARAM_ID_USB_AUDIO_CONFIG 0x000102A4 #define AFE_CMD_REMOTE_LPASS_CORE_HW_VOTE_REQUEST 0x000100f4 #define AFE_CMD_RSP_REMOTE_LPASS_CORE_HW_VOTE_REQUEST 0x000100f5 #define AFE_CMD_REMOTE_LPASS_CORE_HW_DEVOTE_REQUEST 0x000100f6 @@ -72,12 +75,16 @@ #define AFE_PORT_CONFIG_I2S_WS_SRC_INTERNAL 0x1 #define AFE_LINEAR_PCM_DATA 0x0 +#define AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG 0x1 /* Port IDs */ #define AFE_API_VERSION_HDMI_CONFIG 0x1 #define AFE_PORT_ID_MULTICHAN_HDMI_RX 0x100E #define AFE_PORT_ID_HDMI_OVER_DP_RX 0x6020 +/* USB AFE port */ +#define AFE_PORT_ID_USB_RX 0x7000 + #define AFE_API_VERSION_SLIMBUS_CONFIG 0x1 /* Clock set API version */ #define AFE_API_VERSION_CLOCK_SET 1 @@ -359,7 +366,7 @@ #define AFE_API_VERSION_SLOT_MAPPING_CONFIG 1 #define AFE_API_VERSION_CODEC_DMA_CONFIG 1 -#define TIMEOUT_MS 1000 +#define TIMEOUT_MS 3000 #define AFE_CMD_RESP_AVAIL 0 #define AFE_CMD_RESP_NONE 1 #define AFE_CLK_TOKEN 1024 @@ -513,12 +520,96 @@ struct afe_param_id_cdc_dma_cfg { u16 active_channels_mask; } __packed; +struct afe_param_id_usb_cfg { +/* Minor version used for tracking USB audio device configuration. + * Supported values: AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG + */ + u32 cfg_minor_version; +/* Sampling rate of the port. 
+ * Supported values: + * - AFE_PORT_SAMPLE_RATE_8K + * - AFE_PORT_SAMPLE_RATE_11025 + * - AFE_PORT_SAMPLE_RATE_12K + * - AFE_PORT_SAMPLE_RATE_16K + * - AFE_PORT_SAMPLE_RATE_22050 + * - AFE_PORT_SAMPLE_RATE_24K + * - AFE_PORT_SAMPLE_RATE_32K + * - AFE_PORT_SAMPLE_RATE_44P1K + * - AFE_PORT_SAMPLE_RATE_48K + * - AFE_PORT_SAMPLE_RATE_96K + * - AFE_PORT_SAMPLE_RATE_192K + */ + u32 sample_rate; +/* Bit width of the sample. + * Supported values: 16, 24 + */ + u16 bit_width; +/* Number of channels. + * Supported values: 1 and 2 + */ + u16 num_channels; +/* Data format supported by the USB. The supported value is + * 0 (#AFE_USB_AUDIO_DATA_FORMAT_LINEAR_PCM). + */ + u16 data_format; +/* this field must be 0 */ + u16 reserved; +/* device token of actual end USB audio device */ + u32 dev_token; +/* endianness of this interface */ + u32 endian; +/* service interval */ + u32 service_interval; +} __packed; + +/** + * struct afe_param_id_usb_audio_dev_params + * @cfg_minor_version: Minor version used for tracking USB audio device + * configuration. + * Supported values: + * AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG + * @dev_token: device token of actual end USB audio device + **/ +struct afe_param_id_usb_audio_dev_params { + u32 cfg_minor_version; + u32 dev_token; +} __packed; + +/** + * struct afe_param_id_usb_audio_dev_lpcm_fmt + * @cfg_minor_version: Minor version used for tracking USB audio device + * configuration. + * Supported values: + * AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG + * @endian: endianness of this interface + **/ +struct afe_param_id_usb_audio_dev_lpcm_fmt { + u32 cfg_minor_version; + u32 endian; +} __packed; + +#define AFE_PARAM_ID_USB_AUDIO_SVC_INTERVAL 0x000102B7 + +/** + * struct afe_param_id_usb_audio_svc_interval + * @cfg_minor_version: Minor version used for tracking USB audio device + * configuration. + * Supported values: + * AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG + * @svc_interval: service interval + **/ +struct afe_param_id_usb_audio_svc_interval { + u32 cfg_minor_version; + u32 svc_interval; +} __packed; + union afe_port_config { struct afe_param_id_hdmi_multi_chan_audio_cfg hdmi_multi_ch; struct afe_param_id_slimbus_cfg slim_cfg; struct afe_param_id_i2s_cfg i2s_cfg; struct afe_param_id_tdm_cfg tdm_cfg; struct afe_param_id_cdc_dma_cfg dma_cfg; + struct afe_param_id_usb_cfg usb_cfg; } __packed; @@ -833,6 +924,7 @@ static struct afe_port_map port_maps[AFE_PORT_MAX] = { RX_CODEC_DMA_RX_6, 1, 1}, [RX_CODEC_DMA_RX_7] = { AFE_PORT_ID_RX_CODEC_DMA_RX_7, RX_CODEC_DMA_RX_7, 1, 1}, + [USB_RX] = { AFE_PORT_ID_USB_RX, USB_RX, 1, 1}, }; static void q6afe_port_free(struct kref *ref) @@ -1291,6 +1383,99 @@ void q6afe_tdm_port_prepare(struct q6afe_port *port, EXPORT_SYMBOL_GPL(q6afe_tdm_port_prepare); /** + * afe_port_send_usb_dev_param() - Send USB dev token + * + * @port: Instance of afe port + * @cardidx: USB SND card index to reference + * @pcmidx: USB SND PCM device index to reference + * + * The USB dev token carries information about which USB SND card instance and + * PCM device to execute the offload on. This information is carried through + * to the stream enable QMI request, which is handled by the offload class + * driver. The information is parsed to determine which USB device to query + * the required resources for. 
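+ *
+ * For example (illustrative values): offloading USB SND card 1, PCM device 0
+ * yields a dev_token of 0x00010000, i.e. the card index is packed into
+ * bits 31:16 and the PCM device index into bits 15:8.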
+ */ +int afe_port_send_usb_dev_param(struct q6afe_port *port, int cardidx, int pcmidx) +{ + struct afe_param_id_usb_audio_dev_params usb_dev; + int ret; + + memset(&usb_dev, 0, sizeof(usb_dev)); + + usb_dev.cfg_minor_version = AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG; + usb_dev.dev_token = (cardidx << 16) | (pcmidx << 8); + ret = q6afe_port_set_param_v2(port, &usb_dev, + AFE_PARAM_ID_USB_AUDIO_DEV_PARAMS, + AFE_MODULE_AUDIO_DEV_INTERFACE, + sizeof(usb_dev)); + if (ret) + dev_err(port->afe->dev, "%s: AFE device param cmd failed %d\n", + __func__, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(afe_port_send_usb_dev_param); + +static int afe_port_send_usb_params(struct q6afe_port *port, struct q6afe_usb_cfg *cfg) +{ + union afe_port_config *pcfg = &port->port_cfg; + struct afe_param_id_usb_audio_dev_lpcm_fmt lpcm_fmt; + struct afe_param_id_usb_audio_svc_interval svc_int; + int ret; + + if (!pcfg) { + dev_err(port->afe->dev, "%s: Error, no configuration data\n", __func__); + return -EINVAL; + } + + memset(&lpcm_fmt, 0, sizeof(lpcm_fmt)); + memset(&svc_int, 0, sizeof(svc_int)); + + lpcm_fmt.cfg_minor_version = AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG; + lpcm_fmt.endian = pcfg->usb_cfg.endian; + ret = q6afe_port_set_param_v2(port, &lpcm_fmt, + AFE_PARAM_ID_USB_AUDIO_DEV_LPCM_FMT, + AFE_MODULE_AUDIO_DEV_INTERFACE, sizeof(lpcm_fmt)); + if (ret) { + dev_err(port->afe->dev, "%s: AFE device param cmd LPCM_FMT failed %d\n", + __func__, ret); + return ret; + } + + svc_int.cfg_minor_version = AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG; + svc_int.svc_interval = pcfg->usb_cfg.service_interval; + ret = q6afe_port_set_param_v2(port, &svc_int, + AFE_PARAM_ID_USB_AUDIO_SVC_INTERVAL, + AFE_MODULE_AUDIO_DEV_INTERFACE, sizeof(svc_int)); + if (ret) + dev_err(port->afe->dev, "%s: AFE device param cmd svc_interval failed %d\n", + __func__, ret); + + return ret; +} + +/** + * q6afe_usb_port_prepare() - Prepare usb afe port. + * + * @port: Instance of afe port + * @cfg: USB configuration for the afe port + * + */ +void q6afe_usb_port_prepare(struct q6afe_port *port, + struct q6afe_usb_cfg *cfg) +{ + union afe_port_config *pcfg = &port->port_cfg; + + pcfg->usb_cfg.cfg_minor_version = AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG; + pcfg->usb_cfg.sample_rate = cfg->sample_rate; + pcfg->usb_cfg.num_channels = cfg->num_channels; + pcfg->usb_cfg.bit_width = cfg->bit_width; + + afe_port_send_usb_params(port, cfg); +} +EXPORT_SYMBOL_GPL(q6afe_usb_port_prepare); + +/** * q6afe_hdmi_port_prepare() - Prepare hdmi afe port. * * @port: Instance of afe port @@ -1612,7 +1797,10 @@ struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id) break; case AFE_PORT_ID_WSA_CODEC_DMA_RX_0 ... AFE_PORT_ID_RX_CODEC_DMA_RX_7: cfg_type = AFE_PARAM_ID_CODEC_DMA_CONFIG; - break; + break; + case AFE_PORT_ID_USB_RX: + cfg_type = AFE_PARAM_ID_USB_AUDIO_CONFIG; + break; default: dev_err(dev, "Invalid port id 0x%x\n", port_id); return ERR_PTR(-EINVAL); diff --git a/sound/soc/qcom/qdsp6/q6afe.h b/sound/soc/qcom/qdsp6/q6afe.h index 65d0676075e1..a29abe4ce436 100644 --- a/sound/soc/qcom/qdsp6/q6afe.h +++ b/sound/soc/qcom/qdsp6/q6afe.h @@ -3,7 +3,7 @@ #ifndef __Q6AFE_H__ #define __Q6AFE_H__ -#define AFE_PORT_MAX 129 +#define AFE_PORT_MAX 137 #define MSM_AFE_PORT_TYPE_RX 0 #define MSM_AFE_PORT_TYPE_TX 1 @@ -203,6 +203,36 @@ struct q6afe_cdc_dma_cfg { u16 active_channels_mask; }; +/** + * struct q6afe_usb_cfg + * @cfg_minor_version: Minor version used for tracking USB audio device + * configuration. 
+ * Supported values: + * AFE_API_MINOR_VERSION_USB_AUDIO_CONFIG + * @sample_rate: Sampling rate of the port + * Supported values: + * AFE_PORT_SAMPLE_RATE_8K + * AFE_PORT_SAMPLE_RATE_11025 + * AFE_PORT_SAMPLE_RATE_12K + * AFE_PORT_SAMPLE_RATE_16K + * AFE_PORT_SAMPLE_RATE_22050 + * AFE_PORT_SAMPLE_RATE_24K + * AFE_PORT_SAMPLE_RATE_32K + * AFE_PORT_SAMPLE_RATE_44P1K + * AFE_PORT_SAMPLE_RATE_48K + * AFE_PORT_SAMPLE_RATE_96K + * AFE_PORT_SAMPLE_RATE_192K + * @bit_width: Bit width of the sample. + * Supported values: 16, 24 + * @num_channels: Number of channels + * Supported values: 1, 2 + **/ +struct q6afe_usb_cfg { + u32 cfg_minor_version; + u32 sample_rate; + u16 bit_width; + u16 num_channels; +}; struct q6afe_port_config { struct q6afe_hdmi_cfg hdmi; @@ -210,6 +240,7 @@ struct q6afe_port_config { struct q6afe_i2s_cfg i2s_cfg; struct q6afe_tdm_cfg tdm; struct q6afe_cdc_dma_cfg dma_cfg; + struct q6afe_usb_cfg usb_audio; }; struct q6afe_port; @@ -219,6 +250,8 @@ int q6afe_port_start(struct q6afe_port *port); int q6afe_port_stop(struct q6afe_port *port); void q6afe_port_put(struct q6afe_port *port); int q6afe_get_port_id(int index); +void q6afe_usb_port_prepare(struct q6afe_port *port, + struct q6afe_usb_cfg *cfg); void q6afe_hdmi_port_prepare(struct q6afe_port *port, struct q6afe_hdmi_cfg *cfg); void q6afe_slim_port_prepare(struct q6afe_port *port, @@ -228,6 +261,7 @@ void q6afe_tdm_port_prepare(struct q6afe_port *port, struct q6afe_tdm_cfg *cfg); void q6afe_cdc_dma_port_prepare(struct q6afe_port *port, struct q6afe_cdc_dma_cfg *cfg); +int afe_port_send_usb_dev_param(struct q6afe_port *port, int cardidx, int pcmidx); int q6afe_port_set_sysclk(struct q6afe_port *port, int clk_id, int clk_src, int clk_root, unsigned int freq, int dir); diff --git a/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c b/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c index 4919001de08b..4eed54b071a5 100644 --- a/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c +++ b/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c @@ -99,6 +99,26 @@ static struct snd_soc_dai_driver q6dsp_audio_fe_dais[] = { { .playback = { + .stream_name = "USB Playback", + .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | + SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | + SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | + SNDRV_PCM_RATE_192000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | + SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE | + SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE | + SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE, + .channels_min = 1, + .channels_max = 2, + .rate_min = 8000, + .rate_max = 192000, + }, + .id = USB_RX, + .name = "USB_RX", + }, + { + .playback = { .stream_name = "HDMI Playback", .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | @@ -624,6 +644,9 @@ struct snd_soc_dai_driver *q6dsp_audio_ports_set_config(struct device *dev, case WSA_CODEC_DMA_RX_0 ... 
RX_CODEC_DMA_RX_7: q6dsp_audio_fe_dais[i].ops = cfg->q6dma_ops; break; + case USB_RX: + q6dsp_audio_fe_dais[i].ops = cfg->q6usb_ops; + break; default: break; } diff --git a/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.h b/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.h index 7f052c8a1257..d8dde6dd0aca 100644 --- a/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.h +++ b/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.h @@ -11,6 +11,7 @@ struct q6dsp_audio_port_dai_driver_config { const struct snd_soc_dai_ops *q6i2s_ops; const struct snd_soc_dai_ops *q6tdm_ops; const struct snd_soc_dai_ops *q6dma_ops; + const struct snd_soc_dai_ops *q6usb_ops; }; struct snd_soc_dai_driver *q6dsp_audio_ports_set_config(struct device *dev, diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index 90228699ba7d..f49243daa517 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -435,6 +435,7 @@ static struct session_data *get_session_from_id(struct msm_routing_data *data, return NULL; } + /** * q6routing_stream_close() - Deregister a stream * @@ -515,6 +516,9 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol, return 1; } +static const struct snd_kcontrol_new usb_rx_mixer_controls[] = { + Q6ROUTING_RX_MIXERS(USB_RX) }; + static const struct snd_kcontrol_new hdmi_mixer_controls[] = { Q6ROUTING_RX_MIXERS(HDMI_RX) }; @@ -933,6 +937,9 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { SND_SOC_DAPM_MIXER("RX_CODEC_DMA_RX_7 Audio Mixer", SND_SOC_NOPM, 0, 0, rx_codec_dma_rx_7_mixer_controls, ARRAY_SIZE(rx_codec_dma_rx_7_mixer_controls)), + SND_SOC_DAPM_MIXER("USB_RX Audio Mixer", SND_SOC_NOPM, 0, 0, + usb_rx_mixer_controls, + ARRAY_SIZE(usb_rx_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia1 Mixer", SND_SOC_NOPM, 0, 0, mmul1_mixer_controls, ARRAY_SIZE(mmul1_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia2 Mixer", SND_SOC_NOPM, 0, 0, @@ -949,7 +956,6 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { mmul7_mixer_controls, ARRAY_SIZE(mmul7_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia8 Mixer", SND_SOC_NOPM, 0, 0, mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)), - }; static const struct snd_soc_dapm_route intercon[] = { @@ -1026,6 +1032,7 @@ static const struct snd_soc_dapm_route intercon[] = { Q6ROUTING_RX_DAPM_ROUTE("RX_CODEC_DMA_RX_5 Audio Mixer", "RX_CODEC_DMA_RX_5"), Q6ROUTING_RX_DAPM_ROUTE("RX_CODEC_DMA_RX_6 Audio Mixer", "RX_CODEC_DMA_RX_6"), Q6ROUTING_RX_DAPM_ROUTE("RX_CODEC_DMA_RX_7 Audio Mixer", "RX_CODEC_DMA_RX_7"), + Q6ROUTING_RX_DAPM_ROUTE("USB_RX Audio Mixer", "USB_RX"), Q6ROUTING_TX_DAPM_ROUTE("MultiMedia1 Mixer"), Q6ROUTING_TX_DAPM_ROUTE("MultiMedia2 Mixer"), Q6ROUTING_TX_DAPM_ROUTE("MultiMedia3 Mixer"), diff --git a/sound/soc/qcom/qdsp6/q6usb.c b/sound/soc/qcom/qdsp6/q6usb.c new file mode 100644 index 000000000000..adba0446f301 --- /dev/null +++ b/sound/soc/qcom/qdsp6/q6usb.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/auxiliary_bus.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <sound/asound.h> +#include <sound/jack.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/q6usboffload.h> +#include <sound/soc.h> +#include <sound/soc-usb.h> + +#include <dt-bindings/sound/qcom,q6afe.h> + +#include "q6afe.h" +#include "q6dsp-lpass-ports.h" + +#define Q6_USB_SID_MASK 0xF + +struct q6usb_port_data { + struct auxiliary_device uauxdev; + struct q6afe_usb_cfg usb_cfg; + struct snd_soc_usb *usb; + struct snd_soc_jack *hs_jack; + struct q6usb_offload priv; + + /* Protects against operations between SOC USB and ASoC */ + struct mutex mutex; + struct list_head devices; +}; + +static const struct snd_soc_dapm_widget q6usb_dai_widgets[] = { + SND_SOC_DAPM_HP("USB_RX_BE", NULL), +}; + +static const struct snd_soc_dapm_route q6usb_dapm_routes[] = { + {"USB Playback", NULL, "USB_RX_BE"}, +}; + +static int q6usb_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct q6usb_port_data *data = dev_get_drvdata(dai->dev); + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + int direction = substream->stream; + struct q6afe_port *q6usb_afe; + struct snd_soc_usb_device *sdev; + int ret = -EINVAL; + + mutex_lock(&data->mutex); + + /* No active chip index */ + if (list_empty(&data->devices)) + goto out; + + sdev = list_last_entry(&data->devices, struct snd_soc_usb_device, list); + + ret = snd_soc_usb_find_supported_format(sdev->chip_idx, params, direction); + if (ret < 0) + goto out; + + q6usb_afe = q6afe_port_get_from_id(cpu_dai->dev, USB_RX); + if (IS_ERR(q6usb_afe)) { + ret = PTR_ERR(q6usb_afe); + goto out; + } + + /* Notify audio DSP about the devices being offloaded */ + ret = afe_port_send_usb_dev_param(q6usb_afe, sdev->card_idx, + sdev->ppcm_idx[sdev->num_playback - 1]); + +out: + mutex_unlock(&data->mutex); + + return ret; +} + +static const struct snd_soc_dai_ops q6usb_ops = { + .hw_params = q6usb_hw_params, +}; + +static struct snd_soc_dai_driver q6usb_be_dais[] = { + { + .playback = { + .stream_name = "USB BE RX", + .rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | + SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | + SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | + SNDRV_PCM_RATE_192000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | + SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE | + SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE | + SNDRV_PCM_FMTBIT_U24_LE | SNDRV_PCM_FMTBIT_U24_BE, + .channels_min = 1, + .channels_max = 2, + .rate_max = 192000, + .rate_min = 8000, + }, + .id = USB_RX, + .name = "USB_RX_BE", + .ops = &q6usb_ops, + }, +}; + +static int q6usb_audio_ports_of_xlate_dai_name(struct snd_soc_component *component, + const struct of_phandle_args *args, + const char **dai_name) +{ + int id = args->args[0]; + int ret = -EINVAL; + int i; + + for (i = 0; i < ARRAY_SIZE(q6usb_be_dais); i++) { + if (q6usb_be_dais[i].id == id) { + *dai_name = q6usb_be_dais[i].name; + ret = 0; + break; + } + } + + return ret; +} + +static int q6usb_get_pcm_id_from_widget(struct snd_soc_dapm_widget *w) +{ + struct snd_soc_pcm_runtime *rtd; + struct snd_soc_dai *dai; + + 
for_each_card_rtds(w->dapm->card, rtd) { + dai = snd_soc_rtd_to_cpu(rtd, 0); + /* + * Only look for playback widget. RTD number carries the assigned + * PCM index. + */ + if (dai->stream[0].widget == w) + return rtd->id; + } + + return -1; +} + +static int q6usb_usb_mixer_enabled(struct snd_soc_dapm_widget *w) +{ + struct snd_soc_dapm_path *p; + + /* Checks to ensure USB path is enabled/connected */ + snd_soc_dapm_widget_for_each_sink_path(w, p) + if (!strcmp(p->sink->name, "USB Mixer") && p->connect) + return 1; + + return 0; +} + +static int q6usb_get_pcm_id(struct snd_soc_component *component) +{ + struct snd_soc_dapm_widget *w; + struct snd_soc_dapm_path *p; + int pidx; + + /* + * Traverse widgets to find corresponding FE widget. The DAI links are + * built like the following: + * MultiMedia* <-> MM_DL* <-> USB Mixer* + */ + for_each_card_widgets(component->card, w) { + if (!strncmp(w->name, "MultiMedia", 10)) { + /* + * Look up all paths associated with the FE widget to see if + * the USB BE is enabled. The sink widget is responsible to + * link with the USB mixers. + */ + snd_soc_dapm_widget_for_each_sink_path(w, p) { + if (q6usb_usb_mixer_enabled(p->sink)) { + pidx = q6usb_get_pcm_id_from_widget(w); + return pidx; + } + } + } + } + + return -1; +} + +static int q6usb_update_offload_route(struct snd_soc_component *component, int card, + int pcm, int direction, enum snd_soc_usb_kctl path, + long *route) +{ + struct q6usb_port_data *data = dev_get_drvdata(component->dev); + struct snd_soc_usb_device *sdev; + int ret = 0; + int idx = -1; + + mutex_lock(&data->mutex); + + if (list_empty(&data->devices) || + direction == SNDRV_PCM_STREAM_CAPTURE) { + ret = -ENODEV; + goto out; + } + + sdev = list_last_entry(&data->devices, struct snd_soc_usb_device, list); + + /* + * Will always look for last PCM device discovered/probed as the + * active offload index. + */ + if (card == sdev->card_idx && + pcm == sdev->ppcm_idx[sdev->num_playback - 1]) { + idx = path == SND_SOC_USB_KCTL_CARD_ROUTE ? + component->card->snd_card->number : + q6usb_get_pcm_id(component); + } + +out: + route[0] = idx; + mutex_unlock(&data->mutex); + + return ret; +} + +static int q6usb_alsa_connection_cb(struct snd_soc_usb *usb, + struct snd_soc_usb_device *sdev, bool connected) +{ + struct q6usb_port_data *data; + + if (!usb->component) + return -ENODEV; + + data = dev_get_drvdata(usb->component->dev); + + mutex_lock(&data->mutex); + if (connected) { + if (data->hs_jack) + snd_jack_report(data->hs_jack->jack, SND_JACK_USB); + + /* Selects the latest USB headset plugged in for offloading */ + list_add_tail(&sdev->list, &data->devices); + } else { + list_del(&sdev->list); + + if (data->hs_jack) + snd_jack_report(data->hs_jack->jack, 0); + } + mutex_unlock(&data->mutex); + + return 0; +} + +static void q6usb_component_disable_jack(struct q6usb_port_data *data) +{ + /* Offload jack has already been disabled */ + if (!data->hs_jack) + return; + + snd_jack_report(data->hs_jack->jack, 0); + data->hs_jack = NULL; +} + +static void q6usb_component_enable_jack(struct q6usb_port_data *data, + struct snd_soc_jack *jack) +{ + snd_jack_report(jack->jack, !list_empty(&data->devices) ? 
SND_JACK_USB : 0); + data->hs_jack = jack; +} + +static int q6usb_component_set_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack, void *priv) +{ + struct q6usb_port_data *data = dev_get_drvdata(component->dev); + + mutex_lock(&data->mutex); + if (jack) + q6usb_component_enable_jack(data, jack); + else + q6usb_component_disable_jack(data); + mutex_unlock(&data->mutex); + + return 0; +} + +static void q6usb_dai_aux_release(struct device *dev) {} + +static int q6usb_dai_add_aux_device(struct q6usb_port_data *data, + struct auxiliary_device *auxdev) +{ + int ret; + + auxdev->dev.parent = data->priv.dev; + auxdev->dev.release = q6usb_dai_aux_release; + auxdev->name = "qc-usb-audio-offload"; + + ret = auxiliary_device_init(auxdev); + if (ret) + return ret; + + ret = auxiliary_device_add(auxdev); + if (ret) + auxiliary_device_uninit(auxdev); + + return ret; +} + +static int q6usb_component_probe(struct snd_soc_component *component) +{ + struct q6usb_port_data *data = dev_get_drvdata(component->dev); + struct snd_soc_usb *usb; + int ret; + + /* Add the QC USB SND aux device */ + ret = q6usb_dai_add_aux_device(data, &data->uauxdev); + if (ret < 0) + return ret; + + usb = snd_soc_usb_allocate_port(component, &data->priv); + if (IS_ERR(usb)) + return -ENOMEM; + + usb->connection_status_cb = q6usb_alsa_connection_cb; + usb->update_offload_route_info = q6usb_update_offload_route; + + snd_soc_usb_add_port(usb); + data->usb = usb; + + return 0; +} + +static void q6usb_component_remove(struct snd_soc_component *component) +{ + struct q6usb_port_data *data = dev_get_drvdata(component->dev); + + snd_soc_usb_remove_port(data->usb); + auxiliary_device_delete(&data->uauxdev); + auxiliary_device_uninit(&data->uauxdev); + snd_soc_usb_free_port(data->usb); +} + +static const struct snd_soc_component_driver q6usb_dai_component = { + .probe = q6usb_component_probe, + .set_jack = q6usb_component_set_jack, + .remove = q6usb_component_remove, + .name = "q6usb-dai-component", + .dapm_widgets = q6usb_dai_widgets, + .num_dapm_widgets = ARRAY_SIZE(q6usb_dai_widgets), + .dapm_routes = q6usb_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(q6usb_dapm_routes), + .of_xlate_dai_name = q6usb_audio_ports_of_xlate_dai_name, +}; + +static int q6usb_dai_dev_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct q6usb_port_data *data; + struct device *dev = &pdev->dev; + struct of_phandle_args args; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = of_property_read_u16(node, "qcom,usb-audio-intr-idx", + &data->priv.intr_num); + if (ret) { + dev_err(&pdev->dev, "failed to read intr idx.\n"); + return ret; + } + + ret = of_parse_phandle_with_fixed_args(node, "iommus", 1, 0, &args); + if (!ret) + data->priv.sid = args.args[0] & Q6_USB_SID_MASK; + + ret = devm_mutex_init(dev, &data->mutex); + if (ret < 0) + return ret; + + data->priv.domain = iommu_get_domain_for_dev(&pdev->dev); + + data->priv.dev = dev; + INIT_LIST_HEAD(&data->devices); + dev_set_drvdata(dev, data); + + return devm_snd_soc_register_component(dev, &q6usb_dai_component, + q6usb_be_dais, ARRAY_SIZE(q6usb_be_dais)); +} + +static const struct of_device_id q6usb_dai_device_id[] = { + { .compatible = "qcom,q6usb" }, + {}, +}; +MODULE_DEVICE_TABLE(of, q6usb_dai_device_id); + +static struct platform_driver q6usb_dai_platform_driver = { + .driver = { + .name = "q6usb-dai", + .of_match_table = of_match_ptr(q6usb_dai_device_id), + }, + .probe = q6usb_dai_dev_probe, 
+ /* + * Remove not required as resources are cleaned up as part of + * component removal. Others are device managed resources. + */ +}; +module_platform_driver(q6usb_dai_platform_driver); + +MODULE_DESCRIPTION("Q6 USB backend dai driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c index 9039107972e2..b70b2a5031df 100644 --- a/sound/soc/qcom/sm8250.c +++ b/sound/soc/qcom/sm8250.c @@ -13,6 +13,7 @@ #include <linux/input-event-codes.h> #include "qdsp6/q6afe.h" #include "common.h" +#include "usb_offload_utils.h" #include "sdw.h" #define DRIVER_NAME "sm8250" @@ -23,14 +24,34 @@ struct sm8250_snd_data { struct snd_soc_card *card; struct sdw_stream_runtime *sruntime[AFE_PORT_MAX]; struct snd_soc_jack jack; + struct snd_soc_jack usb_offload_jack; + bool usb_offload_jack_setup; bool jack_setup; }; static int sm8250_snd_init(struct snd_soc_pcm_runtime *rtd) { struct sm8250_snd_data *data = snd_soc_card_get_drvdata(rtd->card); + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + int ret; + + if (cpu_dai->id == USB_RX) + ret = qcom_snd_usb_offload_jack_setup(rtd, &data->usb_offload_jack, + &data->usb_offload_jack_setup); + else + ret = qcom_snd_wcd_jack_setup(rtd, &data->jack, &data->jack_setup); + return ret; +} + +static void sm8250_snd_exit(struct snd_soc_pcm_runtime *rtd) +{ + struct sm8250_snd_data *data = snd_soc_card_get_drvdata(rtd->card); + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + + if (cpu_dai->id == USB_RX) + qcom_snd_usb_offload_jack_remove(rtd, + &data->usb_offload_jack_setup); - return qcom_snd_wcd_jack_setup(rtd, &data->jack, &data->jack_setup); } static int sm8250_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, @@ -148,6 +169,7 @@ static void sm8250_add_be_ops(struct snd_soc_card *card) for_each_card_prelinks(card, i, link) { if (link->no_pcm == 1) { link->init = sm8250_snd_init; + link->exit = sm8250_snd_exit; link->be_hw_params_fixup = sm8250_be_hw_params_fixup; link->ops = &sm8250_be_ops; } diff --git a/sound/soc/qcom/usb_offload_utils.c b/sound/soc/qcom/usb_offload_utils.c new file mode 100644 index 000000000000..0a24b278fcdf --- /dev/null +++ b/sound/soc/qcom/usb_offload_utils.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#include <dt-bindings/sound/qcom,q6afe.h> +#include <linux/module.h> +#include <sound/jack.h> +#include <sound/soc-usb.h> + +#include "usb_offload_utils.h" + +int qcom_snd_usb_offload_jack_setup(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_jack *jack, bool *jack_setup) +{ + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0); + int ret = 0; + + if (cpu_dai->id != USB_RX) + return -EINVAL; + + if (!*jack_setup) { + ret = snd_soc_usb_setup_offload_jack(codec_dai->component, jack); + if (ret) + return ret; + } + + *jack_setup = true; + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_snd_usb_offload_jack_setup); + +int qcom_snd_usb_offload_jack_remove(struct snd_soc_pcm_runtime *rtd, + bool *jack_setup) +{ + struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); + struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0); + int ret = 0; + + if (cpu_dai->id != USB_RX) + return -EINVAL; + + if (*jack_setup) { + ret = snd_soc_component_set_jack(codec_dai->component, NULL, NULL); + if (ret) + return ret; + } + + *jack_setup = false; + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_snd_usb_offload_jack_remove); +MODULE_DESCRIPTION("ASoC Q6 USB offload controls"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/qcom/usb_offload_utils.h b/sound/soc/qcom/usb_offload_utils.h new file mode 100644 index 000000000000..c3f411f565b0 --- /dev/null +++ b/sound/soc/qcom/usb_offload_utils.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef __QCOM_SND_USB_OFFLOAD_UTILS_H__ +#define __QCOM_SND_USB_OFFLOAD_UTILS_H__ + +#include <sound/soc.h> + +#if IS_ENABLED(CONFIG_SND_SOC_QCOM_OFFLOAD_UTILS) +int qcom_snd_usb_offload_jack_setup(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_jack *jack, bool *jack_setup); + +int qcom_snd_usb_offload_jack_remove(struct snd_soc_pcm_runtime *rtd, + bool *jack_setup); +#else +static inline int qcom_snd_usb_offload_jack_setup(struct snd_soc_pcm_runtime *rtd, + struct snd_soc_jack *jack, + bool *jack_setup) +{ + return -ENODEV; +} + +static inline int qcom_snd_usb_offload_jack_remove(struct snd_soc_pcm_runtime *rtd, + bool *jack_setup) +{ + return -ENODEV; +} +#endif /* IS_ENABLED(CONFIG_SND_SOC_QCOM_OFFLOAD_UTILS) */ +#endif /* __QCOM_SND_USB_OFFLOAD_UTILS_H__ */ diff --git a/sound/soc/renesas/rz-ssi.c b/sound/soc/renesas/rz-ssi.c index 3a0af4ca7ab6..0f7458a43901 100644 --- a/sound/soc/renesas/rz-ssi.c +++ b/sound/soc/renesas/rz-ssi.c @@ -1244,7 +1244,7 @@ static int rz_ssi_runtime_resume(struct device *dev) static const struct dev_pm_ops rz_ssi_pm_ops = { RUNTIME_PM_OPS(rz_ssi_runtime_suspend, rz_ssi_runtime_resume, NULL) - SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static struct platform_driver rz_ssi_driver = { diff --git a/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c b/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c index 246e5c2e0af5..c7e55f443351 100644 --- a/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c +++ b/sound/soc/sdw_utils/soc_sdw_bridge_cs35l56.c @@ -60,6 +60,10 @@ static int asoc_sdw_bridge_cs35l56_asp_init(struct snd_soc_pcm_runtime *rtd) /* 4 x 16-bit sample slots and FSYNC=48000, BCLK=3.072 MHz */ for_each_rtd_codec_dais(rtd, i, codec_dai) { + ret = asoc_sdw_cs35l56_volume_limit(card, codec_dai->component->name_prefix); + if (ret) + return ret; + ret = snd_soc_dai_set_tdm_slot(codec_dai, 
tx_mask, rx_mask, 4, 16); if (ret < 0) return ret; diff --git a/sound/soc/sdw_utils/soc_sdw_cs42l43.c b/sound/soc/sdw_utils/soc_sdw_cs42l43.c index 668c9d28a1c1..b415d45d520d 100644 --- a/sound/soc/sdw_utils/soc_sdw_cs42l43.c +++ b/sound/soc/sdw_utils/soc_sdw_cs42l43.c @@ -20,6 +20,8 @@ #include <sound/soc-dapm.h> #include <sound/soc_sdw_utils.h> +#define CS42L43_SPK_VOLUME_0DB 128 /* 0dB Max */ + static const struct snd_soc_dapm_route cs42l43_hs_map[] = { { "Headphone", NULL, "cs42l43 AMP3_OUT" }, { "Headphone", NULL, "cs42l43 AMP4_OUT" }, @@ -117,6 +119,14 @@ int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_so return -ENOMEM; } + ret = snd_soc_limit_volume(card, "cs42l43 Speaker Digital Volume", + CS42L43_SPK_VOLUME_0DB); + if (ret) + dev_err(card->dev, "cs42l43 speaker volume limit failed: %d\n", ret); + else + dev_info(card->dev, "Setting CS42L43 Speaker volume limit to %d\n", + CS42L43_SPK_VOLUME_0DB); + ret = snd_soc_dapm_add_routes(&card->dapm, cs42l43_spk_map, ARRAY_SIZE(cs42l43_spk_map)); if (ret) diff --git a/sound/soc/sdw_utils/soc_sdw_cs_amp.c b/sound/soc/sdw_utils/soc_sdw_cs_amp.c index 4b6181cf2971..35b550bcd4de 100644 --- a/sound/soc/sdw_utils/soc_sdw_cs_amp.c +++ b/sound/soc/sdw_utils/soc_sdw_cs_amp.c @@ -16,6 +16,25 @@ #define CODEC_NAME_SIZE 8 #define CS_AMP_CHANNELS_PER_AMP 4 +#define CS35L56_SPK_VOLUME_0DB 400 /* 0dB Max */ + +int asoc_sdw_cs35l56_volume_limit(struct snd_soc_card *card, const char *name_prefix) +{ + char *volume_ctl_name; + int ret; + + volume_ctl_name = kasprintf(GFP_KERNEL, "%s Speaker Volume", name_prefix); + if (!volume_ctl_name) + return -ENOMEM; + + ret = snd_soc_limit_volume(card, volume_ctl_name, CS35L56_SPK_VOLUME_0DB); + if (ret) + dev_err(card->dev, "%s limit set failed: %d\n", volume_ctl_name, ret); + + kfree(volume_ctl_name); + return ret; +} +EXPORT_SYMBOL_NS(asoc_sdw_cs35l56_volume_limit, "SND_SOC_SDW_UTILS"); int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai) { @@ -40,6 +59,11 @@ int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai snprintf(widget_name, sizeof(widget_name), "%s SPK", codec_dai->component->name_prefix); + + ret = asoc_sdw_cs35l56_volume_limit(card, codec_dai->component->name_prefix); + if (ret) + return ret; + ret = snd_soc_dapm_add_routes(&card->dapm, &route, 1); if (ret) return ret; diff --git a/sound/soc/sdw_utils/soc_sdw_rt_dmic.c b/sound/soc/sdw_utils/soc_sdw_rt_dmic.c index 46d917a99c51..97be110a59b6 100644 --- a/sound/soc/sdw_utils/soc_sdw_rt_dmic.c +++ b/sound/soc/sdw_utils/soc_sdw_rt_dmic.c @@ -29,6 +29,8 @@ int asoc_sdw_rt_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_da mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "rt715-sdca"); else mic_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s", component->name_prefix); + if (!mic_name) + return -ENOMEM; card->components = devm_kasprintf(card->dev, GFP_KERNEL, "%s mic:%s", card->components, diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 4308a6cbb2e6..43835197d1fe 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1584,10 +1584,13 @@ int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream, /* * Filter for systems with 'component_chaining' enabled. * This helps to avoid unnecessary re-configuration of an - * already active BE on such systems. + * already active BE on such systems and ensures the BE DAI + * widget is powered ON after hw_params() BE DAI callback. 
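+		 * With this filter, BEs still in the NEW, OPEN, HW_PARAMS or
+		 * CLOSE states are picked up again; BEs in any other state are
+		 * skipped.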
*/ if (fe->card->component_chaining && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE)) continue; diff --git a/sound/soc/soc-usb.c b/sound/soc/soc-usb.c new file mode 100644 index 000000000000..26baa66d29a8 --- /dev/null +++ b/sound/soc/soc-usb.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/of.h> +#include <linux/usb.h> + +#include <sound/jack.h> +#include <sound/soc-usb.h> + +#include "../usb/card.h" + +static DEFINE_MUTEX(ctx_mutex); +static LIST_HEAD(usb_ctx_list); + +static struct device_node *snd_soc_find_phandle(struct device *dev) +{ + struct device_node *node; + + node = of_parse_phandle(dev->of_node, "usb-soc-be", 0); + if (!node) + return ERR_PTR(-ENODEV); + + return node; +} + +static struct snd_soc_usb *snd_soc_usb_ctx_lookup(struct device_node *node) +{ + struct snd_soc_usb *ctx; + + if (!node) + return NULL; + + list_for_each_entry(ctx, &usb_ctx_list, list) { + if (ctx->component->dev->of_node == node) + return ctx; + } + + return NULL; +} + +static struct snd_soc_usb *snd_soc_find_usb_ctx(struct device *dev) +{ + struct snd_soc_usb *ctx; + struct device_node *node; + + node = snd_soc_find_phandle(dev); + if (!IS_ERR(node)) { + ctx = snd_soc_usb_ctx_lookup(node); + of_node_put(node); + } else { + ctx = snd_soc_usb_ctx_lookup(dev->of_node); + } + + return ctx ? ctx : NULL; +} + +/* SOC USB sound kcontrols */ +/** + * snd_soc_usb_setup_offload_jack() - Create USB offloading jack + * @component: USB DPCM backend DAI component + * @jack: jack structure to create + * + * Creates a jack device for notifying userspace of the availability + * of an offload capable device. + * + * Returns 0 on success, negative on error. + * + */ +int snd_soc_usb_setup_offload_jack(struct snd_soc_component *component, + struct snd_soc_jack *jack) +{ + int ret; + + ret = snd_soc_card_jack_new(component->card, "USB Offload Jack", + SND_JACK_USB, jack); + if (ret < 0) { + dev_err(component->card->dev, "Unable to add USB offload jack: %d\n", + ret); + return ret; + } + + ret = snd_soc_component_set_jack(component, jack, NULL); + if (ret) { + dev_err(component->card->dev, "Failed to set jack: %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_setup_offload_jack); + +/** + * snd_soc_usb_update_offload_route - Find active USB offload path + * @dev: USB device to get offload status + * @card: USB card index + * @pcm: USB PCM device index + * @direction: playback or capture direction + * @path: pcm or card index + * @route: pointer to route output array + * + * Fetch the current status for the USB SND card and PCM device indexes + * specified. The "route" argument should be an array of integers being + * used for a kcontrol output. The first element should have the selected + * card index, and the second element should have the selected pcm device + * index. 
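+ *
+ * For example (illustrative values): while card 1, PCM device 0 is the
+ * active offload target, a kcontrol reading this route would see
+ * route[0] = 1 and route[1] = 0, with -1 reported when no offload path
+ * is currently active.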
+ */ +int snd_soc_usb_update_offload_route(struct device *dev, int card, int pcm, + int direction, enum snd_soc_usb_kctl path, + long *route) +{ + struct snd_soc_usb *ctx; + int ret = -ENODEV; + + mutex_lock(&ctx_mutex); + ctx = snd_soc_find_usb_ctx(dev); + if (!ctx) + goto exit; + + if (ctx->update_offload_route_info) + ret = ctx->update_offload_route_info(ctx->component, card, pcm, + direction, path, route); +exit: + mutex_unlock(&ctx_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_update_offload_route); + +/** + * snd_soc_usb_find_priv_data() - Retrieve private data stored + * @usbdev: device reference + * + * Fetch the private data stored in the USB SND SoC structure. + * + */ +void *snd_soc_usb_find_priv_data(struct device *usbdev) +{ + struct snd_soc_usb *ctx; + + mutex_lock(&ctx_mutex); + ctx = snd_soc_find_usb_ctx(usbdev); + mutex_unlock(&ctx_mutex); + + return ctx ? ctx->priv_data : NULL; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_find_priv_data); + +/** + * snd_soc_usb_find_supported_format() - Check if audio format is supported + * @card_idx: USB sound chip array index + * @params: PCM parameters + * @direction: capture or playback + * + * Ensure that a requested audio profile from the ASoC side is able to be + * supported by the USB device. + * + * Return 0 on success, negative on error. + * + */ +int snd_soc_usb_find_supported_format(int card_idx, + struct snd_pcm_hw_params *params, + int direction) +{ + struct snd_usb_stream *as; + + as = snd_usb_find_suppported_substream(card_idx, params, direction); + if (!as) + return -EOPNOTSUPP; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_find_supported_format); + +/** + * snd_soc_usb_allocate_port() - allocate a SoC USB port for offloading support + * @component: USB DPCM backend DAI component + * @data: private data + * + * Allocate and initialize a SoC USB port. The SoC USB port is used to communicate + * different USB audio devices attached, in order to start audio offloading handled + * by an ASoC entity. USB device plug in/out events are signaled with a + * notification, but don't directly impact the memory allocated for the SoC USB + * port. + * + */ +struct snd_soc_usb *snd_soc_usb_allocate_port(struct snd_soc_component *component, + void *data) +{ + struct snd_soc_usb *usb; + + usb = kzalloc(sizeof(*usb), GFP_KERNEL); + if (!usb) + return ERR_PTR(-ENOMEM); + + usb->component = component; + usb->priv_data = data; + + return usb; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_allocate_port); + +/** + * snd_soc_usb_free_port() - free a SoC USB port used for offloading support + * @usb: allocated SoC USB port + * + * Free and remove the SoC USB port from the available list of ports. This will + * ensure that the communication between USB SND and ASoC is halted. + * + */ +void snd_soc_usb_free_port(struct snd_soc_usb *usb) +{ + snd_soc_usb_remove_port(usb); + kfree(usb); +} +EXPORT_SYMBOL_GPL(snd_soc_usb_free_port); + +/** + * snd_soc_usb_add_port() - Add a USB backend port + * @usb: soc usb port to add + * + * Register a USB backend DAI link to the USB SoC framework. Memory is allocated + * as part of the USB backend DAI link. + * + */ +void snd_soc_usb_add_port(struct snd_soc_usb *usb) +{ + mutex_lock(&ctx_mutex); + list_add_tail(&usb->list, &usb_ctx_list); + mutex_unlock(&ctx_mutex); + + snd_usb_rediscover_devices(); +} +EXPORT_SYMBOL_GPL(snd_soc_usb_add_port); + +/** + * snd_soc_usb_remove_port() - Remove a USB backend port + * @usb: soc usb port to remove + * + * Remove a USB backend DAI link from USB SoC. 
Memory is freed when USB backend + * DAI is removed, or when snd_soc_usb_free_port() is called. + * + */ +void snd_soc_usb_remove_port(struct snd_soc_usb *usb) +{ + struct snd_soc_usb *ctx, *tmp; + + mutex_lock(&ctx_mutex); + list_for_each_entry_safe(ctx, tmp, &usb_ctx_list, list) { + if (ctx == usb) { + list_del(&ctx->list); + break; + } + } + mutex_unlock(&ctx_mutex); +} +EXPORT_SYMBOL_GPL(snd_soc_usb_remove_port); + +/** + * snd_soc_usb_connect() - Notification of USB device connection + * @usbdev: USB bus device + * @sdev: USB SND device to add + * + * Notify of a new USB SND device connection. The sdev->card_idx can be used to + * handle how the DPCM backend selects, which device to enable USB offloading + * on. + * + */ +int snd_soc_usb_connect(struct device *usbdev, struct snd_soc_usb_device *sdev) +{ + struct snd_soc_usb *ctx; + + if (!usbdev) + return -ENODEV; + + mutex_lock(&ctx_mutex); + ctx = snd_soc_find_usb_ctx(usbdev); + if (!ctx) + goto exit; + + if (ctx->connection_status_cb) + ctx->connection_status_cb(ctx, sdev, true); + +exit: + mutex_unlock(&ctx_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_connect); + +/** + * snd_soc_usb_disconnect() - Notification of USB device disconnection + * @usbdev: USB bus device + * @sdev: USB SND device to remove + * + * Notify of a new USB SND device disconnection to the USB backend. + * + */ +int snd_soc_usb_disconnect(struct device *usbdev, struct snd_soc_usb_device *sdev) +{ + struct snd_soc_usb *ctx; + + if (!usbdev) + return -ENODEV; + + mutex_lock(&ctx_mutex); + ctx = snd_soc_find_usb_ctx(usbdev); + if (!ctx) + goto exit; + + if (ctx->connection_status_cb) + ctx->connection_status_cb(ctx, sdev, false); + +exit: + mutex_unlock(&ctx_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_usb_disconnect); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SoC USB driver for offloading"); diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index e8c1abf1ae0a..bf5299ba11c3 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -409,11 +409,11 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai, unsigned int rate) { struct platform_device *pdev = sai->pdev; - unsigned int sai_ck_rate, sai_ck_max_rate, sai_curr_rate, sai_new_rate; + unsigned int sai_ck_rate, sai_ck_max_rate, sai_ck_min_rate, sai_curr_rate, sai_new_rate; int div, ret; /* - * Set maximum expected kernel clock frequency + * Set minimum and maximum expected kernel clock frequency * - mclk on or spdif: * f_sai_ck = MCKDIV * mclk-fs * fs * Here typical 256 ratio is assumed for mclk-fs @@ -423,13 +423,16 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai, * Set constraint MCKDIV * FRL <= 256, to ensure MCKDIV is in available range * f_sai_ck = sai_ck_max_rate * pow_of_two(FRL) / 256 */ + sai_ck_min_rate = rate * 256; if (!(rate % SAI_RATE_11K)) sai_ck_max_rate = SAI_MAX_SAMPLE_RATE_11K * 256; else sai_ck_max_rate = SAI_MAX_SAMPLE_RATE_8K * 256; - if (!sai->sai_mclk && !STM_SAI_PROTOCOL_IS_SPDIF(sai)) + if (!sai->sai_mclk && !STM_SAI_PROTOCOL_IS_SPDIF(sai)) { + sai_ck_min_rate = rate * sai->fs_length; sai_ck_max_rate /= DIV_ROUND_CLOSEST(256, roundup_pow_of_two(sai->fs_length)); + } /* * Request exclusivity, as the clock is shared by SAI sub-blocks and by @@ -444,7 +447,10 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai, * return immediately. 
*/ sai_curr_rate = clk_get_rate(sai->sai_ck); - if (stm32_sai_rate_accurate(sai_ck_max_rate, sai_curr_rate)) + dev_dbg(&pdev->dev, "kernel clock rate: min [%u], max [%u], current [%u]", + sai_ck_min_rate, sai_ck_max_rate, sai_curr_rate); + if (stm32_sai_rate_accurate(sai_ck_max_rate, sai_curr_rate) && + sai_curr_rate >= sai_ck_min_rate) return 0; /* @@ -472,7 +478,7 @@ static int stm32_sai_set_parent_rate(struct stm32_sai_sub_data *sai, /* Try a lower frequency */ div++; sai_ck_rate = sai_ck_max_rate / div; - } while (sai_ck_rate > rate); + } while (sai_ck_rate >= sai_ck_min_rate); /* No accurate rate found */ dev_err(&pdev->dev, "Failed to find an accurate rate"); diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig index 4a9569a3a39a..6daa551738da 100644 --- a/sound/usb/Kconfig +++ b/sound/usb/Kconfig @@ -176,6 +176,20 @@ config SND_BCD2000 To compile this driver as a module, choose M here: the module will be called snd-bcd2000. +config SND_USB_AUDIO_QMI + tristate "Qualcomm Audio Offload driver" + depends on QCOM_QMI_HELPERS && SND_USB_AUDIO && USB_XHCI_SIDEBAND && SND_SOC_USB + help + Say Y here to enable the Qualcomm USB audio offloading feature. + + This module sets up the required QMI stream enable/disable + responses to requests generated by the audio DSP. It passes the + USB transfer resource references, so that the audio DSP can issue + USB transfers to the host controller. + + To compile this driver as a module, choose M here: the module + will be called snd-usb-audio-qmi. + source "sound/usb/line6/Kconfig" endif # SND_USB diff --git a/sound/usb/Makefile b/sound/usb/Makefile index 30bd5348477b..e62794a87e73 100644 --- a/sound/usb/Makefile +++ b/sound/usb/Makefile @@ -35,5 +35,5 @@ obj-$(CONFIG_SND_USB_UA101) += snd-usbmidi-lib.o obj-$(CONFIG_SND_USB_USX2Y) += snd-usbmidi-lib.o obj-$(CONFIG_SND_USB_US122L) += snd-usbmidi-lib.o -obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/ +obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/ qcom/ obj-$(CONFIG_SND_USB_LINE6) += line6/ diff --git a/sound/usb/card.c b/sound/usb/card.c index 9c411b82a218..9fb8726a6c93 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -118,6 +118,95 @@ MODULE_PARM_DESC(skip_validation, "Skip unit descriptor validation (default: no) static DEFINE_MUTEX(register_mutex); static struct snd_usb_audio *usb_chip[SNDRV_CARDS]; static struct usb_driver usb_audio_driver; +static struct snd_usb_platform_ops *platform_ops; + +/* + * Register platform specific operations that will be notified on events + * which occur in USB SND. The platform driver can utilize this path to + * enable features, such as USB audio offloading, which allows for audio data + * to be queued by an audio DSP. + * + * Only one set of platform operations can be registered to USB SND. The + * platform register operation is protected by the register_mutex. + */ +int snd_usb_register_platform_ops(struct snd_usb_platform_ops *ops) +{ + guard(mutex)(®ister_mutex); + if (platform_ops) + return -EEXIST; + + platform_ops = ops; + return 0; +} +EXPORT_SYMBOL_GPL(snd_usb_register_platform_ops); + +/* + * Unregisters the current set of platform operations. This allows for + * a new set to be registered if required. + * + * The platform unregister operation is protected by the register_mutex. 
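+ *
+ * A platform driver is expected to pair this with the register call, for
+ * instance (hypothetical driver code, names are placeholders):
+ *
+ *	static struct snd_usb_platform_ops my_ops = {
+ *		.connect_cb = my_connect_cb,
+ *		.disconnect_cb = my_disconnect_cb,
+ *	};
+ *
+ *	probe:  snd_usb_register_platform_ops(&my_ops);
+ *	remove: snd_usb_unregister_platform_ops();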
+ */ +int snd_usb_unregister_platform_ops(void) +{ + guard(mutex)(®ister_mutex); + platform_ops = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_usb_unregister_platform_ops); + +/* + * in case the platform driver was not ready at the time of USB SND + * device connect, expose an API to discover all connected USB devices + * so it can populate any dependent resources/structures. + */ +void snd_usb_rediscover_devices(void) +{ + int i; + + guard(mutex)(®ister_mutex); + + if (!platform_ops || !platform_ops->connect_cb) + return; + + for (i = 0; i < SNDRV_CARDS; i++) { + if (usb_chip[i]) + platform_ops->connect_cb(usb_chip[i]); + } +} +EXPORT_SYMBOL_GPL(snd_usb_rediscover_devices); + +/* + * Checks to see if requested audio profile, i.e sample rate, # of + * channels, etc... is supported by the substream associated to the + * USB audio device. + */ +struct snd_usb_stream * +snd_usb_find_suppported_substream(int card_idx, struct snd_pcm_hw_params *params, + int direction) +{ + struct snd_usb_audio *chip; + struct snd_usb_substream *subs; + struct snd_usb_stream *as; + + /* + * Register mutex is held when populating and clearing usb_chip + * array. + */ + guard(mutex)(®ister_mutex); + chip = usb_chip[card_idx]; + + if (chip && enable[card_idx]) { + list_for_each_entry(as, &chip->pcm_list, list) { + subs = &as->substream[direction]; + if (snd_usb_find_substream_format(subs, params)) + return as; + } + } + + return NULL; +} +EXPORT_SYMBOL_GPL(snd_usb_find_suppported_substream); /* * disconnect streams @@ -922,7 +1011,11 @@ static int usb_audio_probe(struct usb_interface *intf, chip->num_interfaces++; usb_set_intfdata(intf, chip); atomic_dec(&chip->active); + + if (platform_ops && platform_ops->connect_cb) + platform_ops->connect_cb(chip); mutex_unlock(®ister_mutex); + return 0; __error: @@ -959,6 +1052,9 @@ static void usb_audio_disconnect(struct usb_interface *intf) card = chip->card; mutex_lock(®ister_mutex); + if (platform_ops && platform_ops->disconnect_cb) + platform_ops->disconnect_cb(chip); + if (atomic_inc_return(&chip->shutdown) == 1) { struct snd_usb_stream *as; struct snd_usb_endpoint *ep; @@ -1030,6 +1126,7 @@ int snd_usb_lock_shutdown(struct snd_usb_audio *chip) wake_up(&chip->shutdown_wait); return err; } +EXPORT_SYMBOL_GPL(snd_usb_lock_shutdown); /* autosuspend and unlock the shutdown */ void snd_usb_unlock_shutdown(struct snd_usb_audio *chip) @@ -1038,6 +1135,7 @@ void snd_usb_unlock_shutdown(struct snd_usb_audio *chip) if (atomic_dec_and_test(&chip->usage_count)) wake_up(&chip->shutdown_wait); } +EXPORT_SYMBOL_GPL(snd_usb_unlock_shutdown); int snd_usb_autoresume(struct snd_usb_audio *chip) { @@ -1060,6 +1158,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) } return 0; } +EXPORT_SYMBOL_GPL(snd_usb_autoresume); void snd_usb_autosuspend(struct snd_usb_audio *chip) { @@ -1073,6 +1172,7 @@ void snd_usb_autosuspend(struct snd_usb_audio *chip) for (i = 0; i < chip->num_interfaces; i++) usb_autopm_put_interface(chip->intf[i]); } +EXPORT_SYMBOL_GPL(snd_usb_autosuspend); static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) { @@ -1102,6 +1202,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) chip->system_suspend = chip->num_suspended_intf; } + if (platform_ops && platform_ops->suspend_cb) + platform_ops->suspend_cb(intf, message); + return 0; } @@ -1142,6 +1245,9 @@ static int usb_audio_resume(struct usb_interface *intf) snd_usb_midi_v2_resume_all(chip); + if (platform_ops && platform_ops->resume_cb) + 
platform_ops->resume_cb(intf); + out: if (chip->num_suspended_intf == chip->system_suspend) { snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); diff --git a/sound/usb/card.h b/sound/usb/card.h index 6ec95b2edf86..94404c24d240 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -15,6 +15,7 @@ struct audioformat { unsigned int channels; /* # channels */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int fmt_bits; /* number of significant bits */ + unsigned int fmt_sz; /* overall audio sub frame/slot size */ unsigned int frame_size; /* samples per frame for non-audio */ unsigned char iface; /* interface number */ unsigned char altsetting; /* corresponding alternate setting */ @@ -164,6 +165,7 @@ struct snd_usb_substream { unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */ + unsigned int opened:1; /* pcm device opened */ unsigned int running: 1; /* running status */ unsigned int period_elapsed_pending; /* delay period handling */ @@ -207,4 +209,19 @@ struct snd_usb_stream { struct list_head list; }; +struct snd_usb_platform_ops { + void (*connect_cb)(struct snd_usb_audio *chip); + void (*disconnect_cb)(struct snd_usb_audio *chip); + void (*suspend_cb)(struct usb_interface *intf, pm_message_t message); + void (*resume_cb)(struct usb_interface *intf); +}; + +struct snd_usb_stream * +snd_usb_find_suppported_substream(int card_idx, struct snd_pcm_hw_params *params, + int direction); + +int snd_usb_register_platform_ops(struct snd_usb_platform_ops *ops); +int snd_usb_unregister_platform_ops(void); + +void snd_usb_rediscover_devices(void); #endif /* __USBAUDIO_CARD_H */ diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index a29f28eb7d0c..7b01e7b4e335 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -926,6 +926,8 @@ static int endpoint_set_interface(struct snd_usb_audio *chip, { int altset = set ? 
ep->altsetting : 0; int err; + int retries = 0; + const int max_retries = 5; if (ep->iface_ref->altset == altset) return 0; @@ -935,8 +937,13 @@ static int endpoint_set_interface(struct snd_usb_audio *chip, usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n", ep->iface, altset, ep->ep_num); +retry: err = usb_set_interface(chip->dev, ep->iface, altset); if (err < 0) { + if (err == -EPROTO && ++retries <= max_retries) { + msleep(5 * (1 << (retries - 1))); + goto retry; + } usb_audio_err_ratelimited( chip, "%d:%d: usb_set_interface failed (%d)\n", ep->iface, altset, err); @@ -1524,6 +1531,7 @@ unlock: mutex_unlock(&chip->mutex); return err; } +EXPORT_SYMBOL_GPL(snd_usb_endpoint_prepare); /* get the current rate set to the given clock by any endpoint */ int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock) diff --git a/sound/usb/format.c b/sound/usb/format.c index 9d32b21a5fbb..8cd54f7bf33a 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -85,6 +85,7 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, } fp->fmt_bits = sample_width; + fp->fmt_sz = sample_bytes; if ((pcm_formats == 0) && (format == 0 || format == BIT(UAC_FORMAT_TYPE_I_UNDEFINED))) { @@ -260,7 +261,8 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof } /* Jabra Evolve 65 headset */ - if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) { + if (chip->usb_id == USB_ID(0x0b0e, 0x030b) || + chip->usb_id == USB_ID(0x0b0e, 0x030c)) { /* only 48kHz for playback while keeping 16kHz for capture */ if (fp->nr_rates != 1) return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000); diff --git a/sound/usb/helper.c b/sound/usb/helper.c index 72b671fb2c84..497d2b27fb59 100644 --- a/sound/usb/helper.c +++ b/sound/usb/helper.c @@ -62,6 +62,7 @@ void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype } return NULL; } +EXPORT_SYMBOL_GPL(snd_usb_find_csint_desc); /* * Wrapper for usb_control_msg(). diff --git a/sound/usb/midi.c b/sound/usb/midi.c index dcdd7e9e1ae9..cfed000f243a 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1885,10 +1885,18 @@ static void snd_usbmidi_init_substream(struct snd_usb_midi *umidi, } port_info = find_port_info(umidi, number); - name_format = port_info ? port_info->name : - (jack_name != default_jack_name ? "%s %s" : "%s %s %d"); - snprintf(substream->name, sizeof(substream->name), - name_format, umidi->card->shortname, jack_name, number + 1); + if (port_info || jack_name == default_jack_name || + strncmp(umidi->card->shortname, jack_name, strlen(umidi->card->shortname)) != 0) { + name_format = port_info ? port_info->name : + (jack_name != default_jack_name ? 
"%s %s" : "%s %s %d"); + snprintf(substream->name, sizeof(substream->name), + name_format, umidi->card->shortname, jack_name, number + 1); + } else { + /* The manufacturer included the iProduct name in the jack + * name, do not use both + */ + strscpy(substream->name, jack_name); + } *rsubstream = substream; } diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 08bf535ed163..b24ee38fad72 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -148,6 +148,16 @@ find_format(struct list_head *fmt_list_head, snd_pcm_format_t format, return found; } +const struct audioformat * +snd_usb_find_format(struct list_head *fmt_list_head, snd_pcm_format_t format, + unsigned int rate, unsigned int channels, bool strict_match, + struct snd_usb_substream *subs) +{ + return find_format(fmt_list_head, format, rate, channels, strict_match, + subs); +} +EXPORT_SYMBOL_GPL(snd_usb_find_format); + static const struct audioformat * find_substream_format(struct snd_usb_substream *subs, const struct snd_pcm_hw_params *params) @@ -157,6 +167,14 @@ find_substream_format(struct snd_usb_substream *subs, true, subs); } +const struct audioformat * +snd_usb_find_substream_format(struct snd_usb_substream *subs, + const struct snd_pcm_hw_params *params) +{ + return find_substream_format(subs, params); +} +EXPORT_SYMBOL_GPL(snd_usb_find_substream_format); + bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs) { const struct audioformat *fp; @@ -461,20 +479,9 @@ static void close_endpoints(struct snd_usb_audio *chip, } } -/* - * hw_params callback - * - * allocate a buffer and set the given audio format. - * - * so far we use a physically linear buffer although packetize transfer - * doesn't need a continuous area. - * if sg buffer is supported on the later version of alsa, we'll follow - * that. - */ -static int snd_usb_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *hw_params) +int snd_usb_hw_params(struct snd_usb_substream *subs, + struct snd_pcm_hw_params *hw_params) { - struct snd_usb_substream *subs = substream->runtime->private_data; struct snd_usb_audio *chip = subs->stream->chip; const struct audioformat *fmt; const struct audioformat *sync_fmt; @@ -499,7 +506,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, if (fmt->implicit_fb) { sync_fmt = snd_usb_find_implicit_fb_sync_format(chip, fmt, hw_params, - !substream->stream, + !subs->direction, &sync_fixed_rate); if (!sync_fmt) { usb_audio_dbg(chip, @@ -579,15 +586,28 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, return ret; } +EXPORT_SYMBOL_GPL(snd_usb_hw_params); /* - * hw_free callback + * hw_params callback * - * reset the audio format and release the buffer + * allocate a buffer and set the given audio format. + * + * so far we use a physically linear buffer although packetize transfer + * doesn't need a continuous area. + * if sg buffer is supported on the later version of alsa, we'll follow + * that. 
*/ -static int snd_usb_hw_free(struct snd_pcm_substream *substream) +static int snd_usb_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) { struct snd_usb_substream *subs = substream->runtime->private_data; + + return snd_usb_hw_params(subs, hw_params); +} + +int snd_usb_hw_free(struct snd_usb_substream *subs) +{ struct snd_usb_audio *chip = subs->stream->chip; snd_media_stop_pipeline(subs); @@ -603,6 +623,19 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) return 0; } +EXPORT_SYMBOL_GPL(snd_usb_hw_free); + +/* + * hw_free callback + * + * reset the audio format and release the buffer + */ +static int snd_usb_pcm_hw_free(struct snd_pcm_substream *substream) +{ + struct snd_usb_substream *subs = substream->runtime->private_data; + + return snd_usb_hw_free(subs); +} /* free-wheeling mode? (e.g. dmix) */ static int in_free_wheeling_mode(struct snd_pcm_runtime *runtime) @@ -1208,8 +1241,17 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream) struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = &as->substream[direction]; + struct snd_usb_audio *chip = subs->stream->chip; int ret; + mutex_lock(&chip->mutex); + if (subs->opened) { + mutex_unlock(&chip->mutex); + return -EBUSY; + } + subs->opened = 1; + mutex_unlock(&chip->mutex); + runtime->hw = snd_usb_hardware; /* need an explicit sync to catch applptr update in low-latency mode */ if (direction == SNDRV_PCM_STREAM_PLAYBACK && @@ -1226,13 +1268,23 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream) ret = setup_hw_info(runtime, subs); if (ret < 0) - return ret; + goto err_open; ret = snd_usb_autoresume(subs->stream->chip); if (ret < 0) - return ret; + goto err_open; ret = snd_media_stream_init(subs, as->pcm, direction); if (ret < 0) - snd_usb_autosuspend(subs->stream->chip); + goto err_resume; + + return 0; + +err_resume: + snd_usb_autosuspend(subs->stream->chip); +err_open: + mutex_lock(&chip->mutex); + subs->opened = 0; + mutex_unlock(&chip->mutex); + return ret; } @@ -1241,6 +1293,7 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream) int direction = substream->stream; struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_usb_substream *subs = &as->substream[direction]; + struct snd_usb_audio *chip = subs->stream->chip; int ret; snd_media_stop_pipeline(subs); @@ -1254,6 +1307,9 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream) subs->pcm_substream = NULL; snd_usb_autosuspend(subs->stream->chip); + mutex_lock(&chip->mutex); + subs->opened = 0; + mutex_unlock(&chip->mutex); return 0; } @@ -1746,8 +1802,8 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream static const struct snd_pcm_ops snd_usb_playback_ops = { .open = snd_usb_pcm_open, .close = snd_usb_pcm_close, - .hw_params = snd_usb_hw_params, - .hw_free = snd_usb_hw_free, + .hw_params = snd_usb_pcm_hw_params, + .hw_free = snd_usb_pcm_hw_free, .prepare = snd_usb_pcm_prepare, .trigger = snd_usb_substream_playback_trigger, .sync_stop = snd_usb_pcm_sync_stop, @@ -1758,8 +1814,8 @@ static const struct snd_pcm_ops snd_usb_playback_ops = { static const struct snd_pcm_ops snd_usb_capture_ops = { .open = snd_usb_pcm_open, .close = snd_usb_pcm_close, - .hw_params = snd_usb_hw_params, - .hw_free = snd_usb_hw_free, + .hw_params = snd_usb_pcm_hw_params, + .hw_free = snd_usb_pcm_hw_free, .prepare = snd_usb_pcm_prepare, .trigger = 
snd_usb_substream_capture_trigger, .sync_stop = snd_usb_pcm_sync_stop, diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h index 388fe2ba346d..c096021adb2b 100644 --- a/sound/usb/pcm.h +++ b/sound/usb/pcm.h @@ -15,4 +15,15 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs); int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip, struct audioformat *fmt); +const struct audioformat * +snd_usb_find_format(struct list_head *fmt_list_head, snd_pcm_format_t format, + unsigned int rate, unsigned int channels, bool strict_match, + struct snd_usb_substream *subs); +const struct audioformat * +snd_usb_find_substream_format(struct snd_usb_substream *subs, + const struct snd_pcm_hw_params *params); + +int snd_usb_hw_params(struct snd_usb_substream *subs, + struct snd_pcm_hw_params *hw_params); +int snd_usb_hw_free(struct snd_usb_substream *subs); #endif /* __USBAUDIO_PCM_H */ diff --git a/sound/usb/qcom/Makefile b/sound/usb/qcom/Makefile new file mode 100644 index 000000000000..6567727b66f0 --- /dev/null +++ b/sound/usb/qcom/Makefile @@ -0,0 +1,4 @@ +snd-usb-audio-qmi-y := usb_audio_qmi_v01.o qc_audio_offload.o +snd-usb-audio-qmi-y += mixer_usb_offload.o +obj-$(CONFIG_SND_USB_AUDIO_QMI) += snd-usb-audio-qmi.o + diff --git a/sound/usb/qcom/mixer_usb_offload.c b/sound/usb/qcom/mixer_usb_offload.c new file mode 100644 index 000000000000..2adeb64f4d33 --- /dev/null +++ b/sound/usb/qcom/mixer_usb_offload.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/usb.h> + +#include <sound/core.h> +#include <sound/control.h> +#include <sound/soc-usb.h> + +#include "../usbaudio.h" +#include "../card.h" +#include "../helper.h" +#include "../mixer.h" + +#include "mixer_usb_offload.h" + +#define PCM_IDX(n) ((n) & 0xffff) +#define CARD_IDX(n) ((n) >> 16) + +static int +snd_usb_offload_card_route_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct device *sysdev = snd_kcontrol_chip(kcontrol); + int ret; + + ret = snd_soc_usb_update_offload_route(sysdev, + CARD_IDX(kcontrol->private_value), + PCM_IDX(kcontrol->private_value), + SNDRV_PCM_STREAM_PLAYBACK, + SND_SOC_USB_KCTL_CARD_ROUTE, + ucontrol->value.integer.value); + if (ret < 0) { + ucontrol->value.integer.value[0] = -1; + ucontrol->value.integer.value[1] = -1; + } + + return 0; +} + +static int snd_usb_offload_card_route_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; + uinfo->count = 1; + uinfo->value.integer.min = -1; + uinfo->value.integer.max = SNDRV_CARDS; + + return 0; +} + +static struct snd_kcontrol_new snd_usb_offload_mapped_card_ctl = { + .iface = SNDRV_CTL_ELEM_IFACE_CARD, + .access = SNDRV_CTL_ELEM_ACCESS_READ, + .info = snd_usb_offload_card_route_info, + .get = snd_usb_offload_card_route_get, +}; + +static int +snd_usb_offload_pcm_route_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct device *sysdev = snd_kcontrol_chip(kcontrol); + int ret; + + ret = snd_soc_usb_update_offload_route(sysdev, + CARD_IDX(kcontrol->private_value), + PCM_IDX(kcontrol->private_value), + SNDRV_PCM_STREAM_PLAYBACK, + SND_SOC_USB_KCTL_PCM_ROUTE, + ucontrol->value.integer.value); + if (ret < 0) { + ucontrol->value.integer.value[0] = -1; + ucontrol->value.integer.value[1] = -1; + } + + return 0; +} + +static int snd_usb_offload_pcm_route_info(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ 
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; + uinfo->count = 1; + uinfo->value.integer.min = -1; + /* Arbitrary max value, as there is no 'limit' on number of PCM devices */ + uinfo->value.integer.max = 0xff; + + return 0; +} + +static struct snd_kcontrol_new snd_usb_offload_mapped_pcm_ctl = { + .iface = SNDRV_CTL_ELEM_IFACE_CARD, + .access = SNDRV_CTL_ELEM_ACCESS_READ, + .info = snd_usb_offload_pcm_route_info, + .get = snd_usb_offload_pcm_route_get, +}; + +/** + * snd_usb_offload_create_ctl() - Add USB offload bounded mixer + * @chip: USB SND chip device + * @bedev: Reference to USB backend DAI device + * + * Creates a sound control for a USB audio device, so that applications can + * query for if there is an available USB audio offload path, and which + * card is managing it. + */ +int snd_usb_offload_create_ctl(struct snd_usb_audio *chip, struct device *bedev) +{ + struct snd_kcontrol_new *chip_kctl; + struct snd_usb_substream *subs; + struct snd_usb_stream *as; + char ctl_name[48]; + int ret; + + list_for_each_entry(as, &chip->pcm_list, list) { + subs = &as->substream[SNDRV_PCM_STREAM_PLAYBACK]; + if (!subs->ep_num || as->pcm_index > 0xff) + continue; + + chip_kctl = &snd_usb_offload_mapped_card_ctl; + chip_kctl->count = 1; + /* + * Store the associated USB SND card number and PCM index for + * the kctl. + */ + chip_kctl->private_value = as->pcm_index | + chip->card->number << 16; + sprintf(ctl_name, "USB Offload Playback Card Route PCM#%d", + as->pcm_index); + chip_kctl->name = ctl_name; + ret = snd_ctl_add(chip->card, snd_ctl_new1(chip_kctl, bedev)); + if (ret < 0) + break; + + chip_kctl = &snd_usb_offload_mapped_pcm_ctl; + chip_kctl->count = 1; + /* + * Store the associated USB SND card number and PCM index for + * the kctl. + */ + chip_kctl->private_value = as->pcm_index | + chip->card->number << 16; + sprintf(ctl_name, "USB Offload Playback PCM Route PCM#%d", + as->pcm_index); + chip_kctl->name = ctl_name; + ret = snd_ctl_add(chip->card, snd_ctl_new1(chip_kctl, bedev)); + if (ret < 0) + break; + } + + return ret; +} diff --git a/sound/usb/qcom/mixer_usb_offload.h b/sound/usb/qcom/mixer_usb_offload.h new file mode 100644 index 000000000000..cf20673f4dc9 --- /dev/null +++ b/sound/usb/qcom/mixer_usb_offload.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __USB_OFFLOAD_MIXER_H +#define __USB_OFFLOAD_MIXER_H + +int snd_usb_offload_create_ctl(struct snd_usb_audio *chip, struct device *bedev); + +#endif /* __USB_OFFLOAD_MIXER_H */ diff --git a/sound/usb/qcom/qc_audio_offload.c b/sound/usb/qcom/qc_audio_offload.c new file mode 100644 index 000000000000..5bc27c82e0af --- /dev/null +++ b/sound/usb/qcom/qc_audio_offload.c @@ -0,0 +1,2017 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
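For reference, the kcontrol private_value packing used in mixer_usb_offload.c above keeps the PCM index in the low 16 bits and the card number in the upper bits (PCM_IDX()/CARD_IDX()). A minimal stand-alone C sketch of just that packing; the macros are mirrored from the hunk above, and the card/PCM numbers are arbitrary example values:

	#include <assert.h>
	#include <stdio.h>

	/* Same packing helpers as mixer_usb_offload.c */
	#define PCM_IDX(n)	((n) & 0xffff)
	#define CARD_IDX(n)	((n) >> 16)

	int main(void)
	{
		unsigned int card = 2, pcm = 5;	/* arbitrary example values */
		unsigned long private_value = pcm | (card << 16);

		assert(PCM_IDX(private_value) == pcm);
		assert(CARD_IDX(private_value) == card);
		printf("card %lu pcm %lu\n",
		       CARD_IDX(private_value), PCM_IDX(private_value));
		return 0;
	}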
+ */ + +#include <linux/auxiliary_bus.h> +#include <linux/ctype.h> +#include <linux/dma-mapping.h> +#include <linux/dma-map-ops.h> +#include <linux/init.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/soc/qcom/qmi.h> +#include <linux/usb.h> +#include <linux/usb/audio.h> +#include <linux/usb/audio-v2.h> +#include <linux/usb/audio-v3.h> +#include <linux/usb/hcd.h> +#include <linux/usb/quirks.h> +#include <linux/usb/xhci-sideband.h> + +#include <sound/control.h> +#include <sound/core.h> +#include <sound/info.h> +#include <sound/initval.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> +#include <sound/q6usboffload.h> +#include <sound/soc.h> +#include <sound/soc-usb.h> + +#include "../usbaudio.h" +#include "../card.h" +#include "../endpoint.h" +#include "../format.h" +#include "../helper.h" +#include "../pcm.h" +#include "../power.h" + +#include "mixer_usb_offload.h" +#include "usb_audio_qmi_v01.h" + +/* Stream disable request timeout during USB device disconnect */ +#define DEV_RELEASE_WAIT_TIMEOUT 10000 /* in ms */ + +/* Data interval calculation parameters */ +#define BUS_INTERVAL_FULL_SPEED 1000 /* in us */ +#define BUS_INTERVAL_HIGHSPEED_AND_ABOVE 125 /* in us */ +#define MAX_BINTERVAL_ISOC_EP 16 + +#define QMI_STREAM_REQ_CARD_NUM_MASK 0xffff0000 +#define QMI_STREAM_REQ_DEV_NUM_MASK 0xff00 +#define QMI_STREAM_REQ_DIRECTION 0xff + +/* iommu resource parameters and management */ +#define PREPEND_SID_TO_IOVA(iova, sid) ((u64)(((u64)(iova)) | \ + (((u64)sid) << 32))) +#define IOVA_MASK(iova) (((u64)(iova)) & 0xFFFFFFFF) +#define IOVA_BASE 0x1000 +#define IOVA_XFER_RING_BASE (IOVA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1)) +#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32) +#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE) +#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE) + +#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE) + +struct iova_info { + struct list_head list; + unsigned long start_iova; + size_t size; + bool in_use; +}; + +struct intf_info { + /* IOMMU ring/buffer mapping information */ + unsigned long data_xfer_ring_va; + size_t data_xfer_ring_size; + unsigned long sync_xfer_ring_va; + size_t sync_xfer_ring_size; + dma_addr_t xfer_buf_iova; + size_t xfer_buf_size; + dma_addr_t xfer_buf_dma; + u8 *xfer_buf_cpu; + + /* USB endpoint information */ + unsigned int data_ep_pipe; + unsigned int sync_ep_pipe; + unsigned int data_ep_idx; + unsigned int sync_ep_idx; + + u8 intf_num; + u8 pcm_card_num; + u8 pcm_dev_num; + u8 direction; + bool in_use; +}; + +struct uaudio_qmi_dev { + struct device *dev; + struct q6usb_offload *data; + struct auxiliary_device *auxdev; + + /* list to keep track of available iova */ + struct list_head xfer_ring_list; + size_t xfer_ring_iova_size; + unsigned long curr_xfer_ring_iova; + struct list_head xfer_buf_list; + size_t xfer_buf_iova_size; + unsigned long curr_xfer_buf_iova; + + /* bit fields representing pcm card enabled */ + unsigned long card_slot; + /* indicate event ring mapped or not */ + bool er_mapped; +}; + +struct uaudio_dev { + struct usb_device *udev; + /* audio control interface */ + struct usb_host_interface *ctrl_intf; + unsigned int usb_core_id; + atomic_t in_use; + struct kref kref; + wait_queue_head_t disconnect_wq; + + /* interface specific */ + int num_intf; + struct intf_info *info; + struct snd_usb_audio *chip; + + /* xhci sideband */ + struct xhci_sideband *sb; + + /* SoC USB device */ + struct snd_soc_usb_device *sdev; +}; + +static struct 
uaudio_dev uadev[SNDRV_CARDS]; +static struct uaudio_qmi_dev *uaudio_qdev; +static struct uaudio_qmi_svc *uaudio_svc; +static DEFINE_MUTEX(qdev_mutex); + +struct uaudio_qmi_svc { + struct qmi_handle *uaudio_svc_hdl; + struct sockaddr_qrtr client_sq; + bool client_connected; +}; + +enum mem_type { + MEM_EVENT_RING, + MEM_XFER_RING, + MEM_XFER_BUF, +}; + +/* Supported audio formats */ +enum usb_qmi_audio_format { + USB_QMI_PCM_FORMAT_S8 = 0, + USB_QMI_PCM_FORMAT_U8, + USB_QMI_PCM_FORMAT_S16_LE, + USB_QMI_PCM_FORMAT_S16_BE, + USB_QMI_PCM_FORMAT_U16_LE, + USB_QMI_PCM_FORMAT_U16_BE, + USB_QMI_PCM_FORMAT_S24_LE, + USB_QMI_PCM_FORMAT_S24_BE, + USB_QMI_PCM_FORMAT_U24_LE, + USB_QMI_PCM_FORMAT_U24_BE, + USB_QMI_PCM_FORMAT_S24_3LE, + USB_QMI_PCM_FORMAT_S24_3BE, + USB_QMI_PCM_FORMAT_U24_3LE, + USB_QMI_PCM_FORMAT_U24_3BE, + USB_QMI_PCM_FORMAT_S32_LE, + USB_QMI_PCM_FORMAT_S32_BE, + USB_QMI_PCM_FORMAT_U32_LE, + USB_QMI_PCM_FORMAT_U32_BE, +}; + +static int usb_qmi_get_pcm_num(struct snd_usb_audio *chip, int direction) +{ + struct snd_usb_substream *subs = NULL; + struct snd_usb_stream *as; + int count = 0; + + list_for_each_entry(as, &chip->pcm_list, list) { + subs = &as->substream[direction]; + if (subs->ep_num) + count++; + } + + return count; +} + +static enum usb_qmi_audio_device_speed_enum_v01 +get_speed_info(enum usb_device_speed udev_speed) +{ + switch (udev_speed) { + case USB_SPEED_LOW: + return USB_QMI_DEVICE_SPEED_LOW_V01; + case USB_SPEED_FULL: + return USB_QMI_DEVICE_SPEED_FULL_V01; + case USB_SPEED_HIGH: + return USB_QMI_DEVICE_SPEED_HIGH_V01; + case USB_SPEED_SUPER: + return USB_QMI_DEVICE_SPEED_SUPER_V01; + case USB_SPEED_SUPER_PLUS: + return USB_QMI_DEVICE_SPEED_SUPER_PLUS_V01; + default: + return USB_QMI_DEVICE_SPEED_INVALID_V01; + } +} + +static struct snd_usb_substream *find_substream(unsigned int card_num, + unsigned int pcm_idx, + unsigned int direction) +{ + struct snd_usb_substream *subs = NULL; + struct snd_usb_audio *chip; + struct snd_usb_stream *as; + + chip = uadev[card_num].chip; + if (!chip || atomic_read(&chip->shutdown)) + goto done; + + if (pcm_idx >= chip->pcm_devs) + goto done; + + if (direction > SNDRV_PCM_STREAM_CAPTURE) + goto done; + + list_for_each_entry(as, &chip->pcm_list, list) { + if (as->pcm_index == pcm_idx) { + subs = &as->substream[direction]; + goto done; + } + } + +done: + return subs; +} + +static int info_idx_from_ifnum(int card_num, int intf_num, bool enable) +{ + int i; + + /* + * default index 0 is used when info is allocated upon + * first enable audio stream req for a pcm device + */ + if (enable && !uadev[card_num].info) + return 0; + + for (i = 0; i < uadev[card_num].num_intf; i++) { + if (enable && !uadev[card_num].info[i].in_use) + return i; + else if (!enable && + uadev[card_num].info[i].intf_num == intf_num) + return i; + } + + return -EINVAL; +} + +static int get_data_interval_from_si(struct snd_usb_substream *subs, + u32 service_interval) +{ + unsigned int bus_intval_mult; + unsigned int bus_intval; + unsigned int binterval; + + if (subs->dev->speed >= USB_SPEED_HIGH) + bus_intval = BUS_INTERVAL_HIGHSPEED_AND_ABOVE; + else + bus_intval = BUS_INTERVAL_FULL_SPEED; + + if (service_interval % bus_intval) + return -EINVAL; + + bus_intval_mult = service_interval / bus_intval; + binterval = ffs(bus_intval_mult); + if (!binterval || binterval > MAX_BINTERVAL_ISOC_EP) + return -EINVAL; + + /* check if another bit is set then bail out */ + bus_intval_mult = bus_intval_mult >> binterval; + if (bus_intval_mult) + return -EINVAL; + + return (binterval - 
1); +} + +/* maps audio format received over QMI to asound.h based pcm format */ +static snd_pcm_format_t map_pcm_format(enum usb_qmi_audio_format fmt_received) +{ + switch (fmt_received) { + case USB_QMI_PCM_FORMAT_S8: + return SNDRV_PCM_FORMAT_S8; + case USB_QMI_PCM_FORMAT_U8: + return SNDRV_PCM_FORMAT_U8; + case USB_QMI_PCM_FORMAT_S16_LE: + return SNDRV_PCM_FORMAT_S16_LE; + case USB_QMI_PCM_FORMAT_S16_BE: + return SNDRV_PCM_FORMAT_S16_BE; + case USB_QMI_PCM_FORMAT_U16_LE: + return SNDRV_PCM_FORMAT_U16_LE; + case USB_QMI_PCM_FORMAT_U16_BE: + return SNDRV_PCM_FORMAT_U16_BE; + case USB_QMI_PCM_FORMAT_S24_LE: + return SNDRV_PCM_FORMAT_S24_LE; + case USB_QMI_PCM_FORMAT_S24_BE: + return SNDRV_PCM_FORMAT_S24_BE; + case USB_QMI_PCM_FORMAT_U24_LE: + return SNDRV_PCM_FORMAT_U24_LE; + case USB_QMI_PCM_FORMAT_U24_BE: + return SNDRV_PCM_FORMAT_U24_BE; + case USB_QMI_PCM_FORMAT_S24_3LE: + return SNDRV_PCM_FORMAT_S24_3LE; + case USB_QMI_PCM_FORMAT_S24_3BE: + return SNDRV_PCM_FORMAT_S24_3BE; + case USB_QMI_PCM_FORMAT_U24_3LE: + return SNDRV_PCM_FORMAT_U24_3LE; + case USB_QMI_PCM_FORMAT_U24_3BE: + return SNDRV_PCM_FORMAT_U24_3BE; + case USB_QMI_PCM_FORMAT_S32_LE: + return SNDRV_PCM_FORMAT_S32_LE; + case USB_QMI_PCM_FORMAT_S32_BE: + return SNDRV_PCM_FORMAT_S32_BE; + case USB_QMI_PCM_FORMAT_U32_LE: + return SNDRV_PCM_FORMAT_U32_LE; + case USB_QMI_PCM_FORMAT_U32_BE: + return SNDRV_PCM_FORMAT_U32_BE; + default: + /* + * We expect the caller to do input validation so we should + * never hit this. But we do have to return a proper + * snd_pcm_format_t value due to the __bitwise attribute; so + * just return the equivalent of 0 in case of bad input. + */ + return SNDRV_PCM_FORMAT_S8; + } +} + +/* + * Sends QMI disconnect indication message, assumes chip->mutex and qdev_mutex + * lock held by caller. 
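The bInterval derivation in get_data_interval_from_si() above lends itself to a small worked example: a 1000 us service interval on a high-speed bus gives 1000 / 125 = 8 = 1 << 3, so the returned data interval is 3. A stand-alone user-space sketch of the same computation (constants mirrored from the hunk above, ffs() from <strings.h>, the substream reduced to a plain high-speed flag):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define BUS_INTERVAL_FULL_SPEED			1000	/* us */
	#define BUS_INTERVAL_HIGHSPEED_AND_ABOVE	 125	/* us */
	#define MAX_BINTERVAL_ISOC_EP			  16

	/* Same steps as get_data_interval_from_si() */
	static int data_interval_from_si(unsigned int service_interval, int highspeed)
	{
		unsigned int bus_intval = highspeed ? BUS_INTERVAL_HIGHSPEED_AND_ABOVE :
						      BUS_INTERVAL_FULL_SPEED;
		unsigned int mult, binterval;

		if (service_interval % bus_intval)
			return -1;
		mult = service_interval / bus_intval;
		binterval = ffs(mult);
		if (!binterval || binterval > MAX_BINTERVAL_ISOC_EP)
			return -1;
		if (mult >> binterval)	/* more than one bit set: not a power of two */
			return -1;
		return binterval - 1;
	}

	int main(void)
	{
		/* 1 ms service interval, high-speed bus -> prints 3 */
		printf("%d\n", data_interval_from_si(1000, 1));
		return 0;
	}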
+ */ +static int uaudio_send_disconnect_ind(struct snd_usb_audio *chip) +{ + struct qmi_uaudio_stream_ind_msg_v01 disconnect_ind = {0}; + struct uaudio_qmi_svc *svc = uaudio_svc; + struct uaudio_dev *dev; + int ret = 0; + + dev = &uadev[chip->card->number]; + + if (atomic_read(&dev->in_use)) { + mutex_unlock(&chip->mutex); + mutex_unlock(&qdev_mutex); + dev_dbg(uaudio_qdev->data->dev, "sending qmi indication suspend\n"); + disconnect_ind.dev_event = USB_QMI_DEV_DISCONNECT_V01; + disconnect_ind.slot_id = dev->udev->slot_id; + disconnect_ind.controller_num = dev->usb_core_id; + disconnect_ind.controller_num_valid = 1; + ret = qmi_send_indication(svc->uaudio_svc_hdl, &svc->client_sq, + QMI_UAUDIO_STREAM_IND_V01, + QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN, + qmi_uaudio_stream_ind_msg_v01_ei, + &disconnect_ind); + if (ret < 0) + dev_err(uaudio_qdev->data->dev, + "qmi send failed with err: %d\n", ret); + + ret = wait_event_interruptible_timeout(dev->disconnect_wq, + !atomic_read(&dev->in_use), + msecs_to_jiffies(DEV_RELEASE_WAIT_TIMEOUT)); + if (!ret) { + dev_err(uaudio_qdev->data->dev, + "timeout while waiting for dev_release\n"); + atomic_set(&dev->in_use, 0); + } else if (ret < 0) { + dev_err(uaudio_qdev->data->dev, + "failed with ret %d\n", ret); + atomic_set(&dev->in_use, 0); + } + mutex_lock(&qdev_mutex); + mutex_lock(&chip->mutex); + } + + return ret; +} + +/* Offloading IOMMU management */ +static unsigned long uaudio_get_iova(unsigned long *curr_iova, + size_t *curr_iova_size, + struct list_head *head, size_t size) +{ + struct iova_info *info, *new_info = NULL; + struct list_head *curr_head; + size_t tmp_size = size; + unsigned long iova = 0; + + if (size % PAGE_SIZE) + goto done; + + if (size > *curr_iova_size) + goto done; + + if (*curr_iova_size == 0) + goto done; + + list_for_each_entry(info, head, list) { + /* exact size iova_info */ + if (!info->in_use && info->size == size) { + info->in_use = true; + iova = info->start_iova; + *curr_iova_size -= size; + goto done; + } else if (!info->in_use && tmp_size >= info->size) { + if (!new_info) + new_info = info; + tmp_size -= info->size; + if (tmp_size) + continue; + + iova = new_info->start_iova; + for (curr_head = &new_info->list; curr_head != + &info->list; curr_head = curr_head->next) { + new_info = list_entry(curr_head, struct + iova_info, list); + new_info->in_use = true; + } + info->in_use = true; + *curr_iova_size -= size; + goto done; + } else { + /* iova region in use */ + new_info = NULL; + tmp_size = size; + } + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + iova = 0; + goto done; + } + + iova = *curr_iova; + info->start_iova = *curr_iova; + info->size = size; + info->in_use = true; + *curr_iova += size; + *curr_iova_size -= size; + list_add_tail(&info->list, head); + +done: + return iova; +} + +static void uaudio_put_iova(unsigned long iova, size_t size, struct list_head + *head, size_t *curr_iova_size) +{ + struct iova_info *info; + size_t tmp_size = size; + bool found = false; + + list_for_each_entry(info, head, list) { + if (info->start_iova == iova) { + if (!info->in_use) + return; + + found = true; + info->in_use = false; + if (info->size == size) + goto done; + } + + if (found && tmp_size >= info->size) { + info->in_use = false; + tmp_size -= info->size; + if (!tmp_size) + goto done; + } + } + + if (!found) + return; + +done: + *curr_iova_size += size; +} + +/** + * uaudio_iommu_unmap() - unmaps iommu memory for adsp + * @mtype: ring type + * @iova: virtual address to unmap + * @iova_size: region 
size + * @mapped_iova_size: mapped region size + * + * Unmaps the memory region that was previously assigned to the adsp. + * + */ +static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long iova, + size_t iova_size, size_t mapped_iova_size) +{ + size_t umap_size; + bool unmap = true; + + if (!iova || !iova_size) + return; + + switch (mtype) { + case MEM_EVENT_RING: + if (uaudio_qdev->er_mapped) + uaudio_qdev->er_mapped = false; + else + unmap = false; + break; + + case MEM_XFER_RING: + uaudio_put_iova(iova, iova_size, &uaudio_qdev->xfer_ring_list, + &uaudio_qdev->xfer_ring_iova_size); + break; + case MEM_XFER_BUF: + uaudio_put_iova(iova, iova_size, &uaudio_qdev->xfer_buf_list, + &uaudio_qdev->xfer_buf_iova_size); + break; + default: + unmap = false; + } + + if (!unmap || !mapped_iova_size) + return; + + umap_size = iommu_unmap(uaudio_qdev->data->domain, iova, mapped_iova_size); + if (umap_size != mapped_iova_size) + dev_err(uaudio_qdev->data->dev, + "unmapped size %zu for iova 0x%08lx of mapped size %zu\n", + umap_size, iova, mapped_iova_size); +} + +/** + * uaudio_iommu_map() - maps iommu memory for adsp + * @mtype: ring type + * @dma_coherent: dma coherent + * @pa: physical address for ring/buffer + * @size: size of memory region + * @sgt: sg table for memory region + * + * Maps the XHCI related resources to a memory region that is assigned to be + * used by the adsp. This will be mapped to the domain, which is created by + * the ASoC USB backend driver. + * + */ +static unsigned long uaudio_iommu_map(enum mem_type mtype, bool dma_coherent, + phys_addr_t pa, size_t size, + struct sg_table *sgt) +{ + struct scatterlist *sg; + unsigned long iova = 0; + size_t total_len = 0; + unsigned long iova_sg; + phys_addr_t pa_sg; + bool map = true; + size_t sg_len; + int prot; + int ret; + int i; + + prot = IOMMU_READ | IOMMU_WRITE; + + if (dma_coherent) + prot |= IOMMU_CACHE; + + switch (mtype) { + case MEM_EVENT_RING: + iova = IOVA_BASE; + /* er already mapped */ + if (uaudio_qdev->er_mapped) + map = false; + break; + case MEM_XFER_RING: + iova = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova, + &uaudio_qdev->xfer_ring_iova_size, + &uaudio_qdev->xfer_ring_list, size); + break; + case MEM_XFER_BUF: + iova = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova, + &uaudio_qdev->xfer_buf_iova_size, + &uaudio_qdev->xfer_buf_list, size); + break; + default: + dev_err(uaudio_qdev->data->dev, "unknown mem type %d\n", mtype); + } + + if (!iova || !map) + goto done; + + if (!sgt) + goto skip_sgt_map; + + iova_sg = iova; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + sg_len = PAGE_ALIGN(sg->offset + sg->length); + pa_sg = page_to_phys(sg_page(sg)); + ret = iommu_map(uaudio_qdev->data->domain, iova_sg, pa_sg, sg_len, + prot, GFP_KERNEL); + if (ret) { + uaudio_iommu_unmap(MEM_XFER_BUF, iova, size, total_len); + iova = 0; + goto done; + } + + iova_sg += sg_len; + total_len += sg_len; + } + + if (size != total_len) { + uaudio_iommu_unmap(MEM_XFER_BUF, iova, size, total_len); + iova = 0; + } + return iova; + +skip_sgt_map: + iommu_map(uaudio_qdev->data->domain, iova, pa, size, prot, GFP_KERNEL); + +done: + return iova; +} + +/* looks up alias, if any, for controller DT node and returns the index */ +static int usb_get_controller_id(struct usb_device *udev) +{ + if (udev->bus->sysdev && udev->bus->sysdev->of_node) + return of_alias_get_id(udev->bus->sysdev->of_node, "usb"); + + return -ENODEV; +} + +/** + * uaudio_dev_intf_cleanup() - cleanup transfer resources + * @udev: usb device + * @info: usb offloading 
interface + * + * Cleans up the transfer ring related resources which are assigned per + * endpoint from XHCI. This is invoked when the USB endpoints are no + * longer in use by the adsp. + * + */ +static void uaudio_dev_intf_cleanup(struct usb_device *udev, struct intf_info *info) +{ + uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va, + info->data_xfer_ring_size, info->data_xfer_ring_size); + info->data_xfer_ring_va = 0; + info->data_xfer_ring_size = 0; + + uaudio_iommu_unmap(MEM_XFER_RING, info->sync_xfer_ring_va, + info->sync_xfer_ring_size, info->sync_xfer_ring_size); + info->sync_xfer_ring_va = 0; + info->sync_xfer_ring_size = 0; + + uaudio_iommu_unmap(MEM_XFER_BUF, info->xfer_buf_iova, info->xfer_buf_size, + info->xfer_buf_size); + info->xfer_buf_iova = 0; + + usb_free_coherent(udev, info->xfer_buf_size, info->xfer_buf_cpu, + info->xfer_buf_dma); + info->xfer_buf_size = 0; + info->xfer_buf_cpu = NULL; + info->xfer_buf_dma = 0; + + info->in_use = false; +} + +/** + * uaudio_event_ring_cleanup_free() - cleanup secondary event ring + * @dev: usb offload device + * + * Cleans up the secondary event ring that was requested. This will + * occur when the adsp is no longer transferring data on the USB bus + * across all endpoints. + * + */ +static void uaudio_event_ring_cleanup_free(struct uaudio_dev *dev) +{ + clear_bit(dev->chip->card->number, &uaudio_qdev->card_slot); + /* all audio devices are disconnected */ + if (!uaudio_qdev->card_slot) { + uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE, + PAGE_SIZE); + xhci_sideband_remove_interrupter(uadev[dev->chip->card->number].sb); + } +} + +static void uaudio_dev_cleanup(struct uaudio_dev *dev) +{ + int if_idx; + + if (!dev->udev) + return; + + /* free xfer buffer and unmap xfer ring and buf per interface */ + for (if_idx = 0; if_idx < dev->num_intf; if_idx++) { + if (!dev->info[if_idx].in_use) + continue; + uaudio_dev_intf_cleanup(dev->udev, &dev->info[if_idx]); + dev_dbg(uaudio_qdev->data->dev, + "release resources: intf# %d card# %d\n", + dev->info[if_idx].intf_num, dev->chip->card->number); + } + + dev->num_intf = 0; + + /* free interface info */ + kfree(dev->info); + dev->info = NULL; + uaudio_event_ring_cleanup_free(dev); + dev->udev = NULL; +} + +/** + * disable_audio_stream() - disable usb snd endpoints + * @subs: usb substream + * + * Closes the USB SND endpoints associated with the current audio stream + * used. This will decrement the USB SND endpoint opened reference count. 
+ * + */ +static void disable_audio_stream(struct snd_usb_substream *subs) +{ + struct snd_usb_audio *chip = subs->stream->chip; + + snd_usb_hw_free(subs); + snd_usb_autosuspend(chip); +} + +/* QMI service disconnect handlers */ +static void qmi_stop_session(void) +{ + struct snd_usb_substream *subs; + struct usb_host_endpoint *ep; + struct snd_usb_audio *chip; + struct intf_info *info; + int pcm_card_num; + int if_idx; + int idx; + + mutex_lock(&qdev_mutex); + /* find all active intf for set alt 0 and cleanup usb audio dev */ + for (idx = 0; idx < SNDRV_CARDS; idx++) { + if (!atomic_read(&uadev[idx].in_use)) + continue; + + chip = uadev[idx].chip; + for (if_idx = 0; if_idx < uadev[idx].num_intf; if_idx++) { + if (!uadev[idx].info || !uadev[idx].info[if_idx].in_use) + continue; + info = &uadev[idx].info[if_idx]; + pcm_card_num = info->pcm_card_num; + subs = find_substream(pcm_card_num, info->pcm_dev_num, + info->direction); + if (!subs || !chip || atomic_read(&chip->shutdown)) { + dev_err(&subs->dev->dev, + "no sub for c#%u dev#%u dir%u\n", + info->pcm_card_num, + info->pcm_dev_num, + info->direction); + continue; + } + /* Release XHCI endpoints */ + if (info->data_ep_pipe) + ep = usb_pipe_endpoint(uadev[pcm_card_num].udev, + info->data_ep_pipe); + xhci_sideband_remove_endpoint(uadev[pcm_card_num].sb, ep); + + if (info->sync_ep_pipe) + ep = usb_pipe_endpoint(uadev[pcm_card_num].udev, + info->sync_ep_pipe); + xhci_sideband_remove_endpoint(uadev[pcm_card_num].sb, ep); + + disable_audio_stream(subs); + } + atomic_set(&uadev[idx].in_use, 0); + mutex_lock(&chip->mutex); + uaudio_dev_cleanup(&uadev[idx]); + mutex_unlock(&chip->mutex); + } + mutex_unlock(&qdev_mutex); +} + +/** + * uaudio_sideband_notifier() - xHCI sideband event handler + * @intf: USB interface handle + * @evt: xHCI sideband event type + * + * This callback is executed when the xHCI sideband encounters a sequence + * that requires the sideband clients to take action. An example, is when + * xHCI frees the transfer ring, so the client has to ensure that the + * offload path is halted. + * + */ +static int uaudio_sideband_notifier(struct usb_interface *intf, + struct xhci_sideband_event *evt) +{ + struct snd_usb_audio *chip; + struct uaudio_dev *dev; + int if_idx; + + if (!intf || !evt) + return 0; + + chip = usb_get_intfdata(intf); + + mutex_lock(&qdev_mutex); + mutex_lock(&chip->mutex); + + dev = &uadev[chip->card->number]; + + if (evt->type == XHCI_SIDEBAND_XFER_RING_FREE) { + unsigned int *ep = (unsigned int *) evt->evt_data; + + for (if_idx = 0; if_idx < dev->num_intf; if_idx++) { + if (dev->info[if_idx].data_ep_idx == *ep || + dev->info[if_idx].sync_ep_idx == *ep) + uaudio_send_disconnect_ind(chip); + } + } + + mutex_unlock(&qdev_mutex); + mutex_unlock(&chip->mutex); + + return 0; +} + +/** + * qmi_bye_cb() - qmi bye message callback + * @handle: QMI handle + * @node: id of the dying node + * + * This callback is invoked when the QMI bye control message is received + * from the QMI client. Handle the message accordingly by ensuring that + * the USB offload path is disabled and cleaned up. At this point, ADSP + * is not utilizing the USB bus. 
+ * + */ +static void qmi_bye_cb(struct qmi_handle *handle, unsigned int node) +{ + struct uaudio_qmi_svc *svc = uaudio_svc; + + if (svc->uaudio_svc_hdl != handle) + return; + + if (svc->client_connected && svc->client_sq.sq_node == node) { + qmi_stop_session(); + + /* clear QMI client parameters to block further QMI messages */ + svc->client_sq.sq_node = 0; + svc->client_sq.sq_port = 0; + svc->client_sq.sq_family = 0; + svc->client_connected = false; + } +} + +/** + * qmi_svc_disconnect_cb() - qmi client disconnected + * @handle: QMI handle + * @node: id of the dying node + * @port: port of the dying client + * + * Invoked when the remote QMI client is disconnected. Handle this event + * the same way as when the QMI bye message is received. This will ensure + * the USB offloading path is disabled and cleaned up. + * + */ +static void qmi_svc_disconnect_cb(struct qmi_handle *handle, + unsigned int node, unsigned int port) +{ + struct uaudio_qmi_svc *svc; + + if (!uaudio_svc) + return; + + svc = uaudio_svc; + if (svc->uaudio_svc_hdl != handle) + return; + + if (svc->client_connected && svc->client_sq.sq_node == node && + svc->client_sq.sq_port == port) { + qmi_stop_session(); + + /* clear QMI client parameters to block further QMI messages */ + svc->client_sq.sq_node = 0; + svc->client_sq.sq_port = 0; + svc->client_sq.sq_family = 0; + svc->client_connected = false; + } +} + +/* QMI client callback handlers from QMI interface */ +static struct qmi_ops uaudio_svc_ops_options = { + .bye = qmi_bye_cb, + .del_client = qmi_svc_disconnect_cb, +}; + +/* kref release callback when all streams are disabled */ +static void uaudio_dev_release(struct kref *kref) +{ + struct uaudio_dev *dev = container_of(kref, struct uaudio_dev, kref); + + uaudio_event_ring_cleanup_free(dev); + atomic_set(&dev->in_use, 0); + wake_up(&dev->disconnect_wq); +} + +/** + * enable_audio_stream() - enable usb snd endpoints + * @subs: usb substream + * @pcm_format: pcm format requested + * @channels: number of channels + * @cur_rate: sample rate + * @datainterval: interval + * + * Opens all USB SND endpoints used for the data interface. This will increment + * the USB SND endpoint's opened count. Requests to keep the interface resumed + * until the audio stream is stopped. Will issue the USB set interface control + * message to enable the data interface. 
+ * + */ +static int enable_audio_stream(struct snd_usb_substream *subs, + snd_pcm_format_t pcm_format, + unsigned int channels, unsigned int cur_rate, + int datainterval) +{ + struct snd_pcm_hw_params params; + struct snd_usb_audio *chip; + struct snd_interval *i; + struct snd_mask *m; + int ret; + + chip = subs->stream->chip; + + _snd_pcm_hw_params_any(¶ms); + + m = hw_param_mask(¶ms, SNDRV_PCM_HW_PARAM_FORMAT); + snd_mask_leave(m, pcm_format); + + i = hw_param_interval(¶ms, SNDRV_PCM_HW_PARAM_CHANNELS); + snd_interval_setinteger(i); + i->min = channels; + i->max = channels; + + i = hw_param_interval(¶ms, SNDRV_PCM_HW_PARAM_RATE); + snd_interval_setinteger(i); + i->min = cur_rate; + i->max = cur_rate; + + pm_runtime_barrier(&chip->intf[0]->dev); + snd_usb_autoresume(chip); + + ret = snd_usb_hw_params(subs, ¶ms); + if (ret < 0) + goto put_suspend; + + if (!atomic_read(&chip->shutdown)) { + ret = snd_usb_lock_shutdown(chip); + if (ret < 0) + goto detach_ep; + + if (subs->sync_endpoint) { + ret = snd_usb_endpoint_prepare(chip, subs->sync_endpoint); + if (ret < 0) + goto unlock; + } + + ret = snd_usb_endpoint_prepare(chip, subs->data_endpoint); + if (ret < 0) + goto unlock; + + snd_usb_unlock_shutdown(chip); + + dev_dbg(uaudio_qdev->data->dev, + "selected %s iface:%d altsetting:%d datainterval:%dus\n", + subs->direction ? "capture" : "playback", + subs->cur_audiofmt->iface, subs->cur_audiofmt->altsetting, + (1 << subs->cur_audiofmt->datainterval) * + (subs->dev->speed >= USB_SPEED_HIGH ? + BUS_INTERVAL_HIGHSPEED_AND_ABOVE : + BUS_INTERVAL_FULL_SPEED)); + } + + return 0; + +unlock: + snd_usb_unlock_shutdown(chip); + +detach_ep: + snd_usb_hw_free(subs); + +put_suspend: + snd_usb_autosuspend(chip); + + return ret; +} + +/** + * uaudio_transfer_buffer_setup() - fetch and populate xfer buffer params + * @subs: usb substream + * @xfer_buf: xfer buf to be allocated + * @xfer_buf_len: size of allocation + * @mem_info: QMI response info + * + * Allocates and maps the transfer buffers that will be utilized by the + * audio DSP. Will populate the information in the QMI response that is + * sent back to the stream enable request. + * + */ +static int uaudio_transfer_buffer_setup(struct snd_usb_substream *subs, + void **xfer_buf_cpu, u32 xfer_buf_len, + struct mem_info_v01 *mem_info) +{ + struct sg_table xfer_buf_sgt; + dma_addr_t xfer_buf_dma; + void *xfer_buf; + phys_addr_t xfer_buf_pa; + u32 len = xfer_buf_len; + bool dma_coherent; + dma_addr_t xfer_buf_dma_sysdev; + u32 remainder; + u32 mult; + int ret; + + dma_coherent = dev_is_dma_coherent(subs->dev->bus->sysdev); + + /* xfer buffer, multiple of 4K only */ + if (!len) + len = PAGE_SIZE; + + mult = len / PAGE_SIZE; + remainder = len % PAGE_SIZE; + len = mult * PAGE_SIZE; + len += remainder ? 
PAGE_SIZE : 0; + + if (len > MAX_XFER_BUFF_LEN) { + dev_err(uaudio_qdev->data->dev, + "req buf len %d > max buf len %lu, setting %lu\n", + len, MAX_XFER_BUFF_LEN, MAX_XFER_BUFF_LEN); + len = MAX_XFER_BUFF_LEN; + } + + /* get buffer mapped into subs->dev */ + xfer_buf = usb_alloc_coherent(subs->dev, len, GFP_KERNEL, &xfer_buf_dma); + if (!xfer_buf) + return -ENOMEM; + + /* Remapping is not possible if xfer_buf is outside of linear map */ + xfer_buf_pa = virt_to_phys(xfer_buf); + if (WARN_ON(!page_is_ram(PFN_DOWN(xfer_buf_pa)))) { + ret = -ENXIO; + goto unmap_sync; + } + dma_get_sgtable(subs->dev->bus->sysdev, &xfer_buf_sgt, xfer_buf, + xfer_buf_dma, len); + + /* map the physical buffer into sysdev as well */ + xfer_buf_dma_sysdev = uaudio_iommu_map(MEM_XFER_BUF, dma_coherent, + xfer_buf_pa, len, &xfer_buf_sgt); + if (!xfer_buf_dma_sysdev) { + ret = -ENOMEM; + goto unmap_sync; + } + + mem_info->dma = xfer_buf_dma; + mem_info->size = len; + mem_info->iova = PREPEND_SID_TO_IOVA(xfer_buf_dma_sysdev, uaudio_qdev->data->sid); + *xfer_buf_cpu = xfer_buf; + sg_free_table(&xfer_buf_sgt); + + return 0; + +unmap_sync: + usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_dma); + + return ret; +} + +/** + * uaudio_endpoint_setup() - fetch and populate endpoint params + * @subs: usb substream + * @endpoint: usb endpoint to add + * @card_num: uadev index + * @mem_info: QMI response info + * @ep_desc: QMI ep desc response field + * + * Initialize the USB endpoint being used for a particular USB + * stream. Will request XHCI sec intr to reserve the EP for + * offloading as well as populating the QMI response with the + * transfer ring parameters. + * + */ +static phys_addr_t +uaudio_endpoint_setup(struct snd_usb_substream *subs, + struct snd_usb_endpoint *endpoint, int card_num, + struct mem_info_v01 *mem_info, + struct usb_endpoint_descriptor_v01 *ep_desc) +{ + struct usb_host_endpoint *ep; + phys_addr_t tr_pa = 0; + struct sg_table *sgt; + bool dma_coherent; + unsigned long iova; + struct page *pg; + int ret = -ENODEV; + + dma_coherent = dev_is_dma_coherent(subs->dev->bus->sysdev); + + ep = usb_pipe_endpoint(subs->dev, endpoint->pipe); + if (!ep) { + dev_err(uaudio_qdev->data->dev, "data ep # %d context is null\n", + subs->data_endpoint->ep_num); + goto exit; + } + + memcpy(ep_desc, &ep->desc, sizeof(ep->desc)); + + ret = xhci_sideband_add_endpoint(uadev[card_num].sb, ep); + if (ret < 0) { + dev_err(&subs->dev->dev, + "failed to add data ep to sec intr\n"); + ret = -ENODEV; + goto exit; + } + + sgt = xhci_sideband_get_endpoint_buffer(uadev[card_num].sb, ep); + if (!sgt) { + dev_err(&subs->dev->dev, + "failed to get data ep ring address\n"); + ret = -ENODEV; + goto remove_ep; + } + + pg = sg_page(sgt->sgl); + tr_pa = page_to_phys(pg); + mem_info->dma = sg_dma_address(sgt->sgl); + sg_free_table(sgt); + + /* data transfer ring */ + iova = uaudio_iommu_map(MEM_XFER_RING, dma_coherent, tr_pa, + PAGE_SIZE, NULL); + if (!iova) { + ret = -ENOMEM; + goto clear_pa; + } + + mem_info->iova = PREPEND_SID_TO_IOVA(iova, uaudio_qdev->data->sid); + mem_info->size = PAGE_SIZE; + + return 0; + +clear_pa: + mem_info->dma = 0; +remove_ep: + xhci_sideband_remove_endpoint(uadev[card_num].sb, ep); +exit: + return ret; +} + +/** + * uaudio_event_ring_setup() - fetch and populate event ring params + * @subs: usb substream + * @card_num: uadev index + * @mem_info: QMI response info + * + * Register secondary interrupter to XHCI and fetch the event buffer info + * and populate the information into the QMI response. 
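The transfer-buffer sizing above rounds the requested length up to a whole number of pages and clamps it to MAX_XFER_BUFF_LEN. A stand-alone sketch of just that arithmetic, assuming 4 KiB pages (an assumption of this sketch) and arbitrary request sizes:

	#include <stdio.h>

	#define PAGE_SIZE_SKETCH	4096UL			/* assumed 4 KiB pages */
	#define MAX_XFER_BUFF_LEN	(24 * PAGE_SIZE_SKETCH)	/* as in the driver */

	/* Same rounding as uaudio_transfer_buffer_setup() */
	static unsigned long xfer_buf_len(unsigned long requested)
	{
		unsigned long len = requested ? requested : PAGE_SIZE_SKETCH;

		len = (len / PAGE_SIZE_SKETCH) * PAGE_SIZE_SKETCH +
		      (len % PAGE_SIZE_SKETCH ? PAGE_SIZE_SKETCH : 0);
		if (len > MAX_XFER_BUFF_LEN)
			len = MAX_XFER_BUFF_LEN;
		return len;
	}

	int main(void)
	{
		/* prints 4096 8192 98304 */
		printf("%lu %lu %lu\n", xfer_buf_len(0), xfer_buf_len(6000),
		       xfer_buf_len(200 * 1024));
		return 0;
	}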
+ * + */ +static int uaudio_event_ring_setup(struct snd_usb_substream *subs, + int card_num, struct mem_info_v01 *mem_info) +{ + struct sg_table *sgt; + phys_addr_t er_pa; + bool dma_coherent; + unsigned long iova; + struct page *pg; + int ret; + + dma_coherent = dev_is_dma_coherent(subs->dev->bus->sysdev); + er_pa = 0; + + /* event ring */ + ret = xhci_sideband_create_interrupter(uadev[card_num].sb, 1, false, + 0, uaudio_qdev->data->intr_num); + if (ret < 0) { + dev_err(&subs->dev->dev, "failed to fetch interrupter\n"); + goto exit; + } + + sgt = xhci_sideband_get_event_buffer(uadev[card_num].sb); + if (!sgt) { + dev_err(&subs->dev->dev, + "failed to get event ring address\n"); + ret = -ENODEV; + goto remove_interrupter; + } + + pg = sg_page(sgt->sgl); + er_pa = page_to_phys(pg); + mem_info->dma = sg_dma_address(sgt->sgl); + sg_free_table(sgt); + + iova = uaudio_iommu_map(MEM_EVENT_RING, dma_coherent, er_pa, + PAGE_SIZE, NULL); + if (!iova) { + ret = -ENOMEM; + goto clear_pa; + } + + mem_info->iova = PREPEND_SID_TO_IOVA(iova, uaudio_qdev->data->sid); + mem_info->size = PAGE_SIZE; + + return 0; + +clear_pa: + mem_info->dma = 0; +remove_interrupter: + xhci_sideband_remove_interrupter(uadev[card_num].sb); +exit: + return ret; +} + +/** + * uaudio_populate_uac_desc() - parse UAC parameters and populate QMI resp + * @subs: usb substream + * @resp: QMI response buffer + * + * Parses information specified within UAC descriptors which explain the + * sample parameters that the device expects. This information is populated + * to the QMI response sent back to the audio DSP. + * + */ +static int uaudio_populate_uac_desc(struct snd_usb_substream *subs, + struct qmi_uaudio_stream_resp_msg_v01 *resp) +{ + struct usb_interface_descriptor *altsd; + struct usb_host_interface *alts; + struct usb_interface *iface; + int protocol; + + iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface); + if (!iface) { + dev_err(&subs->dev->dev, "interface # %d does not exist\n", + subs->cur_audiofmt->iface); + return -ENODEV; + } + + alts = &iface->altsetting[subs->cur_audiofmt->altset_idx]; + altsd = get_iface_desc(alts); + protocol = altsd->bInterfaceProtocol; + + if (protocol == UAC_VERSION_1) { + struct uac1_as_header_descriptor *as; + + as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, + UAC_AS_GENERAL); + if (!as) { + dev_err(&subs->dev->dev, + "%u:%d : no UAC_AS_GENERAL desc\n", + subs->cur_audiofmt->iface, + subs->cur_audiofmt->altset_idx); + return -ENODEV; + } + + resp->data_path_delay = as->bDelay; + resp->data_path_delay_valid = 1; + + resp->usb_audio_subslot_size = subs->cur_audiofmt->fmt_sz; + resp->usb_audio_subslot_size_valid = 1; + + resp->usb_audio_spec_revision = le16_to_cpu((__force __le16)0x0100); + resp->usb_audio_spec_revision_valid = 1; + } else if (protocol == UAC_VERSION_2) { + resp->usb_audio_subslot_size = subs->cur_audiofmt->fmt_sz; + resp->usb_audio_subslot_size_valid = 1; + + resp->usb_audio_spec_revision = le16_to_cpu((__force __le16)0x0200); + resp->usb_audio_spec_revision_valid = 1; + } else if (protocol == UAC_VERSION_3) { + if (iface->intf_assoc->bFunctionSubClass == + UAC3_FUNCTION_SUBCLASS_FULL_ADC_3_0) { + dev_err(&subs->dev->dev, + "full adc is not supported\n"); + return -EINVAL; + } + + switch (le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize)) { + case UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_16: + case UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_16: + case UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_16: + case UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_16: { + resp->usb_audio_subslot_size = 
0x2; + break; + } + + case UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_24: + case UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_24: + case UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_24: + case UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_24: { + resp->usb_audio_subslot_size = 0x3; + break; + } + + default: + dev_err(&subs->dev->dev, + "%d: %u: Invalid wMaxPacketSize\n", + subs->cur_audiofmt->iface, + subs->cur_audiofmt->altset_idx); + return -EINVAL; + } + resp->usb_audio_subslot_size_valid = 1; + } else { + dev_err(&subs->dev->dev, "unknown protocol version %x\n", + protocol); + return -ENODEV; + } + + memcpy(&resp->std_as_opr_intf_desc, &alts->desc, sizeof(alts->desc)); + + return 0; +} + +/** + * prepare_qmi_response() - prepare stream enable response + * @subs: usb substream + * @req_msg: QMI request message + * @resp: QMI response buffer + * @info_idx: usb interface array index + * + * Prepares the QMI response for a USB QMI stream enable request. Will parse + * out the parameters within the stream enable request, in order to match + * requested audio profile to the ones exposed by the USB device connected. + * + * In addition, will fetch the XHCI transfer resources needed for the handoff to + * happen. This includes, transfer ring and buffer addresses and secondary event + * ring address. These parameters will be communicated as part of the USB QMI + * stream enable response. + * + */ +static int prepare_qmi_response(struct snd_usb_substream *subs, + struct qmi_uaudio_stream_req_msg_v01 *req_msg, + struct qmi_uaudio_stream_resp_msg_v01 *resp, + int info_idx) +{ + struct q6usb_offload *data; + int pcm_dev_num; + int card_num; + void *xfer_buf_cpu; + int ret; + + pcm_dev_num = (req_msg->usb_token & QMI_STREAM_REQ_DEV_NUM_MASK) >> 8; + card_num = (req_msg->usb_token & QMI_STREAM_REQ_CARD_NUM_MASK) >> 16; + + if (!uadev[card_num].ctrl_intf) { + dev_err(&subs->dev->dev, "audio ctrl intf info not cached\n"); + ret = -ENODEV; + goto err; + } + + ret = uaudio_populate_uac_desc(subs, resp); + if (ret < 0) + goto err; + + resp->slot_id = subs->dev->slot_id; + resp->slot_id_valid = 1; + + data = snd_soc_usb_find_priv_data(uaudio_qdev->auxdev->dev.parent); + if (!data) + goto err; + + uaudio_qdev->data = data; + + resp->std_as_opr_intf_desc_valid = 1; + ret = uaudio_endpoint_setup(subs, subs->data_endpoint, card_num, + &resp->xhci_mem_info.tr_data, + &resp->std_as_data_ep_desc); + if (ret < 0) + goto err; + + resp->std_as_data_ep_desc_valid = 1; + + if (subs->sync_endpoint) { + ret = uaudio_endpoint_setup(subs, subs->sync_endpoint, card_num, + &resp->xhci_mem_info.tr_sync, + &resp->std_as_sync_ep_desc); + if (ret < 0) + goto drop_data_ep; + + resp->std_as_sync_ep_desc_valid = 1; + } + + resp->interrupter_num_valid = 1; + resp->controller_num_valid = 0; + ret = usb_get_controller_id(subs->dev); + if (ret >= 0) { + resp->controller_num = ret; + resp->controller_num_valid = 1; + } + + /* event ring */ + ret = uaudio_event_ring_setup(subs, card_num, + &resp->xhci_mem_info.evt_ring); + if (ret < 0) + goto drop_sync_ep; + + uaudio_qdev->er_mapped = true; + resp->interrupter_num = xhci_sideband_interrupter_id(uadev[card_num].sb); + + resp->speed_info = get_speed_info(subs->dev->speed); + if (resp->speed_info == USB_QMI_DEVICE_SPEED_INVALID_V01) { + ret = -ENODEV; + goto free_sec_ring; + } + + resp->speed_info_valid = 1; + + ret = uaudio_transfer_buffer_setup(subs, &xfer_buf_cpu, req_msg->xfer_buff_size, + &resp->xhci_mem_info.xfer_buff); + if (ret < 0) { + ret = -ENOMEM; + goto free_sec_ring; + } + + resp->xhci_mem_info_valid = 1; + + if 
(!atomic_read(&uadev[card_num].in_use)) { + kref_init(&uadev[card_num].kref); + init_waitqueue_head(&uadev[card_num].disconnect_wq); + uadev[card_num].num_intf = + subs->dev->config->desc.bNumInterfaces; + uadev[card_num].info = kcalloc(uadev[card_num].num_intf, + sizeof(struct intf_info), + GFP_KERNEL); + if (!uadev[card_num].info) { + ret = -ENOMEM; + goto unmap_er; + } + uadev[card_num].udev = subs->dev; + atomic_set(&uadev[card_num].in_use, 1); + } else { + kref_get(&uadev[card_num].kref); + } + + uadev[card_num].usb_core_id = resp->controller_num; + + /* cache intf specific info to use it for unmap and free xfer buf */ + uadev[card_num].info[info_idx].data_xfer_ring_va = + IOVA_MASK(resp->xhci_mem_info.tr_data.iova); + uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE; + uadev[card_num].info[info_idx].sync_xfer_ring_va = + IOVA_MASK(resp->xhci_mem_info.tr_sync.iova); + uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE; + uadev[card_num].info[info_idx].xfer_buf_iova = + IOVA_MASK(resp->xhci_mem_info.xfer_buff.iova); + uadev[card_num].info[info_idx].xfer_buf_dma = + resp->xhci_mem_info.xfer_buff.dma; + uadev[card_num].info[info_idx].xfer_buf_size = + resp->xhci_mem_info.xfer_buff.size; + uadev[card_num].info[info_idx].data_ep_pipe = subs->data_endpoint ? + subs->data_endpoint->pipe : 0; + uadev[card_num].info[info_idx].sync_ep_pipe = subs->sync_endpoint ? + subs->sync_endpoint->pipe : 0; + uadev[card_num].info[info_idx].data_ep_idx = subs->data_endpoint ? + subs->data_endpoint->ep_num : 0; + uadev[card_num].info[info_idx].sync_ep_idx = subs->sync_endpoint ? + subs->sync_endpoint->ep_num : 0; + uadev[card_num].info[info_idx].xfer_buf_cpu = xfer_buf_cpu; + uadev[card_num].info[info_idx].pcm_card_num = card_num; + uadev[card_num].info[info_idx].pcm_dev_num = pcm_dev_num; + uadev[card_num].info[info_idx].direction = subs->direction; + uadev[card_num].info[info_idx].intf_num = subs->cur_audiofmt->iface; + uadev[card_num].info[info_idx].in_use = true; + + set_bit(card_num, &uaudio_qdev->card_slot); + + return 0; + +unmap_er: + uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE, PAGE_SIZE); +free_sec_ring: + xhci_sideband_remove_interrupter(uadev[card_num].sb); +drop_sync_ep: + if (subs->sync_endpoint) { + uaudio_iommu_unmap(MEM_XFER_RING, + IOVA_MASK(resp->xhci_mem_info.tr_sync.iova), + PAGE_SIZE, PAGE_SIZE); + xhci_sideband_remove_endpoint(uadev[card_num].sb, + usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe)); + } +drop_data_ep: + uaudio_iommu_unmap(MEM_XFER_RING, IOVA_MASK(resp->xhci_mem_info.tr_data.iova), + PAGE_SIZE, PAGE_SIZE); + xhci_sideband_remove_endpoint(uadev[card_num].sb, + usb_pipe_endpoint(subs->dev, subs->data_endpoint->pipe)); + +err: + return ret; +} + +/** + * handle_uaudio_stream_req() - handle stream enable/disable request + * @handle: QMI client handle + * @sq: qrtr socket + * @txn: QMI transaction context + * @decoded_msg: decoded QMI message + * + * Main handler for the QMI stream enable/disable requests. This executes the + * corresponding enable/disable stream apis, respectively. 
+ * + */ +static void handle_uaudio_stream_req(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) +{ + struct qmi_uaudio_stream_req_msg_v01 *req_msg; + struct qmi_uaudio_stream_resp_msg_v01 resp = {{0}, 0}; + struct uaudio_qmi_svc *svc = uaudio_svc; + struct snd_usb_audio *chip = NULL; + struct snd_usb_substream *subs; + struct usb_host_endpoint *ep; + int datainterval = -EINVAL; + int info_idx = -EINVAL; + struct intf_info *info; + u8 pcm_card_num; + u8 pcm_dev_num; + u8 direction; + int ret = 0; + + if (!svc->client_connected) { + svc->client_sq = *sq; + svc->client_connected = true; + } + + mutex_lock(&qdev_mutex); + req_msg = (struct qmi_uaudio_stream_req_msg_v01 *)decoded_msg; + if (!req_msg->audio_format_valid || !req_msg->bit_rate_valid || + !req_msg->number_of_ch_valid || !req_msg->xfer_buff_size_valid) { + ret = -EINVAL; + goto response; + } + + if (!uaudio_qdev) { + ret = -EINVAL; + goto response; + } + + direction = (req_msg->usb_token & QMI_STREAM_REQ_DIRECTION); + pcm_dev_num = (req_msg->usb_token & QMI_STREAM_REQ_DEV_NUM_MASK) >> 8; + pcm_card_num = (req_msg->usb_token & QMI_STREAM_REQ_CARD_NUM_MASK) >> 16; + if (pcm_card_num >= SNDRV_CARDS) { + ret = -EINVAL; + goto response; + } + + if (req_msg->audio_format > USB_QMI_PCM_FORMAT_U32_BE) { + ret = -EINVAL; + goto response; + } + + subs = find_substream(pcm_card_num, pcm_dev_num, direction); + chip = uadev[pcm_card_num].chip; + if (!subs || !chip || atomic_read(&chip->shutdown)) { + ret = -ENODEV; + goto response; + } + + info_idx = info_idx_from_ifnum(pcm_card_num, subs->cur_audiofmt ? + subs->cur_audiofmt->iface : -1, req_msg->enable); + if (atomic_read(&chip->shutdown) || !subs->stream || !subs->stream->pcm || + !subs->stream->chip) { + ret = -ENODEV; + goto response; + } + + mutex_lock(&chip->mutex); + if (req_msg->enable) { + if (info_idx < 0 || chip->system_suspend || subs->opened) { + ret = -EBUSY; + mutex_unlock(&chip->mutex); + + goto response; + } + subs->opened = 1; + } + mutex_unlock(&chip->mutex); + + if (req_msg->service_interval_valid) { + ret = get_data_interval_from_si(subs, + req_msg->service_interval); + if (ret == -EINVAL) + goto response; + + datainterval = ret; + } + + uadev[pcm_card_num].ctrl_intf = chip->ctrl_intf; + + if (req_msg->enable) { + ret = enable_audio_stream(subs, + map_pcm_format(req_msg->audio_format), + req_msg->number_of_ch, req_msg->bit_rate, + datainterval); + + if (!ret) + ret = prepare_qmi_response(subs, req_msg, &resp, + info_idx); + if (ret < 0) { + mutex_lock(&chip->mutex); + subs->opened = 0; + mutex_unlock(&chip->mutex); + } + } else { + info = &uadev[pcm_card_num].info[info_idx]; + if (info->data_ep_pipe) { + ep = usb_pipe_endpoint(uadev[pcm_card_num].udev, + info->data_ep_pipe); + if (ep) { + xhci_sideband_stop_endpoint(uadev[pcm_card_num].sb, + ep); + xhci_sideband_remove_endpoint(uadev[pcm_card_num].sb, + ep); + } + + info->data_ep_pipe = 0; + } + + if (info->sync_ep_pipe) { + ep = usb_pipe_endpoint(uadev[pcm_card_num].udev, + info->sync_ep_pipe); + if (ep) { + xhci_sideband_stop_endpoint(uadev[pcm_card_num].sb, + ep); + xhci_sideband_remove_endpoint(uadev[pcm_card_num].sb, + ep); + } + + info->sync_ep_pipe = 0; + } + + disable_audio_stream(subs); + mutex_lock(&chip->mutex); + subs->opened = 0; + mutex_unlock(&chip->mutex); + } + +response: + if (!req_msg->enable && ret != -EINVAL && ret != -ENODEV) { + mutex_lock(&chip->mutex); + if (info_idx >= 0) { + info = &uadev[pcm_card_num].info[info_idx]; + 
uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, + info); + } + if (atomic_read(&uadev[pcm_card_num].in_use)) + kref_put(&uadev[pcm_card_num].kref, + uaudio_dev_release); + mutex_unlock(&chip->mutex); + } + mutex_unlock(&qdev_mutex); + + resp.usb_token = req_msg->usb_token; + resp.usb_token_valid = 1; + resp.internal_status = ret; + resp.internal_status_valid = 1; + resp.status = ret ? USB_QMI_STREAM_REQ_FAILURE_V01 : ret; + resp.status_valid = 1; + ret = qmi_send_response(svc->uaudio_svc_hdl, sq, txn, + QMI_UAUDIO_STREAM_RESP_V01, + QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN, + qmi_uaudio_stream_resp_msg_v01_ei, &resp); +} + +static struct qmi_msg_handler uaudio_stream_req_handlers = { + .type = QMI_REQUEST, + .msg_id = QMI_UAUDIO_STREAM_REQ_V01, + .ei = qmi_uaudio_stream_req_msg_v01_ei, + .decoded_size = QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN, + .fn = handle_uaudio_stream_req, +}; + +/** + * qc_usb_audio_offload_init_qmi_dev() - initializes qmi dev + * + * Initializes the USB qdev, which is used to carry information pertaining to + * the offloading resources. This device is freed only when there are no longer + * any offloading candidates. (i.e, when all audio devices are disconnected) + * + */ +static int qc_usb_audio_offload_init_qmi_dev(void) +{ + uaudio_qdev = kzalloc(sizeof(*uaudio_qdev), GFP_KERNEL); + if (!uaudio_qdev) + return -ENOMEM; + + /* initialize xfer ring and xfer buf iova list */ + INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list); + uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE; + uaudio_qdev->xfer_ring_iova_size = + IOVA_XFER_RING_MAX - IOVA_XFER_RING_BASE; + + INIT_LIST_HEAD(&uaudio_qdev->xfer_buf_list); + uaudio_qdev->curr_xfer_buf_iova = IOVA_XFER_BUF_BASE; + uaudio_qdev->xfer_buf_iova_size = + IOVA_XFER_BUF_MAX - IOVA_XFER_BUF_BASE; + + return 0; +} + +/* Populates ppcm_idx array with supported PCM indexes */ +static int qc_usb_audio_offload_fill_avail_pcms(struct snd_usb_audio *chip, + struct snd_soc_usb_device *sdev) +{ + struct snd_usb_stream *as; + struct snd_usb_substream *subs; + int idx = 0; + + list_for_each_entry(as, &chip->pcm_list, list) { + subs = &as->substream[SNDRV_PCM_STREAM_PLAYBACK]; + if (subs->ep_num) { + sdev->ppcm_idx[idx] = as->pcm->device; + idx++; + } + /* + * Break if the current index exceeds the number of possible + * playback streams counted from the UAC descriptors. + */ + if (idx >= sdev->num_playback) + break; + } + + return -1; +} + +/** + * qc_usb_audio_offload_probe() - platform op connect handler + * @chip: USB SND device + * + * Platform connect handler when a USB SND device is detected. Will + * notify SOC USB about the connection to enable the USB ASoC backend + * and populate internal USB chip array. + * + */ +static void qc_usb_audio_offload_probe(struct snd_usb_audio *chip) +{ + struct usb_interface *intf = chip->intf[chip->num_interfaces - 1]; + struct usb_interface_descriptor *altsd; + struct usb_host_interface *alts; + struct snd_soc_usb_device *sdev; + struct xhci_sideband *sb; + + /* + * If there is no priv_data, or no playback paths, the connected + * device doesn't support offloading. Avoid populating entries for + * this device. 
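The usb_token decoded by handle_uaudio_stream_req() above packs the stream direction, PCM device number and card number into one word. A minimal stand-alone sketch with the masks mirrored from the hunk above and a hypothetical token value:

	#include <stdio.h>

	/* Same field layout as the usb_token handling above */
	#define QMI_STREAM_REQ_CARD_NUM_MASK	0xffff0000
	#define QMI_STREAM_REQ_DEV_NUM_MASK	0xff00
	#define QMI_STREAM_REQ_DIRECTION	0xff

	int main(void)
	{
		/* Hypothetical token: card 1, PCM device 0, playback (direction 0) */
		unsigned int usb_token = (1 << 16) | (0 << 8) | 0;

		unsigned int direction = usb_token & QMI_STREAM_REQ_DIRECTION;
		unsigned int pcm_dev   = (usb_token & QMI_STREAM_REQ_DEV_NUM_MASK) >> 8;
		unsigned int card      = (usb_token & QMI_STREAM_REQ_CARD_NUM_MASK) >> 16;

		printf("card %u dev %u dir %u\n", card, pcm_dev, direction);
		return 0;
	}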
+ */ + if (!snd_soc_usb_find_priv_data(uaudio_qdev->auxdev->dev.parent) || + !usb_qmi_get_pcm_num(chip, 0)) + return; + + mutex_lock(&qdev_mutex); + mutex_lock(&chip->mutex); + if (!uadev[chip->card->number].chip) { + sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); + if (!sdev) + goto exit; + + sb = xhci_sideband_register(intf, XHCI_SIDEBAND_VENDOR, + uaudio_sideband_notifier); + if (!sb) + goto free_sdev; + } else { + sb = uadev[chip->card->number].sb; + sdev = uadev[chip->card->number].sdev; + } + + uadev[chip->card->number].sb = sb; + uadev[chip->card->number].chip = chip; + uadev[chip->card->number].sdev = sdev; + + alts = &intf->altsetting[0]; + altsd = get_iface_desc(alts); + + /* Wait until all PCM devices are populated before notifying soc-usb */ + if (altsd->bInterfaceNumber == chip->last_iface) { + sdev->num_playback = usb_qmi_get_pcm_num(chip, 0); + + /* + * Allocate playback pcm index array based on number of possible + * playback paths within the UAC descriptors. + */ + sdev->ppcm_idx = kcalloc(sdev->num_playback, sizeof(unsigned int), + GFP_KERNEL); + if (!sdev->ppcm_idx) + goto unreg_xhci; + + qc_usb_audio_offload_fill_avail_pcms(chip, sdev); + sdev->card_idx = chip->card->number; + sdev->chip_idx = chip->index; + + snd_usb_offload_create_ctl(chip, uaudio_qdev->auxdev->dev.parent); + snd_soc_usb_connect(uaudio_qdev->auxdev->dev.parent, sdev); + } + + mutex_unlock(&chip->mutex); + mutex_unlock(&qdev_mutex); + + return; + +unreg_xhci: + xhci_sideband_unregister(sb); + uadev[chip->card->number].sb = NULL; +free_sdev: + kfree(sdev); + uadev[chip->card->number].sdev = NULL; + uadev[chip->card->number].chip = NULL; +exit: + mutex_unlock(&chip->mutex); + mutex_unlock(&qdev_mutex); +} + +/** + * qc_usb_audio_cleanup_qmi_dev() - release qmi device + * + * Frees the USB qdev. Only occurs when there are no longer any potential + * devices that can utilize USB audio offloading. + * + */ +static void qc_usb_audio_cleanup_qmi_dev(void) +{ + kfree(uaudio_qdev); + uaudio_qdev = NULL; +} + +/** + * qc_usb_audio_offload_disconnect() - platform op disconnect handler + * @chip: USB SND device + * + * Platform disconnect handler. Will ensure that any pending stream is + * halted by issuing a QMI disconnect indication packet to the adsp. + * + */ +static void qc_usb_audio_offload_disconnect(struct snd_usb_audio *chip) +{ + struct uaudio_dev *dev; + int card_num; + + if (!chip) + return; + + card_num = chip->card->number; + if (card_num >= SNDRV_CARDS) + return; + + mutex_lock(&qdev_mutex); + mutex_lock(&chip->mutex); + dev = &uadev[card_num]; + + /* Device has already been cleaned up, or never populated */ + if (!dev->chip) { + mutex_unlock(&qdev_mutex); + mutex_unlock(&chip->mutex); + return; + } + + /* cleaned up already */ + if (!dev->udev) + goto done; + + uaudio_send_disconnect_ind(chip); + uaudio_dev_cleanup(dev); +done: + /* + * If num_interfaces == 1, the last USB SND interface is being removed. + * This is to accommodate for devices w/ multiple UAC functions. 
+ */ + if (chip->num_interfaces == 1) { + snd_soc_usb_disconnect(uaudio_qdev->auxdev->dev.parent, dev->sdev); + xhci_sideband_unregister(dev->sb); + dev->chip = NULL; + kfree(dev->sdev->ppcm_idx); + kfree(dev->sdev); + dev->sdev = NULL; + } + mutex_unlock(&chip->mutex); + + mutex_unlock(&qdev_mutex); +} + +/** + * qc_usb_audio_offload_suspend() - USB offload PM suspend handler + * @intf: USB interface + * @message: suspend type + * + * PM suspend handler to ensure that the USB offloading driver is able to stop + * any pending traffic, so that the bus can be suspended. + * + */ +static void qc_usb_audio_offload_suspend(struct usb_interface *intf, + pm_message_t message) +{ + struct snd_usb_audio *chip = usb_get_intfdata(intf); + int card_num; + + if (!chip) + return; + + card_num = chip->card->number; + if (card_num >= SNDRV_CARDS) + return; + + mutex_lock(&qdev_mutex); + mutex_lock(&chip->mutex); + + uaudio_send_disconnect_ind(chip); + + mutex_unlock(&qdev_mutex); + mutex_unlock(&chip->mutex); +} + +static struct snd_usb_platform_ops offload_ops = { + .connect_cb = qc_usb_audio_offload_probe, + .disconnect_cb = qc_usb_audio_offload_disconnect, + .suspend_cb = qc_usb_audio_offload_suspend, +}; + +static int qc_usb_audio_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) + +{ + struct uaudio_qmi_svc *svc; + int ret; + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + return -ENOMEM; + + svc->uaudio_svc_hdl = kzalloc(sizeof(*svc->uaudio_svc_hdl), GFP_KERNEL); + if (!svc->uaudio_svc_hdl) { + ret = -ENOMEM; + goto free_svc; + } + + ret = qmi_handle_init(svc->uaudio_svc_hdl, + QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN, + &uaudio_svc_ops_options, + &uaudio_stream_req_handlers); + ret = qmi_add_server(svc->uaudio_svc_hdl, UAUDIO_STREAM_SERVICE_ID_V01, + UAUDIO_STREAM_SERVICE_VERS_V01, 0); + + uaudio_svc = svc; + + qc_usb_audio_offload_init_qmi_dev(); + uaudio_qdev->auxdev = auxdev; + + ret = snd_usb_register_platform_ops(&offload_ops); + if (ret < 0) + goto release_qmi; + + snd_usb_rediscover_devices(); + + return 0; + +release_qmi: + qc_usb_audio_cleanup_qmi_dev(); + qmi_handle_release(svc->uaudio_svc_hdl); +free_svc: + kfree(svc); + + return ret; +} + +static void qc_usb_audio_remove(struct auxiliary_device *auxdev) +{ + struct uaudio_qmi_svc *svc = uaudio_svc; + int idx; + + /* + * Remove all connected devices after unregistering ops, to ensure + * that no further connect events will occur. The disconnect routine + * will issue the QMI disconnect indication, which results in the + * external DSP to stop issuing transfers. 
+ */ + snd_usb_unregister_platform_ops(); + for (idx = 0; idx < SNDRV_CARDS; idx++) + qc_usb_audio_offload_disconnect(uadev[idx].chip); + + qc_usb_audio_cleanup_qmi_dev(); + + qmi_handle_release(svc->uaudio_svc_hdl); + kfree(svc); + uaudio_svc = NULL; +} + +static const struct auxiliary_device_id qc_usb_audio_table[] = { + { .name = "q6usb.qc-usb-audio-offload" }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, qc_usb_audio_table); + +static struct auxiliary_driver qc_usb_audio_offload_drv = { + .name = "qc-usb-audio-offload", + .id_table = qc_usb_audio_table, + .probe = qc_usb_audio_probe, + .remove = qc_usb_audio_remove, +}; +module_auxiliary_driver(qc_usb_audio_offload_drv); + +MODULE_DESCRIPTION("QC USB Audio Offloading"); +MODULE_LICENSE("GPL"); diff --git a/sound/usb/qcom/usb_audio_qmi_v01.c b/sound/usb/qcom/usb_audio_qmi_v01.c new file mode 100644 index 000000000000..502857612277 --- /dev/null +++ b/sound/usb/qcom/usb_audio_qmi_v01.c @@ -0,0 +1,863 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/soc/qcom/qmi.h> + +#include "usb_audio_qmi_v01.h" + +static const struct qmi_elem_info mem_info_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct mem_info_v01, iova), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct mem_info_v01, dma), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct mem_info_v01, size), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static const struct qmi_elem_info apps_mem_info_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct apps_mem_info_v01, evt_ring), + .ei_array = mem_info_v01_ei, + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct apps_mem_info_v01, tr_data), + .ei_array = mem_info_v01_ei, + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct apps_mem_info_v01, tr_sync), + .ei_array = mem_info_v01_ei, + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct apps_mem_info_v01, xfer_buff), + .ei_array = mem_info_v01_ei, + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct apps_mem_info_v01, dcba), + .ei_array = mem_info_v01_ei, + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static const struct qmi_elem_info usb_endpoint_descriptor_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bLength), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, 
+ .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bDescriptorType), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bEndpointAddress), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bmAttributes), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + wMaxPacketSize), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bInterval), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bRefresh), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_endpoint_descriptor_v01, + bSynchAddress), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static const struct qmi_elem_info usb_interface_descriptor_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bLength), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bDescriptorType), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bInterfaceNumber), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bAlternateSetting), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bNumEndpoints), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bInterfaceClass), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bInterfaceSubClass), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + bInterfaceProtocol), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct usb_interface_descriptor_v01, + iInterface), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +const struct qmi_elem_info 
qmi_uaudio_stream_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + enable), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + usb_token), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + audio_format_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + audio_format), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + number_of_ch_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + number_of_ch), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + bit_rate_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + bit_rate), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + xfer_buff_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + xfer_buff_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + service_interval_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01, + service_interval), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +const struct qmi_elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + status_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum usb_qmi_audio_stream_status_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + status), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + 
.tlv_type = 0x11, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + internal_status_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + internal_status), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + slot_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + slot_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + usb_token_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + usb_token), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + std_as_opr_intf_desc_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct usb_interface_descriptor_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + std_as_opr_intf_desc), + .ei_array = usb_interface_descriptor_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + std_as_data_ep_desc_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct usb_endpoint_descriptor_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + std_as_data_ep_desc), + .ei_array = usb_endpoint_descriptor_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + std_as_sync_ep_desc_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct usb_endpoint_descriptor_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + std_as_sync_ep_desc), + .ei_array = usb_endpoint_descriptor_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + usb_audio_spec_revision_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + usb_audio_spec_revision), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + data_path_delay_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + data_path_delay), + }, 
+ { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + usb_audio_subslot_size_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + usb_audio_subslot_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + xhci_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct apps_mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + xhci_mem_info), + .ei_array = apps_mem_info_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + interrupter_num_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + interrupter_num), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + speed_info_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum usb_qmi_audio_device_speed_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + speed_info), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + controller_num_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1D, + .offset = offsetof(struct qmi_uaudio_stream_resp_msg_v01, + controller_num), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +const struct qmi_elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof( + enum usb_qmi_audio_device_indication_enum_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + dev_event), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + slot_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + usb_token_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + usb_token), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + std_as_opr_intf_desc_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + 
.elem_size = sizeof(struct usb_interface_descriptor_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + std_as_opr_intf_desc), + .ei_array = usb_interface_descriptor_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + std_as_data_ep_desc_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct usb_endpoint_descriptor_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + std_as_data_ep_desc), + .ei_array = usb_endpoint_descriptor_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + std_as_sync_ep_desc_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct usb_endpoint_descriptor_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + std_as_sync_ep_desc), + .ei_array = usb_endpoint_descriptor_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + usb_audio_spec_revision_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + usb_audio_spec_revision), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + data_path_delay_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + data_path_delay), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + usb_audio_subslot_size_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + usb_audio_subslot_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + xhci_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct apps_mem_info_v01), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + xhci_mem_info), + .ei_array = apps_mem_info_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + interrupter_num_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + interrupter_num), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = 
NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + controller_num_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01, + controller_num), + }, + { + .data_type = QMI_EOTI, + .array_type = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/sound/usb/qcom/usb_audio_qmi_v01.h b/sound/usb/qcom/usb_audio_qmi_v01.h new file mode 100644 index 000000000000..a1298d75d9f8 --- /dev/null +++ b/sound/usb/qcom/usb_audio_qmi_v01.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef USB_QMI_V01_H +#define USB_QMI_V01_H + +#define UAUDIO_STREAM_SERVICE_ID_V01 0x41D +#define UAUDIO_STREAM_SERVICE_VERS_V01 0x01 + +#define QMI_UAUDIO_STREAM_RESP_V01 0x0001 +#define QMI_UAUDIO_STREAM_REQ_V01 0x0001 +#define QMI_UAUDIO_STREAM_IND_V01 0x0001 + +struct mem_info_v01 { + u64 iova; /* mapped into sysdev */ + u64 dma; /* mapped into usb host */ + u32 size; +}; + +struct apps_mem_info_v01 { + struct mem_info_v01 evt_ring; + struct mem_info_v01 tr_data; + struct mem_info_v01 tr_sync; + struct mem_info_v01 xfer_buff; + struct mem_info_v01 dcba; +}; + +struct usb_endpoint_descriptor_v01 { + u8 bLength; + u8 bDescriptorType; + u8 bEndpointAddress; + u8 bmAttributes; + u16 wMaxPacketSize; + u8 bInterval; + u8 bRefresh; + u8 bSynchAddress; +}; + +struct usb_interface_descriptor_v01 { + u8 bLength; + u8 bDescriptorType; + u8 bInterfaceNumber; + u8 bAlternateSetting; + u8 bNumEndpoints; + u8 bInterfaceClass; + u8 bInterfaceSubClass; + u8 bInterfaceProtocol; + u8 iInterface; +}; + +enum usb_qmi_audio_stream_status_enum_v01 { + USB_QMI_STREAM_STATUS_ENUM_MIN_VAL_V01 = INT_MIN, + USB_QMI_STREAM_REQ_SUCCESS_V01 = 0, + USB_QMI_STREAM_REQ_FAILURE_V01 = 1, + USB_QMI_STREAM_REQ_FAILURE_NOT_FOUND_V01 = 2, + USB_QMI_STREAM_REQ_FAILURE_INVALID_PARAM_V01 = 3, + USB_QMI_STREAM_REQ_FAILURE_MEMALLOC_V01 = 4, + USB_QMI_STREAM_STATUS_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +enum usb_qmi_audio_device_indication_enum_v01 { + USB_QMI_DEVICE_INDICATION_ENUM_MIN_VAL_V01 = INT_MIN, + USB_QMI_DEV_CONNECT_V01 = 0, + USB_QMI_DEV_DISCONNECT_V01 = 1, + USB_QMI_DEV_SUSPEND_V01 = 2, + USB_QMI_DEV_RESUME_V01 = 3, + USB_QMI_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +enum usb_qmi_audio_device_speed_enum_v01 { + USB_QMI_DEVICE_SPEED_ENUM_MIN_VAL_V01 = INT_MIN, + USB_QMI_DEVICE_SPEED_INVALID_V01 = 0, + USB_QMI_DEVICE_SPEED_LOW_V01 = 1, + USB_QMI_DEVICE_SPEED_FULL_V01 = 2, + USB_QMI_DEVICE_SPEED_HIGH_V01 = 3, + USB_QMI_DEVICE_SPEED_SUPER_V01 = 4, + USB_QMI_DEVICE_SPEED_SUPER_PLUS_V01 = 5, + USB_QMI_DEVICE_SPEED_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +struct qmi_uaudio_stream_req_msg_v01 { + u8 enable; + u32 usb_token; + u8 audio_format_valid; + u32 audio_format; + u8 number_of_ch_valid; + u32 number_of_ch; + u8 bit_rate_valid; + u32 bit_rate; + u8 xfer_buff_size_valid; + u32 xfer_buff_size; + u8 service_interval_valid; + u32 service_interval; +}; + +#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 46 +extern const struct qmi_elem_info qmi_uaudio_stream_req_msg_v01_ei[]; + +struct qmi_uaudio_stream_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 status_valid; + enum usb_qmi_audio_stream_status_enum_v01 status; + u8 internal_status_valid; + u32 internal_status; + u8 slot_id_valid; + u32 slot_id; + u8 usb_token_valid; + u32 usb_token; + u8 
std_as_opr_intf_desc_valid; + struct usb_interface_descriptor_v01 std_as_opr_intf_desc; + u8 std_as_data_ep_desc_valid; + struct usb_endpoint_descriptor_v01 std_as_data_ep_desc; + u8 std_as_sync_ep_desc_valid; + struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc; + u8 usb_audio_spec_revision_valid; + u16 usb_audio_spec_revision; + u8 data_path_delay_valid; + u8 data_path_delay; + u8 usb_audio_subslot_size_valid; + u8 usb_audio_subslot_size; + u8 xhci_mem_info_valid; + struct apps_mem_info_v01 xhci_mem_info; + u8 interrupter_num_valid; + u8 interrupter_num; + u8 speed_info_valid; + enum usb_qmi_audio_device_speed_enum_v01 speed_info; + u8 controller_num_valid; + u8 controller_num; +}; + +#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 202 +extern const struct qmi_elem_info qmi_uaudio_stream_resp_msg_v01_ei[]; + +struct qmi_uaudio_stream_ind_msg_v01 { + enum usb_qmi_audio_device_indication_enum_v01 dev_event; + u32 slot_id; + u8 usb_token_valid; + u32 usb_token; + u8 std_as_opr_intf_desc_valid; + struct usb_interface_descriptor_v01 std_as_opr_intf_desc; + u8 std_as_data_ep_desc_valid; + struct usb_endpoint_descriptor_v01 std_as_data_ep_desc; + u8 std_as_sync_ep_desc_valid; + struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc; + u8 usb_audio_spec_revision_valid; + u16 usb_audio_spec_revision; + u8 data_path_delay_valid; + u8 data_path_delay; + u8 usb_audio_subslot_size_valid; + u8 usb_audio_subslot_size; + u8 xhci_mem_info_valid; + struct apps_mem_info_v01 xhci_mem_info; + u8 interrupter_num_valid; + u8 interrupter_num; + u8 controller_num_valid; + u8 controller_num; +}; + +#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 181 +extern const struct qmi_elem_info qmi_uaudio_stream_ind_msg_v01_ei[]; + +#endif diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index 04ba035d67e9..b9ce3aab15fe 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -24,6 +24,7 @@ #include <sys/poll.h> #include <sys/utsname.h> +#include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> @@ -677,6 +678,88 @@ static void kvp_process_ipconfig_file(char *cmd, pclose(file); } +static bool kvp_verify_ip_address(const void *address_string) +{ + char verify_buf[sizeof(struct in6_addr)]; + + if (inet_pton(AF_INET, address_string, verify_buf) == 1) + return true; + if (inet_pton(AF_INET6, address_string, verify_buf) == 1) + return true; + return false; +} + +static void kvp_extract_routes(const char *line, void **output, size_t *remaining) +{ + static const char needle[] = "via "; + const char *match, *haystack = line; + + while ((match = strstr(haystack, needle))) { + const char *address, *next_char; + + /* Address starts after needle. */ + address = match + strlen(needle); + + /* The char following address is a space or end of line. */ + next_char = strpbrk(address, " \t\\"); + if (!next_char) + next_char = address + strlen(address) + 1; + + /* Enough room for address and semicolon. */ + if (*remaining >= (next_char - address) + 1) { + memcpy(*output, address, next_char - address); + /* Terminate string for verification. */ + memcpy(*output + (next_char - address), "", 1); + if (kvp_verify_ip_address(*output)) { + /* Advance output buffer. */ + *output += next_char - address; + *remaining -= next_char - address; + + /* Each address needs a trailing semicolon. 
*/ + memcpy(*output, ";", 1); + *output += 1; + *remaining -= 1; + } + } + haystack = next_char; + } +} + +static void kvp_get_gateway(void *buffer, size_t buffer_len) +{ + static const char needle[] = "default "; + FILE *f; + void *output = buffer; + char *line = NULL; + size_t alloc_size = 0, remaining = buffer_len - 1; + ssize_t num_chars; + + /* Show route information in a single line, for each address family */ + f = popen("ip --oneline -4 route show;ip --oneline -6 route show", "r"); + if (!f) { + /* Convert buffer into C-String. */ + memcpy(output, "", 1); + return; + } + while ((num_chars = getline(&line, &alloc_size, f)) > 0) { + /* Skip short lines. */ + if (num_chars <= strlen(needle)) + continue; + /* Skip lines without default route. */ + if (memcmp(line, needle, strlen(needle))) + continue; + /* Remove trailing newline to simplify further parsing. */ + if (line[num_chars - 1] == '\n') + line[num_chars - 1] = '\0'; + /* Search routes after match. */ + kvp_extract_routes(line + strlen(needle), &output, &remaining); + } + /* Convert buffer into C-String. */ + memcpy(output, "", 1); + free(line); + pclose(f); +} + static void kvp_get_ipconfig_info(char *if_name, struct hv_kvp_ipaddr_value *buffer) { @@ -685,30 +768,7 @@ static void kvp_get_ipconfig_info(char *if_name, char *p; FILE *file; - /* - * Get the address of default gateway (ipv4). - */ - sprintf(cmd, "%s %s", "ip route show dev", if_name); - strcat(cmd, " | awk '/default/ {print $3 }'"); - - /* - * Execute the command to gather gateway info. - */ - kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, - (MAX_GATEWAY_SIZE * 2), INET_ADDRSTRLEN, 0); - - /* - * Get the address of default gateway (ipv6). - */ - sprintf(cmd, "%s %s", "ip -f inet6 route show dev", if_name); - strcat(cmd, " | awk '/default/ {print $3 }'"); - - /* - * Execute the command to gather gateway info (ipv6). - */ - kvp_process_ipconfig_file(cmd, (char *)buffer->gate_way, - (MAX_GATEWAY_SIZE * 2), INET6_ADDRSTRLEN, 1); - + kvp_get_gateway(buffer->gate_way, sizeof(buffer->gate_way)); /* * Gather the DNS state. diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 28705ae67784..fd404729b115 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -4968,6 +4968,9 @@ union bpf_attr { * the netns switch takes place from ingress to ingress without * going through the CPU's backlog queue. * + * *skb*\ **->mark** and *skb*\ **->tstamp** are not cleared during + * the netns switch. + * * The *flags* argument is reserved and must be 0. The helper is * currently only supported for tc BPF program types at the * ingress hook and for veth and netkit target device types. 
The diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile index ffcfd777c451..7fbb50b74c00 100644 --- a/tools/lib/perf/Makefile +++ b/tools/lib/perf/Makefile @@ -42,6 +42,7 @@ libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) TEST_ARGS := $(if $(V),-v) INCLUDES = \ +-I$(OUTPUT)arch/$(SRCARCH)/include/generated/uapi \ -I$(srctree)/tools/lib/perf/include \ -I$(srctree)/tools/lib/ \ -I$(srctree)/tools/include \ @@ -99,7 +100,16 @@ $(LIBAPI)-clean: $(call QUIET_CLEAN, libapi) $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null -$(LIBPERF_IN): FORCE +uapi-asm := $(OUTPUT)arch/$(SRCARCH)/include/generated/uapi/asm +ifeq ($(SRCARCH),arm64) + syscall-y := $(uapi-asm)/unistd_64.h +endif +uapi-asm-generic: + $(if $(syscall-y),\ + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-headers obj=$(uapi-asm) \ + generic=include/uapi/asm-generic $(syscall-y),) + +$(LIBPERF_IN): uapi-asm-generic FORCE $(Q)$(MAKE) $(build)=libperf $(LIBPERF_A): $(LIBPERF_IN) @@ -120,7 +130,7 @@ all: fixdep clean: $(LIBAPI)-clean $(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \ - $(TESTS_STATIC) $(TESTS_SHARED) + $(TESTS_STATIC) $(TESTS_SHARED) $(syscall-y) TESTS_IN = tests-in.o diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c index ce32cb35007d..c4da34048ef8 100644 --- a/tools/net/ynl/lib/ynl.c +++ b/tools/net/ynl/lib/ynl.c @@ -364,7 +364,7 @@ int ynl_attr_validate(struct ynl_parse_arg *yarg, const struct nlattr *attr) "Invalid attribute (binary %s)", policy->name); return -1; case YNL_PT_NUL_STR: - if ((!policy->len || len <= policy->len) && !data[len - 1]) + if (len && (!policy->len || len <= policy->len) && !data[len - 1]) break; yerr(yarg->ys, YNL_ERROR_ATTR_INVALID, "Invalid attribute (string %s)", policy->name); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 3a411064fa34..b21b12ec88d9 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -227,6 +227,7 @@ static bool is_rust_noreturn(const struct symbol *func) str_ends_with(func->name, "_4core9panicking19assert_failed_inner") || str_ends_with(func->name, "_4core9panicking30panic_null_pointer_dereference") || str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") || + str_ends_with(func->name, "_7___rustc17rust_begin_unwind") || strstr(func->name, "_4core9panicking13assert_failed") || strstr(func->name, "_4core9panicking11panic_const24panic_const_") || (strstr(func->name, "_4core5slice5index24slice_") && diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index eea95c6c0c71..b7769a22fe1a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -29,6 +29,7 @@ include $(srctree)/tools/scripts/Makefile.arch $(call detected_var,SRCARCH) CFLAGS += -I$(OUTPUT)arch/$(SRCARCH)/include/generated +CFLAGS += -I$(OUTPUT)libperf/arch/$(SRCARCH)/include/generated/uapi # Additional ARCH settings for ppc ifeq ($(SRCARCH),powerpc) diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config index 7bb885b0c32d..422e186cf3cf 100644 --- a/tools/testing/kunit/configs/all_tests.config +++ b/tools/testing/kunit/configs/all_tests.config @@ -20,6 +20,7 @@ CONFIG_VFAT_FS=y CONFIG_PCI=y CONFIG_USB4=y +CONFIG_I2C=y CONFIG_NET=y CONFIG_MCTP=y @@ -53,3 +54,4 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_SOC=y CONFIG_SND_SOC_TOPOLOGY_BUILD=y +CONFIG_SND_SOC_CS35L56_I2C=y diff --git a/tools/testing/memblock/tests/basic_api.c 
b/tools/testing/memblock/tests/basic_api.c index 67503089e6a0..01e836fba488 100644 --- a/tools/testing/memblock/tests/basic_api.c +++ b/tools/testing/memblock/tests/basic_api.c @@ -2434,6 +2434,107 @@ static int memblock_overlaps_region_checks(void) return 0; } +#ifdef CONFIG_NUMA +static int memblock_set_node_check(void) +{ + unsigned long i, max_reserved; + struct memblock_region *rgn; + void *orig_region; + + PREFIX_PUSH(); + + reset_memblock_regions(); + memblock_allow_resize(); + + dummy_physical_memory_init(); + memblock_add(dummy_physical_memory_base(), MEM_SIZE); + orig_region = memblock.reserved.regions; + + /* Equally split the range between node 0 and node 1 */ + memblock_set_node(memblock_start_of_DRAM(), + memblock_phys_mem_size() / 2, &memblock.memory, 0); + memblock_set_node(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2, + memblock_phys_mem_size() / 2, &memblock.memory, 1); + + ASSERT_EQ(memblock.memory.cnt, 2); + rgn = &memblock.memory.regions[0]; + ASSERT_EQ(rgn->base, memblock_start_of_DRAM()); + ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2); + ASSERT_EQ(memblock_get_region_node(rgn), 0); + rgn = &memblock.memory.regions[1]; + ASSERT_EQ(rgn->base, memblock_start_of_DRAM() + memblock_phys_mem_size() / 2); + ASSERT_EQ(rgn->size, memblock_phys_mem_size() / 2); + ASSERT_EQ(memblock_get_region_node(rgn), 1); + + /* Reserve 126 regions with the last one across the node boundary */ + for (i = 0; i < 125; i++) + memblock_reserve(memblock_start_of_DRAM() + SZ_16 * i, SZ_8); + + memblock_reserve(memblock_start_of_DRAM() + memblock_phys_mem_size() / 2 - SZ_8, + SZ_16); + + /* + * Commit 61167ad5fecd ("mm: pass nid to reserve_bootmem_region()") + * does the following to set the nid of each memblock.reserved region. + * But it may miss some regions if memblock_set_node() doubles the + * array. + * + * By checking 'max', we make sure every region's nid is set properly. + */ +repeat: + max_reserved = memblock.reserved.max; + for_each_mem_region(rgn) { + int nid = memblock_get_region_node(rgn); + + memblock_set_node(rgn->base, rgn->size, &memblock.reserved, nid); + } + if (max_reserved != memblock.reserved.max) + goto repeat; + + /* Confirm each region has a valid node set */ + for_each_reserved_mem_region(rgn) { + ASSERT_TRUE(numa_valid_node(memblock_get_region_node(rgn))); + if (rgn == (memblock.reserved.regions + memblock.reserved.cnt - 1)) + ASSERT_EQ(1, memblock_get_region_node(rgn)); + else + ASSERT_EQ(0, memblock_get_region_node(rgn)); + } + + dummy_physical_memory_cleanup(); + + /* + * The current reserved.regions array occupies a range of memory that + * was allocated by dummy_physical_memory_init(). After freeing that memory, + * we must not use it. So restore the original memory region to make sure + * the tests can run normally and are not affected by the doubled array.
+ */ + memblock.reserved.regions = orig_region; + memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS; + + test_pass_pop(); + + return 0; +} + +static int memblock_set_node_checks(void) +{ + prefix_reset(); + prefix_push("memblock_set_node"); + test_print("Running memblock_set_node tests...\n"); + + memblock_set_node_check(); + + prefix_pop(); + + return 0; +} +#else +static int memblock_set_node_checks(void) +{ + return 0; +} +#endif + int memblock_basic_checks(void) { memblock_initialization_check(); @@ -2444,6 +2545,7 @@ int memblock_basic_checks(void) memblock_bottom_up_checks(); memblock_trim_memory_checks(); memblock_overlaps_region_checks(); + memblock_set_node_checks(); return 0; } diff --git a/tools/testing/selftests/drivers/net/dsa/tc_taprio.sh b/tools/testing/selftests/drivers/net/dsa/tc_taprio.sh new file mode 120000 index 000000000000..d16a65e7595d --- /dev/null +++ b/tools/testing/selftests/drivers/net/dsa/tc_taprio.sh @@ -0,0 +1 @@ +run_net_forwarding_test.sh
\ No newline at end of file diff --git a/tools/testing/selftests/drivers/net/ocelot/psfp.sh b/tools/testing/selftests/drivers/net/ocelot/psfp.sh index bed748dde4b0..8972f42dfe03 100755 --- a/tools/testing/selftests/drivers/net/ocelot/psfp.sh +++ b/tools/testing/selftests/drivers/net/ocelot/psfp.sh @@ -266,18 +266,14 @@ run_test() "${base_time}" \ "${CYCLE_TIME_NS}" \ "${SHIFT_TIME_NS}" \ + "${GATE_DURATION_NS}" \ "${NUM_PKTS}" \ "${STREAM_VID}" \ "${STREAM_PRIO}" \ "" \ "${isochron_dat}" - # Count all received packets by looking at the non-zero RX timestamps - received=$(isochron report \ - --input-file "${isochron_dat}" \ - --printf-format "%u\n" --printf-args "R" | \ - grep -w -v '0' | wc -l) - + received=$(isochron_report_num_received "${isochron_dat}") if [ "${received}" = "${expected}" ]; then RET=0 else diff --git a/tools/testing/selftests/drivers/net/ping.py b/tools/testing/selftests/drivers/net/ping.py index 4b6822866066..af8df2313a3b 100755 --- a/tools/testing/selftests/drivers/net/ping.py +++ b/tools/testing/selftests/drivers/net/ping.py @@ -9,11 +9,11 @@ from lib.py import EthtoolFamily, NetDrvEpEnv from lib.py import bkg, cmd, wait_port_listen, rand_port from lib.py import defer, ethtool, ip -remote_ifname="" no_sleep=False def _test_v4(cfg) -> None: - cfg.require_ipver("4") + if not cfg.addr_v["4"]: + return cmd("ping -c 1 -W0.5 " + cfg.remote_addr_v["4"]) cmd("ping -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote) @@ -21,7 +21,8 @@ def _test_v4(cfg) -> None: cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote) def _test_v6(cfg) -> None: - cfg.require_ipver("6") + if not cfg.addr_v["6"]: + return cmd("ping -c 1 -W5 " + cfg.remote_addr_v["6"]) cmd("ping -c 1 -W5 " + cfg.addr_v["6"], host=cfg.remote) @@ -57,7 +58,7 @@ def _set_offload_checksum(cfg, netnl, on) -> None: def _set_xdp_generic_sb_on(cfg) -> None: prog = cfg.net_lib_dir / "xdp_dummy.bpf.o" - cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote) cmd(f"ip link set dev {cfg.ifname} mtu 1500 xdpgeneric obj {prog} sec xdp", shell=True) defer(cmd, f"ip link set dev {cfg.ifname} xdpgeneric off") @@ -66,8 +67,8 @@ def _set_xdp_generic_sb_on(cfg) -> None: def _set_xdp_generic_mb_on(cfg) -> None: prog = cfg.net_lib_dir / "xdp_dummy.bpf.o" - cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote) - defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote) + cmd(f"ip link set dev {cfg.remote_ifname} mtu 9000", shell=True, host=cfg.remote) + defer(ip, f"link set dev {cfg.remote_ifname} mtu 1500", host=cfg.remote) ip("link set dev %s mtu 9000 xdpgeneric obj %s sec xdp.frags" % (cfg.ifname, prog)) defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdpgeneric off") @@ -76,7 +77,7 @@ def _set_xdp_generic_mb_on(cfg) -> None: def _set_xdp_native_sb_on(cfg) -> None: prog = cfg.net_lib_dir / "xdp_dummy.bpf.o" - cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote) cmd(f"ip -j link set dev {cfg.ifname} mtu 1500 xdp obj {prog} sec xdp", shell=True) defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off") xdp_info = ip("-d link show %s" % (cfg.ifname), json=True)[0] @@ -93,8 +94,8 @@ def _set_xdp_native_sb_on(cfg) -> None: def _set_xdp_native_mb_on(cfg) -> None: prog = cfg.net_lib_dir / "xdp_dummy.bpf.o" - cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote) - 
defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote) + cmd(f"ip link set dev {cfg.remote_ifname} mtu 9000", shell=True, host=cfg.remote) + defer(ip, f"link set dev {cfg.remote_ifname} mtu 1500", host=cfg.remote) try: cmd(f"ip link set dev {cfg.ifname} mtu 9000 xdp obj {prog} sec xdp.frags", shell=True) defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off") @@ -112,18 +113,15 @@ def _set_xdp_offload_on(cfg) -> None: except Exception as e: raise KsftSkipEx('device does not support offloaded XDP') defer(ip, f"link set dev {cfg.ifname} xdpoffload off") - cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote) if no_sleep != True: time.sleep(10) def get_interface_info(cfg) -> None: - global remote_ifname global no_sleep - remote_info = cmd(f"ip -4 -o addr show to {cfg.remote_addr_v['4']} | awk '{{print $2}}'", shell=True, host=cfg.remote).stdout - remote_ifname = remote_info.rstrip('\n') - if remote_ifname == "": + if cfg.remote_ifname == "": raise KsftFailEx('Can not get remote interface') local_info = ip("-d link show %s" % (cfg.ifname), json=True)[0] if 'parentbus' in local_info and local_info['parentbus'] == "netdevsim": @@ -136,15 +134,25 @@ def set_interface_init(cfg) -> None: cmd(f"ip link set dev {cfg.ifname} xdp off ", shell=True) cmd(f"ip link set dev {cfg.ifname} xdpgeneric off ", shell=True) cmd(f"ip link set dev {cfg.ifname} xdpoffload off", shell=True) - cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + cmd(f"ip link set dev {cfg.remote_ifname} mtu 1500", shell=True, host=cfg.remote) + +def test_default_v4(cfg, netnl) -> None: + cfg.require_ipver("4") -def test_default(cfg, netnl) -> None: _set_offload_checksum(cfg, netnl, "off") _test_v4(cfg) - _test_v6(cfg) _test_tcp(cfg) _set_offload_checksum(cfg, netnl, "on") _test_v4(cfg) + _test_tcp(cfg) + +def test_default_v6(cfg, netnl) -> None: + cfg.require_ipver("6") + + _set_offload_checksum(cfg, netnl, "off") + _test_v6(cfg) + _test_tcp(cfg) + _set_offload_checksum(cfg, netnl, "on") _test_v6(cfg) _test_tcp(cfg) @@ -202,7 +210,8 @@ def main() -> None: with NetDrvEpEnv(__file__) as cfg: get_interface_info(cfg) set_interface_init(cfg) - ksft_run([test_default, + ksft_run([test_default_v4, + test_default_v6, test_xdp_generic_sb, test_xdp_generic_mb, test_xdp_native_sb, diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c index 4a2d5c454fd1..59a71f22fb11 100644 --- a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c +++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c @@ -48,8 +48,16 @@ static uint64_t get_mnt_id(struct __test_metadata *const _metadata, static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX"; +static const int mark_cmds[] = { + FAN_MARK_ADD, + FAN_MARK_REMOVE, + FAN_MARK_FLUSH +}; + +#define NUM_FAN_FDS ARRAY_SIZE(mark_cmds) + FIXTURE(fanotify) { - int fan_fd; + int fan_fd[NUM_FAN_FDS]; char buf[256]; unsigned int rem; void *next; @@ -61,7 +69,7 @@ FIXTURE(fanotify) { FIXTURE_SETUP(fanotify) { - int ret; + int i, ret; ASSERT_EQ(unshare(CLONE_NEWNS), 0); @@ -89,20 +97,34 @@ FIXTURE_SETUP(fanotify) self->root_id = get_mnt_id(_metadata, "/"); ASSERT_NE(self->root_id, 0); - self->fan_fd = fanotify_init(FAN_REPORT_MNT, 0); - ASSERT_GE(self->fan_fd, 0); - - ret = fanotify_mark(self->fan_fd, FAN_MARK_ADD | 
FAN_MARK_MNTNS, - FAN_MNT_ATTACH | FAN_MNT_DETACH, self->ns_fd, NULL); - ASSERT_EQ(ret, 0); + for (i = 0; i < NUM_FAN_FDS; i++) { + self->fan_fd[i] = fanotify_init(FAN_REPORT_MNT | FAN_NONBLOCK, + 0); + ASSERT_GE(self->fan_fd[i], 0); + ret = fanotify_mark(self->fan_fd[i], FAN_MARK_ADD | + FAN_MARK_MNTNS, + FAN_MNT_ATTACH | FAN_MNT_DETACH, + self->ns_fd, NULL); + ASSERT_EQ(ret, 0); + // On fd[0] we do an extra ADD that changes nothing. + // On fd[1]/fd[2] we REMOVE/FLUSH which removes the mark. + ret = fanotify_mark(self->fan_fd[i], mark_cmds[i] | + FAN_MARK_MNTNS, + FAN_MNT_ATTACH | FAN_MNT_DETACH, + self->ns_fd, NULL); + ASSERT_EQ(ret, 0); + } self->rem = 0; } FIXTURE_TEARDOWN(fanotify) { + int i; + ASSERT_EQ(self->rem, 0); - close(self->fan_fd); + for (i = 0; i < NUM_FAN_FDS; i++) + close(self->fan_fd[i]); ASSERT_EQ(fchdir(self->orig_root), 0); @@ -123,8 +145,21 @@ static uint64_t expect_notify(struct __test_metadata *const _metadata, unsigned int thislen; if (!self->rem) { - ssize_t len = read(self->fan_fd, self->buf, sizeof(self->buf)); - ASSERT_GT(len, 0); + ssize_t len; + int i; + + for (i = NUM_FAN_FDS - 1; i >= 0; i--) { + len = read(self->fan_fd[i], self->buf, + sizeof(self->buf)); + if (i > 0) { + // Groups 1,2 should get EAGAIN + ASSERT_EQ(len, -1); + ASSERT_EQ(errno, EAGAIN); + } else { + // Group 0 should get events + ASSERT_GT(len, 0); + } + } self->rem = len; self->next = (void *) self->buf; diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 322b9d3b0125..57708de2075d 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -129,10 +129,10 @@ static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = { REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0), REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0), - REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0), - REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0), - REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0), - REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 1), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 1), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 1), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 1), REG_FTR_END, }; diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c index 2c3a0eb6b22d..9bc4591c7b16 100644 --- a/tools/testing/selftests/mm/compaction_test.c +++ b/tools/testing/selftests/mm/compaction_test.c @@ -90,6 +90,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size, int compaction_index = 0; char nr_hugepages[20] = {0}; char init_nr_hugepages[24] = {0}; + char target_nr_hugepages[24] = {0}; + int slen; snprintf(init_nr_hugepages, sizeof(init_nr_hugepages), "%lu", initial_nr_hugepages); @@ -106,11 +108,18 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size, goto out; } - /* Request a large number of huge pages. The Kernel will allocate - as much as it can */ - if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) { - ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n", - strerror(errno)); + /* + * Request huge pages for about half of the free memory. 
The Kernel + * will allocate as much as it can, and we expect it will get at least 1/3 + */ + nr_hugepages_ul = mem_free / hugepage_size / 2; + snprintf(target_nr_hugepages, sizeof(target_nr_hugepages), + "%lu", nr_hugepages_ul); + + slen = strlen(target_nr_hugepages); + if (write(fd, target_nr_hugepages, slen) != slen) { + ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n", + nr_hugepages_ul, strerror(errno)); goto close_fd; } diff --git a/tools/testing/selftests/mm/guard-regions.c b/tools/testing/selftests/mm/guard-regions.c index b3d0e2771096..eba43ead13ae 100644 --- a/tools/testing/selftests/mm/guard-regions.c +++ b/tools/testing/selftests/mm/guard-regions.c @@ -271,12 +271,16 @@ FIXTURE_SETUP(guard_regions) self->page_size = (unsigned long)sysconf(_SC_PAGESIZE); setup_sighandler(); - if (variant->backing == ANON_BACKED) + switch (variant->backing) { + case ANON_BACKED: return; - - self->fd = open_file( - variant->backing == SHMEM_BACKED ? "/tmp/" : "", - self->path); + case LOCAL_FILE_BACKED: + self->fd = open_file("", self->path); + break; + case SHMEM_BACKED: + self->fd = memfd_create(self->path, 0); + break; + } /* We truncate file to at least 100 pages, tests can modify as needed. */ ASSERT_EQ(ftruncate(self->fd, 100 * self->page_size), 0); @@ -1696,7 +1700,7 @@ TEST_F(guard_regions, readonly_file) char *ptr; int i; - if (variant->backing == ANON_BACKED) + if (variant->backing != LOCAL_FILE_BACKED) SKIP(return, "Read-only test specific to file-backed"); /* Map shared so we can populate with pattern, populate it, unmap. */ diff --git a/tools/testing/selftests/mm/pkey-powerpc.h b/tools/testing/selftests/mm/pkey-powerpc.h index 1bad310d282a..17bf2d1b0192 100644 --- a/tools/testing/selftests/mm/pkey-powerpc.h +++ b/tools/testing/selftests/mm/pkey-powerpc.h @@ -3,6 +3,8 @@ #ifndef _PKEYS_POWERPC_H #define _PKEYS_POWERPC_H +#include <sys/stat.h> + #ifndef SYS_pkey_alloc # define SYS_pkey_alloc 384 # define SYS_pkey_free 385 @@ -102,8 +104,18 @@ static inline void expect_fault_on_read_execonly_key(void *p1, int pkey) return; } +#define REPEAT_8(s) s s s s s s s s +#define REPEAT_64(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) \ + REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) REPEAT_8(s) +#define REPEAT_512(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) \ + REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) REPEAT_64(s) +#define REPEAT_4096(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) \ + REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) REPEAT_512(s) +#define REPEAT_16384(s) REPEAT_4096(s) REPEAT_4096(s) \ + REPEAT_4096(s) REPEAT_4096(s) + /* 4-byte instructions * 16384 = 64K page */ -#define __page_o_noops() asm(".rept 16384 ; nop; .endr") +#define __page_o_noops() asm(REPEAT_16384("nop\n")) static inline void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) { diff --git a/tools/testing/selftests/mm/pkey_util.c b/tools/testing/selftests/mm/pkey_util.c index ca4ad0d44ab2..255b332f7a08 100644 --- a/tools/testing/selftests/mm/pkey_util.c +++ b/tools/testing/selftests/mm/pkey_util.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#define __SANE_USERSPACE_TYPES__ #include <sys/syscall.h> #include <unistd.h> diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 124078b56fa4..70a38f485d4d 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -31,6 +31,7 @@ TEST_PROGS += veth.sh TEST_PROGS += ioam6.sh TEST_PROGS += gro.sh TEST_PROGS += gre_gso.sh +TEST_PROGS += 
gre_ipv6_lladdr.sh TEST_PROGS += cmsg_so_mark.sh TEST_PROGS += cmsg_so_priority.sh TEST_PROGS += test_so_rcv.sh diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh index 90f8a244ea90..e59fba366a0a 100755 --- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh +++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh @@ -1,7 +1,7 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn other_tpid" +ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn other_tpid 8021p drop_untagged" NUM_NETIFS=4 CHECK_TC="yes" source lib.sh @@ -194,6 +194,100 @@ other_tpid() tc qdisc del dev $h2 clsact } +8021p_do() +{ + local should_fail=$1; shift + local mac=de:ad:be:ef:13:37 + + tc filter add dev $h2 ingress protocol all pref 1 handle 101 \ + flower dst_mac $mac action drop + + $MZ -q $h1 -c 1 -b $mac -a own "81:00 00:00 08:00 aa-aa-aa-aa-aa-aa-aa-aa-aa" + sleep 1 + + tc -j -s filter show dev $h2 ingress \ + | jq -e ".[] | select(.options.handle == 101) \ + | select(.options.actions[0].stats.packets == 1)" &> /dev/null + check_err_fail $should_fail $? "802.1p-tagged reception" + + tc filter del dev $h2 ingress pref 1 +} + +8021p() +{ + RET=0 + + tc qdisc add dev $h2 clsact + ip link set $h2 promisc on + + # Test that with the default_pvid, 1, packets tagged with VID 0 are + # accepted. + 8021p_do 0 + + # Test that packets tagged with VID 0 are still accepted after changing + # the default_pvid. + ip link set br0 type bridge vlan_default_pvid 10 + 8021p_do 0 + + log_test "Reception of 802.1p-tagged traffic" + + ip link set $h2 promisc off + tc qdisc del dev $h2 clsact +} + +send_untagged_and_8021p() +{ + ping_do $h1 192.0.2.2 + check_fail $? + + 8021p_do 1 +} + +drop_untagged() +{ + RET=0 + + tc qdisc add dev $h2 clsact + ip link set $h2 promisc on + + # Test that with no PVID, untagged and 802.1p-tagged traffic is + # dropped. + ip link set br0 type bridge vlan_default_pvid 1 + + # First we reconfigure the default_pvid, 1, as a non-PVID VLAN. + bridge vlan add dev $swp1 vid 1 untagged + send_untagged_and_8021p + bridge vlan add dev $swp1 vid 1 pvid untagged + + # Next we try to delete VID 1 altogether + bridge vlan del dev $swp1 vid 1 + send_untagged_and_8021p + bridge vlan add dev $swp1 vid 1 pvid untagged + + # Set up the bridge without a default_pvid, then check that the 8021q + # module, when the bridge port goes down and then up again, does not + # accidentally re-enable untagged packet reception. + ip link set br0 type bridge vlan_default_pvid 0 + ip link set $swp1 down + ip link set $swp1 up + setup_wait + send_untagged_and_8021p + + # Remove swp1 as a bridge port and let it rejoin the bridge while it + # has no default_pvid. 
+ ip link set $swp1 nomaster + ip link set $swp1 master br0 + send_untagged_and_8021p + + # Restore settings + ip link set br0 type bridge vlan_default_pvid 1 + + log_test "Dropping of untagged and 802.1p-tagged traffic with no PVID" + + ip link set $h2 promisc off + tc qdisc del dev $h2 clsact +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/tc_taprio.sh b/tools/testing/selftests/net/forwarding/tc_taprio.sh new file mode 100755 index 000000000000..8992aeabfe0b --- /dev/null +++ b/tools/testing/selftests/net/forwarding/tc_taprio.sh @@ -0,0 +1,421 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" \ + test_clock_jump_backward \ + test_taprio_after_ptp \ + test_max_sdu \ + test_clock_jump_backward_forward \ +" +NUM_NETIFS=4 +source tc_common.sh +source lib.sh +source tsn_lib.sh + +require_command python3 + +# The test assumes the usual topology from the README, where h1 is connected to +# swp1, h2 to swp2, and swp1 and swp2 are together in a bridge. +# Additional assumption: h1 and h2 use the same PHC, and so do swp1 and swp2. +# By synchronizing h1 to swp1 via PTP, h2 is also implicitly synchronized to +# swp1 (and both to CLOCK_REALTIME). +h1=${NETIFS[p1]} +swp1=${NETIFS[p2]} +swp2=${NETIFS[p3]} +h2=${NETIFS[p4]} + +UDS_ADDRESS_H1="/var/run/ptp4l_h1" +UDS_ADDRESS_SWP1="/var/run/ptp4l_swp1" + +H1_IPV4="192.0.2.1" +H2_IPV4="192.0.2.2" +H1_IPV6="2001:db8:1::1" +H2_IPV6="2001:db8:1::2" + +# Tunables +NUM_PKTS=100 +STREAM_VID=10 +STREAM_PRIO_1=6 +STREAM_PRIO_2=5 +STREAM_PRIO_3=4 +# PTP uses TC 0 +ALL_GATES=$((1 << 0 | 1 << STREAM_PRIO_1 | 1 << STREAM_PRIO_2)) +# Use a conservative cycle of 10 ms to allow the test to still pass when the +# kernel has some extra overhead like lockdep etc +CYCLE_TIME_NS=10000000 +# Create two Gate Control List entries, one OPEN and one CLOSE, of equal +# durations +GATE_DURATION_NS=$((CYCLE_TIME_NS / 2)) +# Give 2/3 of the cycle time to user space and 1/3 to the kernel +FUDGE_FACTOR=$((CYCLE_TIME_NS / 3)) +# Shift the isochron base time by half the gate time, so that packets are +# always received by swp1 close to the middle of the time slot, to minimize +# inaccuracies due to network sync +SHIFT_TIME_NS=$((GATE_DURATION_NS / 2)) + +path_delay= + +h1_create() +{ + simple_if_init $h1 $H1_IPV4/24 $H1_IPV6/64 +} + +h1_destroy() +{ + simple_if_fini $h1 $H1_IPV4/24 $H1_IPV6/64 +} + +h2_create() +{ + simple_if_init $h2 $H2_IPV4/24 $H2_IPV6/64 +} + +h2_destroy() +{ + simple_if_fini $h2 $H2_IPV4/24 $H2_IPV6/64 +} + +switch_create() +{ + local h2_mac_addr=$(mac_get $h2) + + ip link set $swp1 up + ip link set $swp2 up + + ip link add br0 type bridge vlan_filtering 1 + ip link set $swp1 master br0 + ip link set $swp2 master br0 + ip link set br0 up + + bridge vlan add dev $swp2 vid $STREAM_VID + bridge vlan add dev $swp1 vid $STREAM_VID + bridge fdb add dev $swp2 \ + $h2_mac_addr vlan $STREAM_VID static master +} + +switch_destroy() +{ + ip link del br0 +} + +ptp_setup() +{ + # Set up swp1 as a master PHC for h1, synchronized to the local + # CLOCK_REALTIME. 
+ phc2sys_start $UDS_ADDRESS_SWP1 + ptp4l_start $h1 true $UDS_ADDRESS_H1 + ptp4l_start $swp1 false $UDS_ADDRESS_SWP1 +} + +ptp_cleanup() +{ + ptp4l_stop $swp1 + ptp4l_stop $h1 + phc2sys_stop +} + +txtime_setup() +{ + local if_name=$1 + + tc qdisc add dev $if_name clsact + # Classify PTP on TC 7 and isochron on TC 6 + tc filter add dev $if_name egress protocol 0x88f7 \ + flower action skbedit priority 7 + tc filter add dev $if_name egress protocol 802.1Q \ + flower vlan_ethtype 0xdead action skbedit priority 6 + tc qdisc add dev $if_name handle 100: parent root mqprio num_tc 8 \ + queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \ + map 0 1 2 3 4 5 6 7 \ + hw 1 + # Set up TC 5, 6, 7 for SO_TXTIME. tc-mqprio queues count from 1. + tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_1 + 1)) etf \ + clockid CLOCK_TAI offload delta $FUDGE_FACTOR + tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_2 + 1)) etf \ + clockid CLOCK_TAI offload delta $FUDGE_FACTOR + tc qdisc replace dev $if_name parent 100:$((STREAM_PRIO_3 + 1)) etf \ + clockid CLOCK_TAI offload delta $FUDGE_FACTOR +} + +txtime_cleanup() +{ + local if_name=$1 + + tc qdisc del dev $if_name clsact + tc qdisc del dev $if_name root +} + +taprio_replace() +{ + local if_name="$1"; shift + local extra_args="$1"; shift + + # STREAM_PRIO_1 always has an open gate. + # STREAM_PRIO_2 has a gate open for GATE_DURATION_NS (half the cycle time) + # STREAM_PRIO_3 always has a closed gate. + tc qdisc replace dev $if_name root stab overhead 24 taprio num_tc 8 \ + queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \ + map 0 1 2 3 4 5 6 7 \ + sched-entry S $(printf "%x" $ALL_GATES) $GATE_DURATION_NS \ + sched-entry S $(printf "%x" $((ALL_GATES & ~(1 << STREAM_PRIO_2)))) $GATE_DURATION_NS \ + base-time 0 flags 0x2 $extra_args + taprio_wait_for_admin $if_name +} + +taprio_cleanup() +{ + local if_name=$1 + + tc qdisc del dev $if_name root +} + +probe_path_delay() +{ + local isochron_dat="$(mktemp)" + local received + + log_info "Probing path delay" + + isochron_do "$h1" "$h2" "$UDS_ADDRESS_H1" "" 0 \ + "$CYCLE_TIME_NS" "" "" "$NUM_PKTS" \ + "$STREAM_VID" "$STREAM_PRIO_1" "" "$isochron_dat" + + received=$(isochron_report_num_received "$isochron_dat") + if [ "$received" != "$NUM_PKTS" ]; then + echo "Cannot establish basic data path between $h1 and $h2" + exit $ksft_fail + fi + + printf "pdelay = {}\n" > isochron_data.py + isochron report --input-file "$isochron_dat" \ + --printf-format "pdelay[%u] = %d - %d\n" \ + --printf-args "qRT" \ + >> isochron_data.py + cat <<-'EOF' > isochron_postprocess.py + #!/usr/bin/env python3 + + from isochron_data import pdelay + import numpy as np + + w = np.array(list(pdelay.values())) + print("{}".format(np.max(w))) + EOF + path_delay=$(python3 ./isochron_postprocess.py) + + log_info "Path delay from $h1 to $h2 estimated at $path_delay ns" + + if [ "$path_delay" -gt "$GATE_DURATION_NS" ]; then + echo "Path delay larger than gate duration, aborting" + exit $ksft_fail + fi + + rm -f ./isochron_data.py 2> /dev/null + rm -f ./isochron_postprocess.py 2> /dev/null + rm -f "$isochron_dat" 2> /dev/null +} + +setup_prepare() +{ + vrf_prepare + + h1_create + h2_create + switch_create + + txtime_setup $h1 + + # Temporarily set up PTP just to probe the end-to-end path delay. 
+ ptp_setup + probe_path_delay + ptp_cleanup +} + +cleanup() +{ + pre_cleanup + + isochron_recv_stop + txtime_cleanup $h1 + + switch_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +run_test() +{ + local base_time=$1; shift + local stream_prio=$1; shift + local expected_delay=$1; shift + local should_fail=$1; shift + local test_name=$1; shift + local isochron_dat="$(mktemp)" + local received + local median_delay + + RET=0 + + # Set the shift time equal to the cycle time, which effectively + # cancels the default advance time. Packets won't be sent early in + # software, which ensures that they won't prematurely enter through + # the open gate in __test_out_of_band(). Also, the gate is open for + # long enough that this won't cause a problem in __test_in_band(). + isochron_do "$h1" "$h2" "$UDS_ADDRESS_H1" "" "$base_time" \ + "$CYCLE_TIME_NS" "$SHIFT_TIME_NS" "$GATE_DURATION_NS" \ + "$NUM_PKTS" "$STREAM_VID" "$stream_prio" "" "$isochron_dat" + + received=$(isochron_report_num_received "$isochron_dat") + [ "$received" = "$NUM_PKTS" ] + check_err_fail $should_fail $? "Reception of $NUM_PKTS packets" + + if [ $should_fail = 0 ] && [ "$received" = "$NUM_PKTS" ]; then + printf "pdelay = {}\n" > isochron_data.py + isochron report --input-file "$isochron_dat" \ + --printf-format "pdelay[%u] = %d - %d\n" \ + --printf-args "qRT" \ + >> isochron_data.py + cat <<-'EOF' > isochron_postprocess.py + #!/usr/bin/env python3 + + from isochron_data import pdelay + import numpy as np + + w = np.array(list(pdelay.values())) + print("{}".format(int(np.median(w)))) + EOF + median_delay=$(python3 ./isochron_postprocess.py) + + # If the condition below is true, packets were delayed by a closed gate + [ "$median_delay" -gt $((path_delay + expected_delay)) ] + check_fail $? "Median delay $median_delay is greater than expected delay $expected_delay plus path delay $path_delay" + + # If the condition below is true, packets were sent expecting them to + # hit a closed gate in the switch, but were not delayed + [ "$expected_delay" -gt 0 ] && [ "$median_delay" -lt "$expected_delay" ] + check_fail $? 
"Median delay $median_delay is less than expected delay $expected_delay" + fi + + log_test "$test_name" + + rm -f ./isochron_data.py 2> /dev/null + rm -f ./isochron_postprocess.py 2> /dev/null + rm -f "$isochron_dat" 2> /dev/null +} + +__test_always_open() +{ + run_test 0.000000000 $STREAM_PRIO_1 0 0 "Gate always open" +} + +__test_always_closed() +{ + run_test 0.000000000 $STREAM_PRIO_3 0 1 "Gate always closed" +} + +__test_in_band() +{ + # Send packets in-band with the OPEN gate entry + run_test 0.000000000 $STREAM_PRIO_2 0 0 "In band with gate" +} + +__test_out_of_band() +{ + # Send packets in-band with the CLOSE gate entry + run_test 0.005000000 $STREAM_PRIO_2 \ + $((GATE_DURATION_NS - SHIFT_TIME_NS)) 0 \ + "Out of band with gate" +} + +run_subtests() +{ + __test_always_open + __test_always_closed + __test_in_band + __test_out_of_band +} + +test_taprio_after_ptp() +{ + log_info "Setting up taprio after PTP" + ptp_setup + taprio_replace $swp2 + run_subtests + taprio_cleanup $swp2 + ptp_cleanup +} + +__test_under_max_sdu() +{ + # Limit max-sdu for STREAM_PRIO_1 + taprio_replace "$swp2" "max-sdu 0 0 0 0 0 0 100 0" + run_test 0.000000000 $STREAM_PRIO_1 0 0 "Under maximum SDU" +} + +__test_over_max_sdu() +{ + # Limit max-sdu for STREAM_PRIO_1 + taprio_replace "$swp2" "max-sdu 0 0 0 0 0 0 20 0" + run_test 0.000000000 $STREAM_PRIO_1 0 1 "Over maximum SDU" +} + +test_max_sdu() +{ + ptp_setup + __test_under_max_sdu + __test_over_max_sdu + taprio_cleanup $swp2 + ptp_cleanup +} + +# Perform a clock jump in the past without synchronization running, so that the +# time base remains where it was set by phc_ctl. +test_clock_jump_backward() +{ + # This is a more complex schedule specifically crafted in a way that + # has been problematic on NXP LS1028A. Not much to test with it other + # than the fact that it passes traffic. + tc qdisc replace dev $swp2 root stab overhead 24 taprio num_tc 8 \ + queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 map 0 1 2 3 4 5 6 7 \ + base-time 0 sched-entry S 20 300000 sched-entry S 10 200000 \ + sched-entry S 20 300000 sched-entry S 48 200000 \ + sched-entry S 20 300000 sched-entry S 83 200000 \ + sched-entry S 40 300000 sched-entry S 00 200000 flags 2 + + log_info "Forcing a backward clock jump" + phc_ctl $swp1 set 0 + + ping_test $h1 192.0.2.2 + taprio_cleanup $swp2 +} + +# Test that taprio tolerates clock jumps. +# Since ptp4l and phc2sys are running, it is expected for the time to +# eventually recover (through yet another clock jump). Isochron waits +# until that is the case. +test_clock_jump_backward_forward() +{ + log_info "Forcing a backward and a forward clock jump" + taprio_replace $swp2 + phc_ctl $swp1 set 0 + ptp_setup + ping_test $h1 192.0.2.2 + run_subtests + ptp_cleanup + taprio_cleanup $swp2 +} + +tc_offload_check +if [[ $? 
-ne 0 ]]; then + log_test_skip "Could not test offloaded functionality" + exit $EXIT_STATUS +fi + +trap cleanup EXIT + +setup_prepare +setup_wait +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/tsn_lib.sh b/tools/testing/selftests/net/forwarding/tsn_lib.sh index b91bcd8008a9..08c044ff6689 100644 --- a/tools/testing/selftests/net/forwarding/tsn_lib.sh +++ b/tools/testing/selftests/net/forwarding/tsn_lib.sh @@ -2,6 +2,8 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright 2021-2022 NXP +tc_testing_scripts_dir=$(dirname $0)/../../tc-testing/scripts + REQUIRE_ISOCHRON=${REQUIRE_ISOCHRON:=yes} REQUIRE_LINUXPTP=${REQUIRE_LINUXPTP:=yes} @@ -18,6 +20,7 @@ fi if [[ "$REQUIRE_LINUXPTP" = "yes" ]]; then require_command phc2sys require_command ptp4l + require_command phc_ctl fi phc2sys_start() @@ -182,6 +185,7 @@ isochron_do() local base_time=$1; shift local cycle_time=$1; shift local shift_time=$1; shift + local window_size=$1; shift local num_pkts=$1; shift local vid=$1; shift local priority=$1; shift @@ -212,6 +216,10 @@ isochron_do() extra_args="${extra_args} --shift-time=${shift_time}" fi + if ! [ -z "${window_size}" ]; then + extra_args="${extra_args} --window-size=${window_size}" + fi + if [ "${use_l2}" = "true" ]; then extra_args="${extra_args} --l2 --etype=0xdead ${vid}" receiver_extra_args="--l2 --etype=0xdead" @@ -247,3 +255,21 @@ isochron_do() cpufreq_restore ${ISOCHRON_CPU} } + +isochron_report_num_received() +{ + local isochron_dat=$1; shift + + # Count all received packets by looking at the non-zero RX timestamps + isochron report \ + --input-file "${isochron_dat}" \ + --printf-format "%u\n" --printf-args "R" | \ + grep -w -v '0' | wc -l +} + +taprio_wait_for_admin() +{ + local if_name="$1"; shift + + "$tc_testing_scripts_dir/taprio_wait_for_admin.sh" "$(which tc)" "$if_name" +} diff --git a/tools/testing/selftests/net/gre_ipv6_lladdr.sh b/tools/testing/selftests/net/gre_ipv6_lladdr.sh new file mode 100755 index 000000000000..5b34f6e1f831 --- /dev/null +++ b/tools/testing/selftests/net/gre_ipv6_lladdr.sh @@ -0,0 +1,177 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source ./lib.sh + +PAUSE_ON_FAIL="no" + +# The trap function handler +# +exit_cleanup_all() +{ + cleanup_all_ns + + exit "${EXIT_STATUS}" +} + +# Add fake IPv4 and IPv6 networks on the loopback device, to be used as +# underlay by future GRE devices. +# +setup_basenet() +{ + ip -netns "${NS0}" link set dev lo up + ip -netns "${NS0}" address add dev lo 192.0.2.10/24 + ip -netns "${NS0}" address add dev lo 2001:db8::10/64 nodad +} + +# Check if network device has an IPv6 link-local address assigned. +# +# Parameters: +# +# * $1: The network device to test +# * $2: An extra regular expression that should be matched (to verify the +# presence of extra attributes) +# * $3: The expected return code from grep (to allow checking the absence of +# a link-local address) +# * $4: The user visible name for the scenario being tested +# +check_ipv6_ll_addr() +{ + local DEV="$1" + local EXTRA_MATCH="$2" + local XRET="$3" + local MSG="$4" + + RET=0 + set +e + ip -netns "${NS0}" -6 address show dev "${DEV}" scope link | grep "fe80::" | grep -q "${EXTRA_MATCH}" + check_err_fail "${XRET}" $? "" + log_test "${MSG}" + set -e +} + +# Create a GRE device and verify that it gets an IPv6 link-local address as +# expected. 
+# +# Parameters: +# +# * $1: The device type (gre, ip6gre, gretap or ip6gretap) +# * $2: The local underlay IP address (can be an IPv4, an IPv6 or "any") +# * $3: The remote underlay IP address (can be an IPv4, an IPv6 or "any") +# * $4: The IPv6 interface identifier generation mode to use for the GRE +# device (eui64, none, stable-privacy or random). +# +test_gre_device() +{ + local GRE_TYPE="$1" + local LOCAL_IP="$2" + local REMOTE_IP="$3" + local MODE="$4" + local ADDR_GEN_MODE + local MATCH_REGEXP + local MSG + + ip link add netns "${NS0}" name gretest type "${GRE_TYPE}" local "${LOCAL_IP}" remote "${REMOTE_IP}" + + case "${MODE}" in + "eui64") + ADDR_GEN_MODE=0 + MATCH_REGEXP="" + MSG="${GRE_TYPE}, mode: 0 (EUI64), ${LOCAL_IP} -> ${REMOTE_IP}" + XRET=0 + ;; + "none") + ADDR_GEN_MODE=1 + MATCH_REGEXP="" + MSG="${GRE_TYPE}, mode: 1 (none), ${LOCAL_IP} -> ${REMOTE_IP}" + XRET=1 # No link-local address should be generated + ;; + "stable-privacy") + ADDR_GEN_MODE=2 + MATCH_REGEXP="stable-privacy" + MSG="${GRE_TYPE}, mode: 2 (stable privacy), ${LOCAL_IP} -> ${REMOTE_IP}" + XRET=0 + # Initialise stable_secret (required for stable-privacy mode) + ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.stable_secret="2001:db8::abcd" + ;; + "random") + ADDR_GEN_MODE=3 + MATCH_REGEXP="stable-privacy" + MSG="${GRE_TYPE}, mode: 3 (random), ${LOCAL_IP} -> ${REMOTE_IP}" + XRET=0 + ;; + esac + + # Check that IPv6 link-local address is generated when device goes up + ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}" + ip -netns "${NS0}" link set dev gretest up + check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "config: ${MSG}" + + # Now disable link-local address generation + ip -netns "${NS0}" link set dev gretest down + ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode=1 + ip -netns "${NS0}" link set dev gretest up + + # Check that link-local address generation works when re-enabled while + # the device is already up + ip netns exec "${NS0}" sysctl -qw net.ipv6.conf.gretest.addr_gen_mode="${ADDR_GEN_MODE}" + check_ipv6_ll_addr gretest "${MATCH_REGEXP}" "${XRET}" "update: ${MSG}" + + ip -netns "${NS0}" link del dev gretest +} + +test_gre4() +{ + local GRE_TYPE + local MODE + + for GRE_TYPE in "gre" "gretap"; do + printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n" + + for MODE in "eui64" "none" "stable-privacy" "random"; do + test_gre_device "${GRE_TYPE}" 192.0.2.10 192.0.2.11 "${MODE}" + test_gre_device "${GRE_TYPE}" any 192.0.2.11 "${MODE}" + test_gre_device "${GRE_TYPE}" 192.0.2.10 any "${MODE}" + done + done +} + +test_gre6() +{ + local GRE_TYPE + local MODE + + for GRE_TYPE in "ip6gre" "ip6gretap"; do + printf "\n####\nTesting IPv6 link-local address generation on ${GRE_TYPE} devices\n####\n\n" + + for MODE in "eui64" "none" "stable-privacy" "random"; do + test_gre_device "${GRE_TYPE}" 2001:db8::10 2001:db8::11 "${MODE}" + test_gre_device "${GRE_TYPE}" any 2001:db8::11 "${MODE}" + test_gre_device "${GRE_TYPE}" 2001:db8::10 any "${MODE}" + done + done +} + +usage() +{ + echo "Usage: $0 [-p]" + exit 1 +} + +while getopts :p o +do + case $o in + p) PAUSE_ON_FAIL="yes";; + *) usage;; + esac +done + +setup_ns NS0 + +set -e +trap exit_cleanup_all EXIT + +setup_basenet + +test_gre4 +test_gre6 diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json index e26bbc169783..a951c0d33cd2 100644 --- 
a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json @@ -352,5 +352,226 @@ "$TC qdisc del dev $DUMMY handle 1:0 root", "$IP addr del 10.10.10.10/24 dev $DUMMY || true" ] + }, + { + "id": "90ec", + "name": "Test DRR's enqueue reentrant behaviour with netem", + "category": [ + "qdisc", + "drr" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link set dev $DUMMY up || true", + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", + "$TC qdisc add dev $DUMMY handle 1:0 root drr", + "$TC class replace dev $DUMMY parent 1:0 classid 1:1 drr", + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1" + ], + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", + "expExitCode": "0", + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", + "matchJSON": [ + { + "kind": "drr", + "handle": "1:", + "bytes": 196, + "packets": 2 + } + ], + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1:0 root", + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" + ] + }, + { + "id": "1f1f", + "name": "Test ETS's enqueue reentrant behaviour with netem", + "category": [ + "qdisc", + "ets" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link set dev $DUMMY up || true", + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", + "$TC qdisc add dev $DUMMY handle 1:0 root ets bands 2", + "$TC class replace dev $DUMMY parent 1:0 classid 1:1 ets quantum 1500", + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1" + ], + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", + "expExitCode": "0", + "verifyCmd": "$TC -j -s class show dev $DUMMY", + "matchJSON": [ + { + "class": "ets", + "handle": "1:1", + "stats": { + "bytes": 196, + "packets": 2 + } + } + ], + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1:0 root", + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" + ] + }, + { + "id": "5e6d", + "name": "Test QFQ's enqueue reentrant behaviour with netem", + "category": [ + "qdisc", + "qfq" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link set dev $DUMMY up || true", + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", + "$TC qdisc add dev $DUMMY handle 1:0 root qfq", + "$TC class replace dev $DUMMY parent 1:0 classid 1:1 qfq weight 100 maxpkt 1500", + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1" + ], + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", + "expExitCode": "0", + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", + "matchJSON": [ + { + "kind": "qfq", + "handle": "1:", + "bytes": 196, + "packets": 2 + } + ], + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1:0 root", + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" + ] + }, + { + "id": "bf1d", + "name": "Test HFSC's enqueue reentrant behaviour with netem", + "category": [ + "qdisc", + "hfsc" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link set dev $DUMMY up || true", + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", + "$TC qdisc add dev $DUMMY handle 1:0 root hfsc", + "$TC class add dev $DUMMY parent 1:0 classid 1:1 hfsc ls 
m2 10Mbit", + "$TC qdisc add dev $DUMMY parent 1:1 handle 2:0 netem duplicate 100%", + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip dst 10.10.10.1/32 flowid 1:1", + "$TC class add dev $DUMMY parent 1:0 classid 1:2 hfsc ls m2 10Mbit", + "$TC qdisc add dev $DUMMY parent 1:2 handle 3:0 netem duplicate 100%", + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 2 u32 match ip dst 10.10.10.2/32 flowid 1:2", + "ping -c 1 10.10.10.1 -I$DUMMY > /dev/null || true", + "$TC filter del dev $DUMMY parent 1:0 protocol ip prio 1", + "$TC class del dev $DUMMY classid 1:1" + ], + "cmdUnderTest": "ping -c 1 10.10.10.2 -I$DUMMY > /dev/null || true", + "expExitCode": "0", + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", + "matchJSON": [ + { + "kind": "hfsc", + "handle": "1:", + "bytes": 392, + "packets": 4 + } + ], + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1:0 root", + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" + ] + }, + { + "id": "7c3b", + "name": "Test nested DRR's enqueue reentrant behaviour with netem", + "category": [ + "qdisc", + "drr" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link set dev $DUMMY up || true", + "$IP addr add 10.10.10.10/24 dev $DUMMY || true", + "$TC qdisc add dev $DUMMY handle 1:0 root drr", + "$TC class add dev $DUMMY parent 1:0 classid 1:1 drr", + "$TC filter add dev $DUMMY parent 1:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 1:1", + "$TC qdisc add dev $DUMMY handle 2:0 parent 1:1 drr", + "$TC class add dev $DUMMY classid 2:1 parent 2:0 drr", + "$TC filter add dev $DUMMY parent 2:0 protocol ip prio 1 u32 match ip protocol 1 0xff flowid 2:1", + "$TC qdisc add dev $DUMMY parent 2:1 handle 3:0 netem duplicate 100%" + ], + "cmdUnderTest": "ping -c 1 -I $DUMMY 10.10.10.1 > /dev/null || true", + "expExitCode": "0", + "verifyCmd": "$TC -j -s qdisc ls dev $DUMMY handle 1:0", + "matchJSON": [ + { + "kind": "drr", + "handle": "1:", + "bytes": 196, + "packets": 2 + } + ], + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1:0 root", + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" + ] + }, + { + "id": "62c4", + "name": "Test HTB with FQ_CODEL - basic functionality", + "category": [ + "qdisc", + "htb", + "fq_codel" + ], + "plugins": { + "requires": [ + "nsPlugin", + "scapyPlugin" + ] + }, + "setup": [ + "$TC qdisc add dev $DEV1 root handle 1: htb default 11", + "$TC class add dev $DEV1 parent 1: classid 1:1 htb rate 10kbit", + "$TC class add dev $DEV1 parent 1:1 classid 1:11 htb rate 10kbit prio 0 quantum 1486", + "$TC qdisc add dev $DEV1 parent 1:11 fq_codel quantum 300 noecn", + "sleep 0.5" + ], + "scapy": { + "iface": "$DEV0", + "count": 5, + "packet": "Ether()/IP(dst='10.10.10.1', src='10.10.10.10')/TCP(sport=12345, dport=80)" + }, + "cmdUnderTest": "$TC -s qdisc show dev $DEV1", + "expExitCode": "0", + "verifyCmd": "$TC -s qdisc show dev $DEV1 | grep -A 5 'qdisc fq_codel'", + "matchPattern": "Sent [0-9]+ bytes [0-9]+ pkt", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DEV1 handle 1: root" + ] } ] diff --git a/tools/testing/selftests/ublk/Makefile b/tools/testing/selftests/ublk/Makefile index ec4624a283bc..f34ac0bac696 100644 --- a/tools/testing/selftests/ublk/Makefile +++ b/tools/testing/selftests/ublk/Makefile @@ -9,6 +9,7 @@ TEST_PROGS += test_generic_03.sh TEST_PROGS += test_generic_04.sh TEST_PROGS += test_generic_05.sh TEST_PROGS += test_generic_06.sh +TEST_PROGS += test_generic_07.sh TEST_PROGS += test_null_01.sh TEST_PROGS += 
test_null_02.sh diff --git a/tools/testing/selftests/ublk/kublk.c b/tools/testing/selftests/ublk/kublk.c index e57a1486bb48..842b40736a9b 100644 --- a/tools/testing/selftests/ublk/kublk.c +++ b/tools/testing/selftests/ublk/kublk.c @@ -536,12 +536,17 @@ int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag) if (!(io->flags & UBLKSRV_IO_FREE)) return 0; - /* we issue because we need either fetching or committing */ + /* + * we issue because we need either fetching or committing or + * getting data + */ if (!(io->flags & - (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP))) + (UBLKSRV_NEED_FETCH_RQ | UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_NEED_GET_DATA))) return 0; - if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) + if (io->flags & UBLKSRV_NEED_GET_DATA) + cmd_op = UBLK_U_IO_NEED_GET_DATA; + else if (io->flags & UBLKSRV_NEED_COMMIT_RQ_COMP) cmd_op = UBLK_U_IO_COMMIT_AND_FETCH_REQ; else if (io->flags & UBLKSRV_NEED_FETCH_RQ) cmd_op = UBLK_U_IO_FETCH_REQ; @@ -658,6 +663,9 @@ static void ublk_handle_cqe(struct io_uring *r, assert(tag < q->q_depth); if (q->tgt_ops->queue_io) q->tgt_ops->queue_io(q, tag); + } else if (cqe->res == UBLK_IO_RES_NEED_GET_DATA) { + io->flags |= UBLKSRV_NEED_GET_DATA | UBLKSRV_IO_FREE; + ublk_queue_io_cmd(q, io, tag); } else { /* * COMMIT_REQ will be completed immediately since no fetching @@ -1237,7 +1245,7 @@ static void __cmd_create_help(char *exe, bool recovery) printf("%s %s -t [null|loop|stripe|fault_inject] [-q nr_queues] [-d depth] [-n dev_id]\n", exe, recovery ? "recover" : "add"); - printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g 0|1]\n"); + printf("\t[--foreground] [--quiet] [-z] [--debug_mask mask] [-r 0|1 ] [-g]\n"); printf("\t[-e 0|1 ] [-i 0|1]\n"); printf("\t[target options] [backfile1] [backfile2] ...\n"); printf("\tdefault: nr_queues=2(max 32), depth=128(max 1024), dev_id=-1(auto allocation)\n"); @@ -1313,7 +1321,7 @@ int main(int argc, char *argv[]) opterr = 0; optind = 2; - while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:az", + while ((opt = getopt_long(argc, argv, "t:n:d:q:r:e:i:gaz", longopts, &option_idx)) != -1) { switch (opt) { case 'a': @@ -1351,9 +1359,7 @@ int main(int argc, char *argv[]) ctx.flags |= UBLK_F_USER_RECOVERY | UBLK_F_USER_RECOVERY_REISSUE; break; case 'g': - value = strtol(optarg, NULL, 10); - if (value) - ctx.flags |= UBLK_F_NEED_GET_DATA; + ctx.flags |= UBLK_F_NEED_GET_DATA; break; case 0: if (!strcmp(longopts[option_idx].name, "debug_mask")) diff --git a/tools/testing/selftests/ublk/kublk.h b/tools/testing/selftests/ublk/kublk.h index 918db5cd633f..44ee1e4ac55b 100644 --- a/tools/testing/selftests/ublk/kublk.h +++ b/tools/testing/selftests/ublk/kublk.h @@ -115,6 +115,7 @@ struct ublk_io { #define UBLKSRV_NEED_FETCH_RQ (1UL << 0) #define UBLKSRV_NEED_COMMIT_RQ_COMP (1UL << 1) #define UBLKSRV_IO_FREE (1UL << 2) +#define UBLKSRV_NEED_GET_DATA (1UL << 3) unsigned short flags; unsigned short refs; /* used by target code only */ diff --git a/tools/testing/selftests/ublk/test_generic_07.sh b/tools/testing/selftests/ublk/test_generic_07.sh new file mode 100755 index 000000000000..cba86451fa5e --- /dev/null +++ b/tools/testing/selftests/ublk/test_generic_07.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="generic_07" +ERR_CODE=0 + +if ! 
_have_program fio; then + exit "$UBLK_SKIP_CODE" +fi + +_prep_test "generic" "test UBLK_F_NEED_GET_DATA" + +_create_backfile 0 256M +dev_id=$(_add_ublk_dev -t loop -q 2 -g "${UBLK_BACKFILES[0]}") +_check_add_dev $TID $? + +# run fio over the ublk disk +_run_fio_verify_io --filename=/dev/ublkb"${dev_id}" --size=256M +ERR_CODE=$? +if [ "$ERR_CODE" -eq 0 ]; then + _mkfs_mount_test /dev/ublkb"${dev_id}" + ERR_CODE=$? +fi + +_cleanup_test "generic" +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/ublk/test_stress_05.sh b/tools/testing/selftests/ublk/test_stress_05.sh index a7071b10224d..88601b48f1cd 100755 --- a/tools/testing/selftests/ublk/test_stress_05.sh +++ b/tools/testing/selftests/ublk/test_stress_05.sh @@ -47,15 +47,15 @@ _create_backfile 0 256M _create_backfile 1 256M for reissue in $(seq 0 1); do - ublk_io_and_remove 8G -t null -q 4 -g 1 -r 1 -i "$reissue" & - ublk_io_and_remove 256M -t loop -q 4 -g 1 -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & + ublk_io_and_remove 8G -t null -q 4 -g -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 -g -r 1 -i "$reissue" "${UBLK_BACKFILES[0]}" & wait done if _have_feature "ZERO_COPY"; then for reissue in $(seq 0 1); do - ublk_io_and_remove 8G -t null -q 4 -g 1 -z -r 1 -i "$reissue" & - ublk_io_and_remove 256M -t loop -q 4 -g 1 -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & + ublk_io_and_remove 8G -t null -q 4 -g -z -r 1 -i "$reissue" & + ublk_io_and_remove 256M -t loop -q 4 -g -z -r 1 -i "$reissue" "${UBLK_BACKFILES[1]}" & wait done fi |
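For illustration only: after the kublk change above, '-g' is a bare boolean flag that enables UBLK_F_NEED_GET_DATA instead of taking a 0|1 argument, which is why the test scripts drop the trailing "1". A minimal usage sketch, reusing the selftest helper and backfile names that appear in test_generic_07.sh and test_stress_05.sh above:

# old form, before this series: the flag required an explicit argument
#   _add_ublk_dev -t loop -q 2 -g 1 "${UBLK_BACKFILES[0]}"
# new form, after this series: '-g' alone requests UBLK_F_NEED_GET_DATA
dev_id=$(_add_ublk_dev -t loop -q 2 -g "${UBLK_BACKFILES[0]}")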