author     Sean Paul <seanpaul@chromium.org>    2017-05-04 15:38:21 +0300
committer  Sean Paul <seanpaul@chromium.org>    2017-05-04 15:42:49 +0300
commit     3c390df3337e54130e4b511ea3bbb868643cc5ea
tree       3963984db2528843edcf2a2bb9f281fd150ab784
parent     cd4b11d9d524ef457433d42c121e3405765d4a29
parent     8b03d1ed2c43a2ba5ef3381322ee4515b97381bf
download   linux-3c390df3337e54130e4b511ea3bbb868643cc5ea.tar.xz
Merge tag 'drm-for-v4.12' of git://people.freedesktop.org/~airlied/linux into drm-misc-next
Backmerging Dave's 'drm-for-v4.12' pull request now that it's landed. There are
a bunch of non-drm changes which are just random bits we hadn't yet picked up in
misc-next.
main drm pull request for 4.12 kernel
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: http://patchwork.freedesktop.org/patch/msgid/CAPM=9ty0jHgzG18zOr5CYODyTqZfH55kOCOFqNnXiWnTb_uNWw@mail.gmail.com
762 files changed, 16518 insertions, 7672 deletions
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
new file mode 100644
index 000000000000..f6b3f36d422b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
@@ -0,0 +1,75 @@
+Renesas Gen3 DWC HDMI TX Encoder
+================================
+
+The HDMI transmitter is a Synopsys DesignWare HDMI 1.4 TX controller IP
+with a companion PHY IP.
+
+These DT bindings follow the Synopsys DWC HDMI TX bindings defined in
+Documentation/devicetree/bindings/display/bridge/dw_hdmi.txt with the
+following device-specific properties.
+
+
+Required properties:
+
+- compatible : Shall contain one or more of
+  - "renesas,r8a7795-hdmi" for R8A7795 (R-Car H3) compatible HDMI TX
+  - "renesas,rcar-gen3-hdmi" for the generic R-Car Gen3 compatible HDMI TX
+
+  When compatible with generic versions, nodes must list the SoC-specific
+  version corresponding to the platform first, followed by the
+  family-specific version.
+
+- reg: See dw_hdmi.txt.
+- interrupts: HDMI interrupt number
+- clocks: See dw_hdmi.txt.
+- clock-names: Shall contain "iahb" and "isfr" as defined in dw_hdmi.txt.
+- ports: See dw_hdmi.txt. The DWC HDMI shall have one port numbered 0
+  corresponding to the video input of the controller and one port numbered 1
+  corresponding to its HDMI output. Each port shall have a single endpoint.
+
+Optional properties:
+
+- power-domains: Shall reference the power domain that contains the DWC HDMI,
+  if any.
+
+
+Example:
+
+    hdmi0: hdmi0@fead0000 {
+        compatible = "renesas,r8a7795-hdmi";
+        reg = <0 0xfead0000 0 0x10000>;
+        interrupts = <0 389 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&cpg CPG_CORE R8A7795_CLK_S0D4>, <&cpg CPG_MOD 729>;
+        clock-names = "iahb", "isfr";
+        power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+        status = "disabled";
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+            port@0 {
+                reg = <0>;
+                dw_hdmi0_in: endpoint {
+                    remote-endpoint = <&du_out_hdmi0>;
+                };
+            };
+            port@1 {
+                reg = <1>;
+                rcar_dw_hdmi0_out: endpoint {
+                    remote-endpoint = <&hdmi0_con>;
+                };
+            };
+        };
+    };
+
+    hdmi0-out {
+        compatible = "hdmi-connector";
+        label = "HDMI0 OUT";
+        type = "a";
+
+        port {
+            hdmi0_con: endpoint {
+                remote-endpoint = <&rcar_dw_hdmi0_out>;
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
index 708f5664a316..383183a89164 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
@@ -40,6 +40,7 @@ Required properties (all function blocks):
     "mediatek,<chip>-dpi"        - DPI controller, see mediatek,dpi.txt
     "mediatek,<chip>-disp-mutex" - display mutex
     "mediatek,<chip>-disp-od"    - overdrive
+  the supported chips are mt2701 and mt8173.
 - reg: Physical base address and length of the function block register space
 - interrupts: The interrupt signal from the function block (required, except for
   merge and split function blocks).
@@ -54,6 +55,7 @@ Required properties (DMA function blocks):
     "mediatek,<chip>-disp-ovl"
     "mediatek,<chip>-disp-rdma"
     "mediatek,<chip>-disp-wdma"
+  the supported chips are mt2701 and mt8173.
 - larb: Should contain a phandle pointing to the local arbiter device as defined
   in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
 - iommus: Should point to the respective IOMMU block with master port as
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
index 2b1585a34b85..fadf327c7cdf 100644
--- a/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
+++ b/Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
@@ -7,6 +7,7 @@ channel output.
 
 Required properties:
 - compatible: "mediatek,<chip>-dsi"
+  the supported chips are mt2701 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -25,6 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
 
 Required properties:
 - compatible: "mediatek,<chip>-mipi-tx"
+  the supported chips are mt2701 and mt8173.
 - reg: Physical base address and length of the controller's registers
 - clocks: PLL reference clock
 - clock-output-names: name of the output clock line to the DSI encoder
diff --git a/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt b/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt
new file mode 100644
index 000000000000..6812280cb109
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ampire,am-480272h3tmqw-t01h.txt
@@ -0,0 +1,26 @@
+Ampire AM-480272H3TMQW-T01H 4.3" WQVGA TFT LCD panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "ampire,am-480272h3tmqw-t01h"
+
+Optional properties:
+- power-supply: regulator to provide the supply voltage
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for RGB input.
+
+Example:
+    panel_rgb: panel-rgb {
+        compatible = "ampire,am-480272h3tmqw-t01h";
+        enable-gpios = <&gpioa 8 1>;
+        port {
+            panel_in_rgb: endpoint {
+                remote-endpoint = <&controller_out_rgb>;
+            };
+        };
+    };
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt
new file mode 100644
index 000000000000..ced0121aed7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.txt
@@ -0,0 +1,47 @@
+Mitsubishi AA104XD12 LVDS Display Panel
+=======================================
+
+The AA104XD12 is a 10.4" XGA TFT-LCD display panel.
+
+These DT bindings follow the LVDS panel bindings defined in panel-lvds.txt
+with the following device-specific properties.
+
+
+Required properties:
+
+- compatible: Shall contain "mitsubishi,aa104xd12" and "panel-lvds", in that
+  order.
+- vcc-supply: Reference to the regulator powering the panel VCC pins.
+
+
+Example
+-------
+
+panel {
+    compatible = "mitsubishi,aa104xd12", "panel-lvds";
+    vcc-supply = <&vcc_3v3>;
+
+    width-mm = <210>;
+    height-mm = <158>;
+
+    data-mapping = "jeida-24";
+
+    panel-timing {
+        /* 1024x768 @65Hz */
+        clock-frequency = <65000000>;
+        hactive = <1024>;
+        vactive = <768>;
+        hsync-len = <136>;
+        hfront-porch = <20>;
+        hback-porch = <160>;
+        vfront-porch = <3>;
+        vback-porch = <29>;
+        vsync-len = <6>;
+    };
+
+    port {
+        panel_in: endpoint {
+            remote-endpoint = <&lvds_encoder>;
+        };
+    };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt
new file mode 100644
index 000000000000..d6e1097504fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.txt
@@ -0,0 +1,47 @@
+Mitsubishi AA121TD01 LVDS Display Panel
+=======================================
+
+The AA121TD01 is a 12.1" WXGA TFT-LCD display panel.
+
+These DT bindings follow the LVDS panel bindings defined in panel-lvds.txt
+with the following device-specific properties.
+
+
+Required properties:
+
+- compatible: Shall contain "mitsubishi,aa121td01" and "panel-lvds", in that
+  order.
+- vcc-supply: Reference to the regulator powering the panel VCC pins.
+
+
+Example
+-------
+
+panel {
+    compatible = "mitsubishi,aa121td01", "panel-lvds";
+    vcc-supply = <&vcc_3v3>;
+
+    width-mm = <261>;
+    height-mm = <163>;
+
+    data-mapping = "jeida-24";
+
+    panel-timing {
+        /* 1280x800 @60Hz */
+        clock-frequency = <71000000>;
+        hactive = <1280>;
+        vactive = <800>;
+        hsync-len = <70>;
+        hfront-porch = <20>;
+        hback-porch = <70>;
+        vsync-len = <5>;
+        vfront-porch = <3>;
+        vback-porch = <15>;
+    };
+
+    port {
+        panel_in: endpoint {
+            remote-endpoint = <&lvds_encoder>;
+        };
+    };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
new file mode 100644
index 000000000000..ec52c472c845
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
@@ -0,0 +1,91 @@
+Common Properties for Display Panel
+===================================
+
+This document defines device tree properties common to several classes of
+display panels. It doesn't constitute a device tree binding specification by
+itself but is meant to be referenced by device tree bindings.
+
+When referenced from panel device tree bindings the properties defined in this
+document are defined as follows. The panel device tree bindings are
+responsible for defining whether each property is required or optional.
+
+
+Descriptive Properties
+----------------------
+
+- width-mm,
+- height-mm: The width-mm and height-mm specify the width and height of the
+  physical area where images are displayed. These properties are expressed in
+  millimeters and rounded to the closest unit.
+
+- label: The label property specifies a symbolic name for the panel as a
+  string suitable for use by humans. It typically contains a name inscribed on
+  the system (e.g. as an affixed label) or specified in the system's
+  documentation (e.g. in the user's manual).
+
+  If no such name exists, and unless the property is mandatory according to
+  device tree bindings, it shall rather be omitted than constructed of
+  non-descriptive information. For instance an LCD panel in a system that
+  contains a single panel shall not be labelled "LCD" if that name is not
+  inscribed on the system or used in a descriptive fashion in system
+  documentation.
+
+
+Display Timings
+---------------
+
+- panel-timing: Most display panels are restricted to a single resolution and
+  require specific display timings. The panel-timing subnode expresses those
+  timings as specified in the timing subnode section of the display timing
+  bindings defined in
+  Documentation/devicetree/bindings/display/display-timing.txt.
+
+
+Connectivity
+------------
+
+- ports: Panels receive video data through one or multiple connections. While
+  the nature of those connections is specific to the panel type, the
+  connectivity is expressed in a standard fashion using ports as specified in
+  the device graph bindings defined in
+  Documentation/devicetree/bindings/graph.txt.
+
+- ddc-i2c-bus: Some panels expose EDID information through an I2C-compatible
+  bus such as DDC2 or E-DDC. For such panels the ddc-i2c-bus contains a
+  phandle to the system I2C controller connected to that bus.
+
+
+Control I/Os
+------------
+
+Many display panels can be controlled through pins driven by GPIOs. The nature
+and timing of those control signals are device-specific and left for panel
+device tree bindings to specify. The following GPIO specifiers can however be
+used for panels that implement compatible control signals.
+
+- enable-gpios: Specifier for a GPIO connected to the panel enable control
+  signal. The enable signal is active high and enables operation of the panel.
+  This property can also be used for panels implementing an active low power
+  down signal, which is a negated version of the enable signal. Active low
+  enable signals (or active high power down signals) can be supported by
+  inverting the GPIO specifier polarity flag.
+
+  Note that the enable signal controls panel operation only and must not be
+  confused with a backlight enable signal.
+
+- reset-gpios: Specifier for a GPIO connected to the panel reset control
+  signal. The reset signal is active low and resets the panel internal logic
+  while active. Active high reset signals can be supported by inverting the
+  GPIO specifier polarity flag.
+
+
+Backlight
+---------
+
+Most display panels include a backlight. Some of them also include a backlight
+controller exposed through a control bus such as I2C or DSI. Others expose
+backlight control through GPIO, PWM or other signals connected to an external
+backlight controller.
+
+- backlight: For panels whose backlight is controlled by an external backlight
+  controller, this property contains a phandle that references the controller.
diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.txt b/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
new file mode 100644
index 000000000000..b938269f841e
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.txt
@@ -0,0 +1,120 @@
+LVDS Display Panel
+==================
+
+LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple
+incompatible data link layers have been used over time to transmit image data
+to LVDS panels. These bindings support display panels compatible with the
+following specifications.
+
+[JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February
+1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA)
+[LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National
+Semiconductor
+[VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video
+Electronics Standards Association (VESA)
+
+Devices compatible with those specifications have been marketed under the
+FPD-Link and FlatLink brands.
+
+
+Required properties:
+
+- compatible: Shall contain "panel-lvds" in addition to a mandatory
+  panel-specific compatible string defined in individual panel bindings. The
+  "panel-lvds" value shall never be used on its own.
+- width-mm: See panel-common.txt.
+- height-mm: See panel-common.txt.
+- data-mapping: The color signals mapping order, "jeida-18", "jeida-24"
+  or "vesa-24".
+
+Optional properties:
+
+- label: See panel-common.txt.
+- gpios: See panel-common.txt.
+- backlight: See panel-common.txt.
+- data-mirror: If set, reverse the bit order described in the data mappings
+  below on all data lanes, transmitting bits for slots 6 to 0 instead of
+  0 to 6.
+
+Required nodes:
+
+- panel-timing: See panel-common.txt.
+- ports: See panel-common.txt. These bindings require a single port subnode
+  corresponding to the panel LVDS input.
+
+
+LVDS data mappings are defined as follows.
+
+- "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and
+  [VESA] specifications. Data are transferred as follows on 3 LVDS lanes.
+
+Slot        0       1       2       3       4       5       6
+    ________________                         _________________
+Clock               \_______________________/
+      ______  ______  ______  ______  ______  ______  ______
+DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
+DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
+DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
+
+- "jeida-24" - 24-bit data mapping compatible with the [JEIDA] and [LDI]
+  specifications. Data are transferred as follows on 4 LVDS lanes.
+
+Slot        0       1       2       3       4       5       6
+    ________________                         _________________
+Clock               \_______________________/
+      ______  ______  ______  ______  ______  ______  ______
+DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__><
+DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__><
+DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__><
+DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__><
+
+- "vesa-24" - 24-bit data mapping compatible with the [VESA] specification.
+  Data are transferred as follows on 4 LVDS lanes.
+
+Slot        0       1       2       3       4       5       6
+    ________________                         _________________
+Clock               \_______________________/
+      ______  ______  ______  ______  ______  ______  ______
+DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__><
+DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__><
+DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__><
+DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__><
+
+Control signals are mapped as follows.
+
+CTL0: HSync
+CTL1: VSync
+CTL2: Data Enable
+CTL3: 0
+
+
+Example
+-------
+
+panel {
+    compatible = "mitsubishi,aa121td01", "panel-lvds";
+
+    width-mm = <261>;
+    height-mm = <163>;
+
+    data-mapping = "jeida-24";
+
+    panel-timing {
+        /* 1280x800 @60Hz */
+        clock-frequency = <71000000>;
+        hactive = <1280>;
+        vactive = <800>;
+        hsync-len = <70>;
+        hfront-porch = <20>;
+        hback-porch = <70>;
+        vsync-len = <5>;
+        vfront-porch = <3>;
+        vback-porch = <15>;
+    };
+
+    port {
+        panel_in: endpoint {
+            remote-endpoint = <&lvds_encoder>;
+        };
+    };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt
new file mode 100644
index 000000000000..18854f4c8376
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha2.txt
@@ -0,0 +1,28 @@
+Samsung S6E3HA2 5.7" 1440x2560 AMOLED panel
+
+Required properties:
+  - compatible: "samsung,s6e3ha2"
+  - reg: the virtual channel number of a DSI peripheral
+  - vdd3-supply: I/O voltage supply
+  - vci-supply: voltage supply for analog circuits
+  - reset-gpios: a GPIO spec for the reset pin (active low)
+  - enable-gpios: a GPIO spec for the panel enable pin (active high)
+
+Optional properties:
+  - te-gpios: a GPIO spec for the tearing effect synchronization signal
+    gpio pin (active high)
+
+Example:
+&dsi {
+    ...
+
+    panel@0 {
+        compatible = "samsung,s6e3ha2";
+        reg = <0>;
+        vdd3-supply = <&ldo27_reg>;
+        vci-supply = <&ldo28_reg>;
+        reset-gpios = <&gpg0 0 GPIO_ACTIVE_LOW>;
+        enable-gpios = <&gpf1 5 GPIO_ACTIVE_HIGH>;
+        te-gpios = <&gpf1 3 GPIO_ACTIVE_HIGH>;
+    };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt
new file mode 100644
index 000000000000..c6995dde641b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.txt
@@ -0,0 +1,37 @@
+Sitronix ST7789V RGB panel with SPI control bus
+
+Required properties:
+  - compatible: "sitronix,st7789v"
+  - reg: Chip select of the panel on the SPI bus
+  - reset-gpios: a GPIO phandle for the reset pin
+  - power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+  - backlight: phandle to the backlight used
+
+The generic bindings for the SPI slaves documented in [1] also apply.
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in [2]. This
+node should describe the panel's video bus.
+
+[1]: Documentation/devicetree/bindings/spi/spi-bus.txt
+[2]: Documentation/devicetree/bindings/graph.txt
+
+Example:
+
+panel@0 {
+    compatible = "sitronix,st7789v";
+    reg = <0>;
+    reset-gpios = <&pio 6 11 GPIO_ACTIVE_LOW>;
+    backlight = <&pwm_bl>;
+    spi-max-frequency = <100000>;
+    spi-cpol;
+    spi-cpha;
+
+    port {
+        panel_input: endpoint {
+            remote-endpoint = <&tcon0_out_panel>;
+        };
+    };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
new file mode 100644
index 000000000000..2a7e6e3ba64c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
@@ -0,0 +1,48 @@
+Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
+
+Required properties:
+- compatible: should be "winstar,wf35ltiacd"
+- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Example:
+    backlight: backlight {
+        compatible = "pwm-backlight";
+        pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
+        brightness-levels = <0 31 63 95 127 159 191 223 255>;
+        default-brightness-level = <191>;
+        power-supply = <&bl_reg>;
+    };
+
+    bl_reg: backlight_regulator {
+        compatible = "regulator-fixed";
+        regulator-name = "backlight-power-supply";
+        regulator-min-microvolt = <5000000>;
+        regulator-max-microvolt = <5000000>;
+    };
+
+    panel: panel {
+        compatible = "winstar,wf35ltiacd", "simple-panel";
+        backlight = <&backlight>;
+        power-supply = <&panel_reg>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        port {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            panel_input: endpoint {
+                remote-endpoint = <&hlcdc_panel_output>;
+            };
+        };
+    };
+
+    panel_reg: panel_regulator {
+        compatible = "regulator-fixed";
+        regulator-name = "panel-power-supply";
+        regulator-min-microvolt = <3300000>;
+        regulator-max-microvolt = <3300000>;
+    };
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 1a02f099a0ff..c6cb96a4fa93 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -36,6 +36,9 @@ Required Properties:
     When supplied they must be named "dclkin.x" with "x" being the input
     clock numerical index.
 
+  - vsps: A list of phandles to the VSP nodes that handle the memory
+    interfaces for the DU channels.
+
 Required nodes:
 
 The connections to the DU output video ports are modeled using the OF graph
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4686f4bdaca0..aed180d8e585 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -336,6 +336,7 @@ wd	Western Digital Corp.
 wetek	WeTek Electronics, limited.
 wexler	Wexler
 winbond	Winbond Electronics corp.
+winstar	Winstar Display Corp.
 wlf	Wolfson Microelectronics
 wm	Wondermedia Technologies, Inc.
 x-powers	X-Powers
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index fdcfdd79682a..fe25787ff6d4 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
 	int (*permission) (struct inode *, int, unsigned int);
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-			u32, unsigned int);
+	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
 	void (*update_time)(struct inode *, struct timespec *, int);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 95280079c0b3..5fb17f49f7a2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
 [recommended]
 	->readlink is optional for symlinks.  Don't set, unless filesystem needs
 	to fake something for readlink(2).
+--
+[mandatory]
+	->getattr() is now passed a struct path rather than a vfsmount and
+	dentry separately, and it now has request_mask and query_flags arguments
+	to specify the fields and sync type requested by statx.  Filesystems not
+	supporting any statx-specific features may ignore the new arguments.
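As an aside on the ->getattr() change documented above: for a filesystem with no statx-specific features, the new hook shape reduces to ignoring the two extra arguments. A minimal sketch, assuming a hypothetical example_getattr() built on the stock generic_fillattr() helper (illustrative only, not part of this merge):

    #include <linux/fs.h>
    #include <linux/stat.h>
    #include <linux/dcache.h>

    /*
     * Post-statx getattr(): the struct path replaces the old
     * vfsmount/dentry pair; request_mask and query_flags describe the
     * statx request and may be ignored by filesystems that support no
     * statx-specific features.
     */
    static int example_getattr(const struct path *path, struct kstat *stat,
                               u32 request_mask, unsigned int query_flags)
    {
        struct inode *inode = d_inode(path->dentry);

        generic_fillattr(inode, stat);  /* fill the kstat from the inode */
        return 0;
    }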
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 569211703721..94dd27ef4a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
 	int (*permission) (struct inode *, int);
 	int (*get_acl)(struct inode *, int);
 	int (*setattr) (struct dentry *, struct iattr *);
-	int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-			u32, unsigned int);
+	int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
 	ssize_t (*listxattr) (struct dentry *, char *, size_t);
 	void (*update_time)(struct inode *, struct timespec *, int);
 	int (*atomic_open)(struct inode *, struct dentry *, struct file *,
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index 54bd5faa8782..f2af35f6d6b2 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
 
 int __init foo_probe(void)
 {
+	int error;
+
 	struct pinctrl_dev *pctl;
 
-	return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+	error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+	if (error)
+		return error;
+
+	return pinctrl_enable(pctl);
 }
 
 To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 11ec2d93a5e0..61e9c78bd6d1 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
 
 .. code-block:: none
 
-     Cc: <stable@vger.kernel.org> # 3.3.x-
+     Cc: <stable@vger.kernel.org> # 3.3.x
 
 The tag has the meaning of:
 
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index 76e61c883347..b2f60ca8b60c 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
 
   Bits for undefined preemption levels are RAZ/WI.
 
+  For historical reasons and to provide ABI compatibility with userspace we
+  export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+  field in the lower 5 bits of a word, meaning that userspace must always
+  use the lower 5 bits to communicate with the KVM device and must shift the
+  value left by 3 places to obtain the actual priority mask level.
+
  Limitations:
  - Priorities are not implemented, and registers are RAZ/WI
  - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
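To make the arm-vgic GICC_PMR note above concrete: the device attribute exchanges the priority mask in the 5-bit GICH_VMCR.VMPriMask format, so converting to and from the architectural 8-bit GICC_PMR value amounts to a shift by 3. A small sketch of the arithmetic (the helper names here are ours, not part of the KVM API):

    #include <stdint.h>

    /* ABI word (lower 5 bits used) -> architectural GICC_PMR level. */
    static inline uint32_t vmpri_to_pmr(uint32_t abi_word)
    {
        return (abi_word & 0x1f) << 3;  /* shift left by 3 places */
    }

    /* Architectural GICC_PMR level -> lower-5-bit ABI format. */
    static inline uint32_t pmr_to_vmpri(uint32_t pmr)
    {
        return (pmr >> 3) & 0x1f;
    }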
diff --git a/MAINTAINERS b/MAINTAINERS index 84cf73ff3fcb..e8f86bc75006 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4117,14 +4117,13 @@ F: drivers/block/drbd/ F: lib/lru_cache.c F: Documentation/blockdev/drbd/ -DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS +DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git S: Supported F: Documentation/kobject.txt F: drivers/base/ F: fs/debugfs/ -F: fs/kernfs/ F: fs/sysfs/ F: include/linux/debugfs.h F: include/linux/kobj* @@ -4250,6 +4249,7 @@ L: dri-devel@lists.freedesktop.org S: Supported F: drivers/gpu/drm/sun4i/ F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux.git DRM DRIVERS FOR AMLOGIC SOCS M: Neil Armstrong <narmstrong@baylibre.com> @@ -4386,6 +4386,7 @@ S: Supported F: drivers/gpu/drm/rcar-du/ F: drivers/gpu/drm/shmobile/ F: include/linux/platform_data/shmob_drm.h +F: Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt F: Documentation/devicetree/bindings/display/renesas,du.txt DRM DRIVER FOR QXL VIRTUAL GPU @@ -4419,7 +4420,7 @@ DRM DRIVERS FOR STI M: Benjamin Gaignard <benjamin.gaignard@linaro.org> M: Vincent Abriou <vincent.abriou@st.com> L: dri-devel@lists.freedesktop.org -T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git +T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/sti F: Documentation/devicetree/bindings/display/st,stih4xx.txt @@ -4948,6 +4949,7 @@ F: include/linux/netfilter_bridge/ F: net/bridge/ ETHERNET PHY LIBRARY +M: Andrew Lunn <andrew@lunn.ch> M: Florian Fainelli <f.fainelli@gmail.com> L: netdev@vger.kernel.org S: Maintained @@ -7109,9 +7111,9 @@ S: Maintained F: fs/autofs4/ KERNEL BUILD + files below scripts/ (unless maintained elsewhere) +M: Masahiro Yamada <yamada.masahiro@socionext.com> M: Michal Marek <mmarek@suse.com> -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes +T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git L: linux-kbuild@vger.kernel.org S: Maintained F: Documentation/kbuild/ @@ -7228,6 +7230,14 @@ F: arch/mips/include/uapi/asm/kvm* F: arch/mips/include/asm/kvm* F: arch/mips/kvm/ +KERNFS +M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> +M: Tejun Heo <tj@kernel.org> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git +S: Supported +F: include/linux/kernfs.h +F: fs/kernfs/ + KEXEC M: Eric Biederman <ebiederm@xmission.com> W: http://kernel.org/pub/linux/utils/kernel/kexec/ @@ -10842,6 +10852,7 @@ F: drivers/s390/block/dasd* F: block/partitions/ibm.c S390 NETWORK DRIVERS +M: Julian Wiedmann <jwi@linux.vnet.ibm.com> M: Ursula Braun <ubraun@linux.vnet.ibm.com> L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -10872,6 +10883,7 @@ S: Supported F: drivers/s390/scsi/zfcp_* S390 IUCV NETWORK LAYER +M: Julian Wiedmann <jwi@linux.vnet.ibm.com> M: Ursula Braun <ubraun@linux.vnet.ibm.com> L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ @@ -13331,7 +13343,7 @@ F: drivers/virtio/ F: tools/virtio/ F: drivers/net/virtio_net.c F: drivers/block/virtio_blk.c -F: include/linux/virtio_*.h +F: include/linux/virtio*.h F: include/uapi/linux/virtio_*.h F: drivers/crypto/virtio/ @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 11 SUBLEVEL = 0 
-EXTRAVERSION = -rc5 +EXTRAVERSION = -rc7 NAME = Fearless Coyote # *DOCUMENTATION* @@ -372,7 +372,7 @@ LDFLAGS_MODULE = CFLAGS_KERNEL = AFLAGS_KERNEL = LDFLAGS_vmlinux = -CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized +CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,) CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,) @@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO + KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +endif + include scripts/Makefile.gcc-plugins ifdef CONFIG_READABLE_ASM @@ -798,12 +804,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 0b961093ca5c..6d76e528ab8f 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) /* copy relevant bits of struct timex. */ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) || copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - - offsetof(struct timex32, time))) + offsetof(struct timex32, tick))) return -EFAULT; ret = do_adjtimex(&txc); diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi index efb5eae290a8..d42b98f15e8b 100644 --- a/arch/arm/boot/dts/am335x-baltos.dtsi +++ b/arch/arm/boot/dts/am335x-baltos.dtsi @@ -371,6 +371,8 @@ phy1: ethernet-phy@1 { reg = <7>; + eee-broken-100tx; + eee-broken-1000t; }; }; diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 9e43c443738a..9ba4b18c0cb2 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -672,6 +672,7 @@ ti,non-removable; bus-width = <4>; cap-power-off-card; + keep-power-in-suspend; pinctrl-names = "default"; pinctrl-0 = <&mmc2_pins>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 2c9e56f4aac5..bbfb9d5a70a9 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -283,6 +283,7 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x20013000 0x13000 0 0xffed000>; + bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; linux,pci-domain = <0>; @@ -319,6 +320,7 @@ device_type = "pci"; ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x30013000 0x13000 0 0xffed000>; + bus-range = <0x00 0xff>; #interrupt-cells = <1>; num-lanes = <1>; linux,pci-domain = <1>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 8f9a69ca818c..efe53998c961 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -121,7 +121,7 @@ &i2c3 { clock-frequency = <400000>; at24@50 { - compatible = 
"at24,24c02"; + compatible = "atmel,24c64"; readonly; reg = <0x50>; }; diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi index 0467fb365bfc..306af6cadf26 100644 --- a/arch/arm/boot/dts/sun8i-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a33.dtsi @@ -66,12 +66,6 @@ opp-microvolt = <1200000>; clock-latency-ns = <244144>; /* 8 32k periods */ }; - - opp@1200000000 { - opp-hz = /bits/ 64 <1200000000>; - opp-microvolt = <1320000>; - clock-latency-ns = <244144>; /* 8 32k periods */ - }; }; cpus { @@ -81,16 +75,22 @@ operating-points-v2 = <&cpu0_opp_table>; }; + cpu@1 { + operating-points-v2 = <&cpu0_opp_table>; + }; + cpu@2 { compatible = "arm,cortex-a7"; device_type = "cpu"; reg = <2>; + operating-points-v2 = <&cpu0_opp_table>; }; cpu@3 { compatible = "arm,cortex-a7"; device_type = "cpu"; reg = <3>; + operating-points-v2 = <&cpu0_opp_table>; }; }; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 96dba7cd8be7..314eb6abe1ff 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void) if (__hyp_get_vectors() == hyp_default_vectors) cpu_init_hyp_mode(NULL); } + + if (vgic_present) + kvm_vgic_init_cpu_hardware(); } static void cpu_hyp_reset(void) diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 962616fd4ddd..582a972371cf 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) phys_addr_t addr = start, end = start + size; phys_addr_t next; + assert_spin_locked(&kvm->mmu_lock); pgd = kvm->arch.pgd + stage2_pgd_index(addr); do { next = stage2_pgd_addr_end(addr, end); if (!stage2_pgd_none(*pgd)) unmap_stage2_puds(kvm, pgd, addr, next); + /* + * If the range is too large, release the kvm->mmu_lock + * to prevent starvation and lockup detector warnings. 
+ */ + if (next != end) + cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } @@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm) int idx; idx = srcu_read_lock(&kvm->srcu); + down_read(¤t->mm->mmap_sem); spin_lock(&kvm->mmu_lock); slots = kvm_memslots(kvm); @@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm) stage2_unmap_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); + up_read(¤t->mm->mmap_sem); srcu_read_unlock(&kvm->srcu, idx); } @@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) if (kvm->arch.pgd == NULL) return; + spin_lock(&kvm->mmu_lock); unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); + spin_unlock(&kvm->mmu_lock); + /* Free the HW pgd, one page at a time */ free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); kvm->arch.pgd = NULL; @@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, (KVM_PHYS_SIZE >> PAGE_SHIFT)) return -EFAULT; + down_read(¤t->mm->mmap_sem); /* * A memory region could potentially cover multiple VMAs, and any holes * between them, so iterate over all of them to find out if we can map @@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, pa += vm_start - vma->vm_start; /* IO region dirty page logging not allowed */ - if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) - return -EINVAL; + if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) { + ret = -EINVAL; + goto out; + } ret = kvm_phys_addr_ioremap(kvm, gpa, pa, vm_end - vm_start, @@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, } while (hva < reg_end); if (change == KVM_MR_FLAGS_ONLY) - return ret; + goto out; spin_lock(&kvm->mmu_lock); if (ret) @@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, else stage2_flush_memslot(kvm, memslot); spin_unlock(&kvm->mmu_lock); +out: + up_read(¤t->mm->mmap_sem); return ret; } diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index c4f2ace91ea2..3089d3bfa19b 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h @@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops; extern int omap4_mpuss_init(void); extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); +extern u32 omap4_get_cpu1_ns_pa_addr(void); #else static inline int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state) diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c index d3fb5661bb5d..433db6d0b073 100644 --- a/arch/arm/mach-omap2/omap-hotplug.c +++ b/arch/arm/mach-omap2/omap-hotplug.c @@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu) omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF); if (omap_secure_apis_support()) - boot_cpu = omap_read_auxcoreboot0(); + boot_cpu = omap_read_auxcoreboot0() >> 9; else boot_cpu = readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5; diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c index 113ab2dd2ee9..03ec6d307c82 100644 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c @@ -64,6 +64,7 @@ #include "prm-regbits-44xx.h" static void __iomem *sar_base; +static u32 old_cpu1_ns_pa_addr; #if defined(CONFIG_PM) && defined(CONFIG_SMP) @@ -212,6 +213,11 @@ static void __init save_l2x0_context(void) {} #endif +u32 omap4_get_cpu1_ns_pa_addr(void) +{ + return old_cpu1_ns_pa_addr; +} + /** * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function * The purpose of this function is to manage low 
power programming @@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void) void __init omap4_mpuss_early_init(void) { unsigned long startup_pa; + void __iomem *ns_pa_addr; - if (!(cpu_is_omap44xx() || soc_is_omap54xx())) + if (!(soc_is_omap44xx() || soc_is_omap54xx())) return; sar_base = omap4_get_sar_ram_base(); - if (cpu_is_omap443x()) + /* Save old NS_PA_ADDR for validity checks later on */ + if (soc_is_omap44xx()) + ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET; + else + ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET; + old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr); + + if (soc_is_omap443x()) startup_pa = __pa_symbol(omap4_secondary_startup); - else if (cpu_is_omap446x()) + else if (soc_is_omap446x()) startup_pa = __pa_symbol(omap4460_secondary_startup); else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) startup_pa = __pa_symbol(omap5_secondary_hyp_startup); else startup_pa = __pa_symbol(omap5_secondary_startup); - if (cpu_is_omap44xx()) + if (soc_is_omap44xx()) writel_relaxed(startup_pa, sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET); else diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S index fd90125bffc7..72506e6cf9e7 100644 --- a/arch/arm/mach-omap2/omap-smc.S +++ b/arch/arm/mach-omap2/omap-smc.S @@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0) ldr r12, =0x103 dsb smc #0 - mov r0, r0, lsr #9 ldmfd sp!, {r2-r12, pc} ENDPROC(omap_read_auxcoreboot0) diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 003353b0b794..3faf454ba487 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -21,6 +21,7 @@ #include <linux/io.h> #include <linux/irqchip/arm-gic.h> +#include <asm/sections.h> #include <asm/smp_scu.h> #include <asm/virt.h> @@ -40,10 +41,14 @@ #define OMAP5_CORE_COUNT 0x2 +#define AUX_CORE_BOOT0_GP_RELEASE 0x020 +#define AUX_CORE_BOOT0_HS_RELEASE 0x200 + struct omap_smp_config { unsigned long cpu1_rstctrl_pa; void __iomem *cpu1_rstctrl_va; void __iomem *scu_base; + void __iomem *wakeupgen_base; void *startup_addr; }; @@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) static struct clockdomain *cpu1_clkdm; static bool booted; static struct powerdomain *cpu1_pwrdm; - void __iomem *base = omap_get_wakeupgen_base(); /* * Set synchronisation state between this boot processor @@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * A barrier is added to ensure that write buffer is drained */ if (omap_secure_apis_support()) - omap_modify_auxcoreboot0(0x200, 0xfffffdff); + omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE, + 0xfffffdff); else - writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0); + writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE, + cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0); if (!cpu1_clkdm && !cpu1_pwrdm) { cpu1_clkdm = clkdm_lookup("mpu1_clkdm"); @@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void) set_cpu_possible(i, true); } +/* + * For now, just make sure the start-up address is not within the booting + * kernel space as that means we just overwrote whatever secondary_startup() + * code there was. 
+ */ +static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr) +{ + if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start))) + return false; + + return true; +} + +/* + * We may need to reset CPU1 before configuring, otherwise kexec boot can end + * up trying to use old kernel startup address or suspend-resume will + * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper + * idle states. + */ +static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c) +{ + unsigned long cpu1_startup_pa, cpu1_ns_pa_addr; + bool needs_reset = false; + u32 released; + + if (omap_secure_apis_support()) + released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE; + else + released = readl_relaxed(cfg.wakeupgen_base + + OMAP_AUX_CORE_BOOT_0) & + AUX_CORE_BOOT0_GP_RELEASE; + if (released) { + pr_warn("smp: CPU1 not parked?\n"); + + return; + } + + cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base + + OMAP_AUX_CORE_BOOT_1); + cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr(); + + /* Did the configured secondary_startup() get overwritten? */ + if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa)) + needs_reset = true; + + /* + * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a + * deeper idle state in WFI and will wake to an invalid address. + */ + if ((soc_is_omap44xx() || soc_is_omap54xx()) && + !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr)) + needs_reset = true; + + if (!needs_reset || !c->cpu1_rstctrl_va) + return; + + pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n", + cpu1_startup_pa, cpu1_ns_pa_addr); + + writel_relaxed(1, c->cpu1_rstctrl_va); + readl_relaxed(c->cpu1_rstctrl_va); + writel_relaxed(0, c->cpu1_rstctrl_va); +} + static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) { - void __iomem *base = omap_get_wakeupgen_base(); const struct omap_smp_config *c = NULL; if (soc_is_omap443x()) @@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) /* Must preserve cfg.scu_base set earlier */ cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa; cfg.startup_addr = c->startup_addr; + cfg.wakeupgen_base = omap_get_wakeupgen_base(); if (soc_is_dra74x() || soc_is_omap54xx()) { if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) @@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) if (cfg.scu_base) scu_enable(cfg.scu_base); - /* - * Reset CPU1 before configuring, otherwise kexec will - * end up trying to use old kernel startup address. 
- */ - if (cfg.cpu1_rstctrl_va) { - writel_relaxed(1, cfg.cpu1_rstctrl_va); - readl_relaxed(cfg.cpu1_rstctrl_va); - writel_relaxed(0, cfg.cpu1_rstctrl_va); - } + omap4_smp_maybe_reset_cpu1(&cfg); /* * Write the address of secondary startup routine into the @@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr)); else writel_relaxed(__pa_symbol(cfg.startup_addr), - base + OMAP_AUX_CORE_BOOT_1); + cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1); } const struct smp_operations omap4_smp_ops __initconst = { diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index e920dd83e443..f989145480c8 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb, dev_err(dev, "failed to idle\n"); } break; + case BUS_NOTIFY_BIND_DRIVER: + od = to_omap_device(pdev); + if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && + pm_runtime_status_suspended(dev)) { + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; + pm_runtime_set_active(dev); + } + break; case BUS_NOTIFY_ADD_DEVICE: if (pdev->dev.of_node) omap_device_build_from_dt(pdev); diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig index 633442ad4e4c..2a7bb6ccdcb7 100644 --- a/arch/arm/mach-orion5x/Kconfig +++ b/arch/arm/mach-orion5x/Kconfig @@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X select GPIOLIB select MVEBU_MBUS select PCI + select PHYLIB if NETDEVICES select PLAT_ORION_LEGACY help Support for the following Marvell Orion 5x series SoCs: diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 63eabb06f9f1..475811f5383a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); } +/* + * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems + * that the intention is to allow exporting memory allocated via the + * coherent DMA APIs through the dma_buf API, which only accepts a + * scattertable. This presents a couple of problems: + * 1. Not all memory allocated via the coherent DMA APIs is backed by + * a struct page + * 2. Passing coherent DMA memory into the streaming APIs is not allowed + * as we will try to flush the memory through a different alias to that + * actually being used (and the flushes are redundant.) 
+ */ int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, unsigned long attrs) { - struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); + unsigned long pfn = dma_to_pfn(dev, handle); + struct page *page; int ret; + /* If the PFN is not valid, we do not have a struct page */ + if (!pfn_valid(pfn)) + return -ENXIO; + + page = pfn_to_page(pfn); + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); if (unlikely(ret)) return ret; diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 3b5c7aaf9c76..33a45bd96860 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val) */ static inline bool security_extensions_enabled(void) { - return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); + /* Check CPUID Identification Scheme before ID_PFR1 read */ + if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) + return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); + return 0; } static unsigned long __init setup_vectors_base(void) diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 9255b6d67ba5..aff6994950ba 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, eth_data, &orion_ge11); } +#ifdef CONFIG_ARCH_ORION5X /***************************************************************************** * Ethernet switch ****************************************************************************/ @@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d) struct mdio_board_info *bd; unsigned int i; + if (!IS_BUILTIN(CONFIG_PHYLIB)) + return; + for (i = 0; i < ARRAY_SIZE(d->port_names); i++) if (!strcmp(d->port_names[i], "cpu")) break; @@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d) mdiobus_register_board_info(&orion_ge00_switch_board_info, 1); } +#endif /***************************************************************************** * I2C diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c index b6dc9d838a9a..ad1f4e6a9e33 100644 --- a/arch/arm/probes/kprobes/core.c +++ b/arch/arm/probes/kprobes/core.c @@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs) #endif if (p) { - if (cur) { + if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) { + /* + * Probe hit but conditional execution check failed, + * so just skip the instruction and continue as if + * nothing had happened. + * In this case, we can skip recursing check too. + */ + singlestep_skip(p, regs); + } else if (cur) { /* Kprobe is pending, so we're recursing. */ switch (kcb->kprobe_status) { case KPROBE_HIT_ACTIVE: case KPROBE_HIT_SSDONE: + case KPROBE_HIT_SS: /* A pre- or post-handler probe got us here. */ kprobes_inc_nmissed_count(p); save_previous_kprobe(kcb); @@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs) singlestep(p, regs, kcb); restore_previous_kprobe(kcb); break; + case KPROBE_REENTER: + /* A nested probe was hit in FIQ, it is a BUG */ + pr_warn("Unrecoverable kprobe detected at %p.\n", + p->addr); + /* fall through */ default: /* impossible cases */ BUG(); } - } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) { + } else { /* Probe hit and conditional execution check ok. 
*/ set_current_kprobe(p); kcb->kprobe_status = KPROBE_HIT_ACTIVE; @@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs) } reset_current_kprobe(); } - } else { - /* - * Probe hit but conditional execution check failed, - * so just skip the instruction and continue as if - * nothing had happened. - */ - singlestep_skip(p, regs); } } else if (cur) { /* We probably hit a jprobe. Call its break handler. */ @@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + kprobe_opcode_t *correct_ret_addr = NULL; INIT_HLIST_HEAD(&empty_rp); kretprobe_hash_lock(current, &head, &flags); @@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) /* another task is sharing our hash bucket */ continue; + orig_ret_address = (unsigned long)ri->ret_addr; + + if (orig_ret_address != trampoline_address) + /* + * This is the real return address. Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + + kretprobe_assert(ri, orig_ret_address, trampoline_address); + + correct_ret_addr = ri->ret_addr; + hlist_for_each_entry_safe(ri, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + orig_ret_address = (unsigned long)ri->ret_addr; if (ri->rp && ri->rp->handler) { __this_cpu_write(current_kprobe, &ri->rp->kp); get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; + ri->ret_addr = correct_ret_addr; ri->rp->handler(ri, regs); __this_cpu_write(current_kprobe, NULL); } - orig_ret_address = (unsigned long)ri->ret_addr; recycle_rp_inst(ri, &empty_rp); if (orig_ret_address != trampoline_address) @@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs) break; } - kretprobe_assert(ri, orig_ret_address, trampoline_address); kretprobe_hash_unlock(current, &flags); hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c index c893726aa52d..1c98a87786ca 100644 --- a/arch/arm/probes/kprobes/test-core.c +++ b/arch/arm/probes/kprobes/test-core.c @@ -977,7 +977,10 @@ static void coverage_end(void) void __naked __kprobes_test_case_start(void) { __asm__ __volatile__ ( - "stmdb sp!, {r4-r11} \n\t" + "mov r2, sp \n\t" + "bic r3, r2, #7 \n\t" + "mov sp, r3 \n\t" + "stmdb sp!, {r2-r11} \n\t" "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" "bic r0, lr, #1 @ r0 = inline data \n\t" "mov r1, sp \n\t" @@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void) "movne pc, r0 \n\t" "mov r0, r4 \n\t" "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "ldmia sp!, {r4-r11} \n\t" + "ldmia sp!, {r2-r11} \n\t" + "mov sp, r2 \n\t" "mov pc, r0 \n\t" ); } @@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void) "bxne r0 \n\t" "mov r0, r4 \n\t" "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" - "ldmia sp!, {r4-r11} \n\t" + "ldmia sp!, {r2-r11} \n\t" + "mov sp, r2 \n\t" "bx r0 \n\t" ); } diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 1c64ea2d23f9..0565779e66fa 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -179,8 +179,10 @@ usbphy: phy@01c19400 { compatible = "allwinner,sun50i-a64-usb-phy"; reg = <0x01c19400 0x14>, + <0x01c1a800 0x4>, 
<0x01c1b800 0x4>; reg-names = "phy_ctrl", + "pmu0", "pmu1"; clocks = <&ccu CLK_USB_PHY0>, <&ccu CLK_USB_PHY1>; diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 4bf899fb451b..1b35b8bddbfb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -42,7 +42,20 @@ #include <asm/pgtable.h> #include <asm/tlbflush.h> -static const char *fault_name(unsigned int esr); +struct fault_info { + int (*fn)(unsigned long addr, unsigned int esr, + struct pt_regs *regs); + int sig; + int code; + const char *name; +}; + +static const struct fault_info fault_info[]; + +static inline const struct fault_info *esr_to_fault_info(unsigned int esr) +{ + return fault_info + (esr & 63); +} #ifdef CONFIG_KPROBES static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) @@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, struct pt_regs *regs) { struct siginfo si; + const struct fault_info *inf; if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) { + inf = esr_to_fault_info(esr); pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", - tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, + tsk->comm, task_pid_nr(tsk), inf->name, sig, addr, esr); show_pte(tsk->mm, addr); show_regs(regs); @@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re { struct task_struct *tsk = current; struct mm_struct *mm = tsk->active_mm; + const struct fault_info *inf; /* * If we are in kernel mode at this point, we have no context to * handle this fault with. */ - if (user_mode(regs)) - __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs); - else + if (user_mode(regs)) { + inf = esr_to_fault_info(esr); + __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs); + } else __do_kernel_fault(mm, addr, esr, regs); } @@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) return 1; } -static const struct fault_info { - int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); - int sig; - int code; - const char *name; -} fault_info[] = { +static const struct fault_info fault_info[] = { { do_bad, SIGBUS, 0, "ttbr address size fault" }, { do_bad, SIGBUS, 0, "level 1 address size fault" }, { do_bad, SIGBUS, 0, "level 2 address size fault" }, @@ -560,19 +572,13 @@ static const struct fault_info { { do_bad, SIGBUS, 0, "unknown 63" }, }; -static const char *fault_name(unsigned int esr) -{ - const struct fault_info *inf = fault_info + (esr & 63); - return inf->name; -} - /* * Dispatch a data abort to the relevant handler. 
*/ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { - const struct fault_info *inf = fault_info + (esr & 63); + const struct fault_info *inf = esr_to_fault_info(esr); struct siginfo info; if (!inf->fn(addr, esr, regs)) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index e25584d72396..7514a000e361 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt) hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); } else if (ps == PUD_SIZE) { hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); - } else if (ps == (PAGE_SIZE * CONT_PTES)) { - hugetlb_add_hstate(CONT_PTE_SHIFT); - } else if (ps == (PMD_SIZE * CONT_PMDS)) { - hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); } else { hugetlb_bad_size(); pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); @@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); - -#ifdef CONFIG_ARM64_64K_PAGES -static __init int add_default_hugepagesz(void) -{ - if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) - hugetlb_add_hstate(CONT_PTE_SHIFT); - return 0; -} -arch_initcall(add_default_hugepagesz); -#endif diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..a2c139808cfe --- /dev/null +++ b/arch/ia64/include/asm/asm-prototypes.h @@ -0,0 +1,29 @@ +#ifndef _ASM_IA64_ASM_PROTOTYPES_H +#define _ASM_IA64_ASM_PROTOTYPES_H + +#include <asm/cacheflush.h> +#include <asm/checksum.h> +#include <asm/esi.h> +#include <asm/ftrace.h> +#include <asm/page.h> +#include <asm/pal.h> +#include <asm/string.h> +#include <asm/uaccess.h> +#include <asm/unwind.h> +#include <asm/xor.h> + +extern const char ia64_ivt[]; + +signed int __divsi3(signed int, unsigned int); +signed int __modsi3(signed int, unsigned int); + +signed long long __divdi3(signed long long, unsigned long long); +signed long long __moddi3(signed long long, unsigned long long); + +unsigned int __udivsi3(unsigned int, unsigned int); +unsigned int __umodsi3(unsigned int, unsigned int); + +unsigned long long __udivdi3(unsigned long long, unsigned long long); +unsigned long long __umoddi3(unsigned long long, unsigned long long); + +#endif /* _ASM_IA64_ASM_PROTOTYPES_H */ diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index 1f3d3877618f..0a40b14407b1 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile @@ -24,25 +24,25 @@ AFLAGS___modsi3.o = -DMODULO AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO $(obj)/__divdi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__moddi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__divsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__modsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE - $(call if_changed_dep,as_o_S) + $(call if_changed_rule,as_o_S) diff --git a/arch/metag/include/asm/uaccess.h 
b/arch/metag/include/asm/uaccess.h index 273e61225c27..07238b39638c 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count); #define strlen_user(str) strnlen_user(str, 32767) -extern unsigned long __must_check __copy_user_zeroing(void *to, - const void __user *from, - unsigned long n); +extern unsigned long raw_copy_from_user(void *to, const void __user *from, + unsigned long n); static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long res = n; if (likely(access_ok(VERIFY_READ, from, n))) - return __copy_user_zeroing(to, from, n); - memset(to, 0, n); - return n; + res = raw_copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } -#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) +#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) #define __copy_from_user_inatomic __copy_from_user extern unsigned long __must_check __copy_user(void __user *to, diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index b3ebfe9c8e88..2792fc621088 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -29,7 +29,6 @@ COPY \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ FIXUP \ " MOVT D1Ar1,#HI(1b)\n" \ " JUMP D1Ar1,#LO(1b)\n" \ @@ -260,27 +259,31 @@ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #32\n" \ "23:\n" \ - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "SUB %3, %3, #32\n" \ "24:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "25:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #32\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "25:\n" \ + "27:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "28:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ "SUB %3, %3, #32\n" \ - "27:\n" \ + "30:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "28:\n" \ + "31:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %0, %0, #8\n" \ - "29:\n" \ + "33:\n" \ "SETL [%0++], D0.7, D1.7\n" \ "SUB %3, %3, #32\n" \ "1:" \ @@ -312,11 +315,15 @@ " .long 26b,3b\n" \ " .long 27b,3b\n" \ " .long 28b,3b\n" \ - " .long 29b,4b\n" \ + " .long 29b,3b\n" \ + " .long 30b,3b\n" \ + " .long 31b,3b\n" \ + " .long 32b,3b\n" \ + " .long 33b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -342,7 +349,7 @@ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -403,47 +410,55 @@ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #16\n" \ "23:\n" \ - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "24:\n" \ - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "SUB %3, %3, #16\n" \ - "25:\n" \ + "24:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "25:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #16\n" \ "27:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, 
[%1++]\n" \ "28:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ + "SUB %3, %3, #16\n" \ + "30:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "31:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %3, %3, #16\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "29:\n" \ + "33:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "30:\n" \ + "34:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "35:\n" \ "SUB %3, %3, #16\n" \ - "31:\n" \ + "36:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "32:\n" \ + "37:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "38:\n" \ "SUB %3, %3, #16\n" \ - "33:\n" \ + "39:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "34:\n" \ + "40:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "41:\n" \ "SUB %3, %3, #16\n" \ - "35:\n" \ + "42:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "36:\n" \ + "43:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "44:\n" \ "SUB %0, %0, #4\n" \ - "37:\n" \ + "45:\n" \ "SETD [%0++], D0.7\n" \ "SUB %3, %3, #16\n" \ "1:" \ @@ -483,11 +498,19 @@ " .long 34b,3b\n" \ " .long 35b,3b\n" \ " .long 36b,3b\n" \ - " .long 37b,4b\n" \ + " .long 37b,3b\n" \ + " .long 38b,3b\n" \ + " .long 39b,3b\n" \ + " .long 40b,3b\n" \ + " .long 41b,3b\n" \ + " .long 42b,3b\n" \ + " .long 43b,3b\n" \ + " .long 44b,3b\n" \ + " .long 45b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -513,7 +536,7 @@ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, if ((unsigned long) src & 1) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ while (n > 0) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ while (n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } } @@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } if (n >= RAPF_MIN_BUF_SIZE) { @@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } #endif @@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 16) { __asm_copy_to_user_16(dst, src, retn); n -= 16; + if (retn) + return retn + n; } while (n >= 4) { __asm_copy_to_user_4(dst, src, retn); n -= 4; + if (retn) + return retn + n; } switch (n) { @@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, break; } + /* + * If we get here, retn correctly reflects the number of failing + * bytes. 
+ */ return retn; } EXPORT_SYMBOL(__copy_user); @@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_user_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "2: SETB [%0++],D1Ar1\n", \ - "3: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "3: ADD %2,%2,#1\n", \ " .long 2b,3b\n") #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETW D1Ar1,[%1++]\n" \ "2: SETW [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#2\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_2(to, from, ret) \ @@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_from_user_2x_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "5: ADD %2,%2,#1\n", \ " .long 4b,5b\n") #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETD D1Ar1,[%1++]\n" \ "2: SETD [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#4\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_4(to, from, ret) \ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") -#define __asm_copy_from_user_5(to, from, ret) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 4b,5b\n") - -#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "4: SETW [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_6(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_7(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "4: SETD [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_8(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_9(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "6: SETW [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_10(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_11(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "6: SETD [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" 
FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_12(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_13(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "8: SETW [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_14(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_15(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "10: SETB [%0++],D1Ar1\n", \ - "11: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 10b,11b\n") - -#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "8: SETD [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_16(to, from, ret) \ - __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") - #define __asm_copy_from_user_8x64(to, from, ret) \ asm volatile ( \ " GETL D0Ar2,D1Ar1,[%1++]\n" \ "2: SETL [%0++],D0Ar2,D1Ar1\n" \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ - " MOV D0Ar2,#0\n" \ "3: ADD %2,%2,#8\n" \ - " SETL [%0++],D0Ar2,D1Ar1\n" \ " MOVT D0Ar2,#HI(1b)\n" \ " JUMP D0Ar2,#LO(1b)\n" \ " .previous\n" \ @@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user); * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 8 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*8 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. */ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #8\n") + "LSR D0Ar2, D0Ar2, #5\n" \ + "ANDS D0Ar2, D0Ar2, #0x38\n" \ + "ADDZ D0Ar2, D0Ar2, #32\n" \ + "SUB %1, %1, D0Ar2\n") /* rewind 'from' pointer when a fault occurs * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 4 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*4 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. 
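 *
 * (Editor's worked example of the rewind arithmetic, assuming the bit
 * positions stated above and using illustrative variable names: with
 * LSM_STEP in TXSTATUS bits 10:8,
 *
 *	step = (txstatus >> 8) & 0x7;
 *	if (step == 0)			// fault on the last of 4 ops
 *		step = 4;
 *	from -= step * 4;		// 4 bytes per 32-bit transfer
 *
 * which is what the LSR/ANDS/ADDZ sequence below computes: LSR #6 plus
 * AND #0x1c yields step*4 directly, and ADDZ supplies the 16-byte
 * rewind for the wrapped step==0 case.)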
*/ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #4\n") + "LSR D0Ar2, D0Ar2, #6\n" \ + "ANDS D0Ar2, D0Ar2, #0x1c\n" \ + "ADDZ D0Ar2, D0Ar2, #16\n" \ + "SUB %1, %1, D0Ar2\n") -/* Copy from user to kernel, zeroing the bytes that were inaccessible in - userland. The return-value is the number of bytes that were - inaccessible. */ -unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, - unsigned long n) +/* + * Copy from user to kernel. The return-value is the number of bytes that were + * inaccessible. + */ +unsigned long raw_copy_from_user(void *pdst, const void __user *psrc, + unsigned long n) { register char *dst asm ("A0.2") = pdst; register const char __user *src asm ("A1.2") = psrc; @@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, if ((unsigned long) src & 1) { __asm_copy_from_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ @@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_1(dst, src, retn); n--; if (retn) - goto copy_exception_bytes; + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ @@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_2(dst, src, retn); n -= 2; if (retn) - goto copy_exception_bytes; + return retn + n; } } - /* We only need one check after the unalignment-adjustments, - because if both adjustments were done, either both or - neither reference had an exception. */ - if (retn != 0) - goto copy_exception_bytes; - #ifdef USE_RAPF /* 64 bit copy loop */ if (!(((unsigned long) src | (unsigned long) dst) & 7)) { @@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } @@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } #endif @@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, n -= 4; if (retn) - goto copy_exception_bytes; + return retn + n; } /* If we get here, there were no memory read faults. */ @@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, /* If we get here, retn correctly reflects the number of failing bytes. */ return retn; - - copy_exception_bytes: - /* We already have "retn" bytes cleared, and need to clear the - remaining "n" bytes. A non-optimized simple byte-for-byte in-line - memset is preferred here, since this isn't speed-critical code and - we'd rather have this a leaf-function than calling memset. 
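 *
 * (Editor's sketch: with this patch the zeroing moves out of the metag
 * routine entirely. raw_copy_from_user() only reports how many bytes
 * it could not read, and the inline wrapper in uaccess.h — see the
 * hunk earlier in this diff — zero-fills just the tail that was
 * actually missed, roughly:
 *
 *	res = raw_copy_from_user(to, from, n);
 *	if (res)		// res bytes at the end were not read
 *		memset(to + (n - res), 0, res);
 *	return res;
 *
 * so short reads still leave no uninitialized kernel memory behind.)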
*/ - { - char *endp; - for (endp = dst + n; dst < endp; dst++) - *dst = 0; - } - - return retn + n; } -EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(raw_copy_from_user); #define __asm_clear_8x64(to, ret) \ asm volatile ( \ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a008a9f03072..e0bb576410bb 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA select GENERIC_CSUM - select MIPS_O32_FP64_SUPPORT if MIPS32_O32 + select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 select HAVE_KVM help Choose this option to build a kernel for release 6 or later of the diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index f94455f964ec..a2813fe381cf 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -21,6 +21,7 @@ #include <asm/cpu-features.h> #include <asm/fpu_emulator.h> #include <asm/hazards.h> +#include <asm/ptrace.h> #include <asm/processor.h> #include <asm/current.h> #include <asm/msa.h> diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index 956db6e201d1..ddd1c918103b 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h @@ -18,9 +18,24 @@ #include <irq.h> #define IRQ_STACK_SIZE THREAD_SIZE +#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long)) extern void *irq_stack[NR_CPUS]; +/* + * The highest address on the IRQ stack contains a dummy frame put down in + * genex.S (handle_int & except_vec_vi_handler) which is structured as follows: + * + * top ------------ + * | task sp | <- irq_stack[cpu] + IRQ_STACK_START + * ------------ + * | | <- First frame of IRQ context + * ------------ + * + * task sp holds a copy of the task stack pointer where the struct pt_regs + * from exception entry can be found. 
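 *
 * (Editor's sketch, using the names from the layout above: the
 * unwinder can pick the saved pointer straight off the top of the
 * IRQ stack,
 *
 *	unsigned long *top = (unsigned long *)(irq_stack[cpu] + IRQ_STACK_START);
 *	unsigned long task_sp = *top;		// stored by genex.S
 *	struct pt_regs *regs = (struct pt_regs *)task_sp;
 *
 * which is exactly what the unwind_stack_by_address() change later in
 * this diff does, after first validating task_sp with
 * object_is_on_stack().)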
+ */ + static inline bool on_irq_stack(int cpu, unsigned long sp) { unsigned long low = (unsigned long)irq_stack[cpu]; diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index f485afe51514..a8df44d60607 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " andi %[ticket], %[ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" - "2: \n" + "2: .insn \n" " .subsection 2 \n" "4: andi %[ticket], %[ticket], 0xffff \n" " sll %[ticket], 5 \n" @@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " sc %[ticket], %[ticket_ptr] \n" " beqz %[ticket], 1b \n" " li %[ticket], 1 \n" - "2: \n" + "2: .insn \n" " .subsection 2 \n" "3: b 2b \n" " li %[ticket], 0 \n" @@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) " .set reorder \n" __WEAK_LLSC_MB " li %2, 1 \n" - "2: \n" + "2: .insn \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); @@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " li %2, 1 \n" - "2: \n" + "2: .insn \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) : GCC_OFF_SMALL_ASM() (rw->lock) diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 3e940dbe0262..78faf4292e90 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -386,17 +386,18 @@ #define __NR_pkey_mprotect (__NR_Linux + 363) #define __NR_pkey_alloc (__NR_Linux + 364) #define __NR_pkey_free (__NR_Linux + 365) +#define __NR_statx (__NR_Linux + 366) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 365 +#define __NR_Linux_syscalls 366 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 365 +#define __NR_O32_Linux_syscalls 366 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -730,16 +731,17 @@ #define __NR_pkey_mprotect (__NR_Linux + 323) #define __NR_pkey_alloc (__NR_Linux + 324) #define __NR_pkey_free (__NR_Linux + 325) +#define __NR_statx (__NR_Linux + 326) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 325 +#define __NR_Linux_syscalls 326 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 325 +#define __NR_64_Linux_syscalls 326 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1077,15 +1079,16 @@ #define __NR_pkey_mprotect (__NR_Linux + 327) #define __NR_pkey_alloc (__NR_Linux + 328) #define __NR_pkey_free (__NR_Linux + 329) +#define __NR_statx (__NR_Linux + 330) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 329 +#define __NR_Linux_syscalls 330 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 329 +#define __NR_N32_Linux_syscalls 330 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index bb5c5d34ba81..a670c0c11875 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -102,6 +102,7 @@ void output_thread_info_defines(void) DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); + DEFINE(_IRQ_STACK_START, IRQ_STACK_START); BLANK(); } diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S 
index 59476a607add..a00e87b0256d 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg) END(mips_cps_get_bootcfg) LEAF(mips_cps_boot_vpes) - PTR_L ta2, COREBOOTCFG_VPEMASK(a0) + lw ta2, COREBOOTCFG_VPEMASK(a0) PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) #if defined(CONFIG_CPU_MIPSR6) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 07718bb5fc9d..12422fd4af23 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) } decode_configs(c); - c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; + c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; default: diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 7ec9612cb007..ae810da4d499 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp) beq t0, t1, 2f /* Switch to IRQ stack */ - li t1, _IRQ_STACK_SIZE + li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 + /* Save task's sp on IRQ stack so that unwinding can follow it */ + LONG_S s1, 0(sp) 2: jal plat_irq_dispatch @@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp) beq t0, t1, 2f /* Switch to IRQ stack */ - li t1, _IRQ_STACK_SIZE + li t1, _IRQ_STACK_START PTR_ADD sp, t0, t1 + /* Save task's sp on IRQ stack so that unwinding can follow it */ + LONG_S s1, 0(sp) 2: jalr v0 @@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER reserved reserved sti verbose /* others */ .align 5 - LEAF(handle_ri_rdhwr_vivt) + LEAF(handle_ri_rdhwr_tlbp) .set push .set noat .set noreorder @@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp) .set pop bltz k1, handle_ri /* slow path */ /* fall thru */ - END(handle_ri_rdhwr_vivt) + END(handle_ri_rdhwr_tlbp) LEAF(handle_ri_rdhwr) .set push diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index fb6b6b650719..b68e10fc453d 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, unsigned long pc, unsigned long *ra) { + unsigned long low, high, irq_stack_high; struct mips_frame_info info; unsigned long size, ofs; + struct pt_regs *regs; int leaf; - extern void ret_from_irq(void); - extern void ret_from_exception(void); if (!stack_page) return 0; /* - * If we reached the bottom of interrupt context, - * return saved pc in pt_regs. + * IRQ stacks start at IRQ_STACK_START + * task stacks at THREAD_SIZE - 32 */ - if (pc == (unsigned long)ret_from_irq || - pc == (unsigned long)ret_from_exception) { - struct pt_regs *regs; - if (*sp >= stack_page && - *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) { - regs = (struct pt_regs *)*sp; - pc = regs->cp0_epc; - if (!user_mode(regs) && __kernel_text_address(pc)) { - *sp = regs->regs[29]; - *ra = regs->regs[31]; - return pc; - } + low = stack_page; + if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) { + high = stack_page + IRQ_STACK_START; + irq_stack_high = high; + } else { + high = stack_page + THREAD_SIZE - 32; + irq_stack_high = 0; + } + + /* + * If we reached the top of the interrupt stack, start unwinding + * the interrupted task stack. 
+ */ + if (unlikely(*sp == irq_stack_high)) { + unsigned long task_sp = *(unsigned long *)*sp; + + /* + * Check that the pointer saved in the IRQ stack head points to + * something within the stack of the current task + */ + if (!object_is_on_stack((void *)task_sp)) + return 0; + + /* + * Follow pointer to tasks kernel stack frame where interrupted + * state was saved. + */ + regs = (struct pt_regs *)task_sp; + pc = regs->cp0_epc; + if (!user_mode(regs) && __kernel_text_address(pc)) { + *sp = regs->regs[29]; + *ra = regs->regs[31]; + return pc; } return 0; } @@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page, if (leaf < 0) return 0; - if (*sp < stack_page || - *sp + info.frame_size > stack_page + THREAD_SIZE - 32) + if (*sp < low || *sp + info.frame_size > high) return 0; if (leaf) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index c29d397eee86..80ed68b2c95e 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -600,3 +600,4 @@ EXPORT(sys_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ + PTR sys_statx diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 0687f96ee912..49765b44aa9b 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -438,4 +438,5 @@ EXPORT(sys_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 5325 */ + PTR sys_statx .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 0331ba39a065..90bad2d1b2d3 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -433,4 +433,5 @@ EXPORT(sysn32_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free + PTR sys_statx /* 6330 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 5a47042dd25f..2dd70bd104e1 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -588,4 +588,5 @@ EXPORT(sys32_call_table) PTR sys_pkey_mprotect PTR sys_pkey_alloc PTR sys_pkey_free /* 4365 */ + PTR sys_statx .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c7d17cfb32f6..b49e7bf9f950 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void); extern asmlinkage void handle_sys(void); extern asmlinkage void handle_bp(void); extern asmlinkage void handle_ri(void); -extern asmlinkage void handle_ri_rdhwr_vivt(void); +extern asmlinkage void handle_ri_rdhwr_tlbp(void); extern asmlinkage void handle_ri_rdhwr(void); extern asmlinkage void handle_cpu(void); extern asmlinkage void handle_ov(void); @@ -2408,9 +2408,18 @@ void __init trap_init(void) set_except_vector(EXCCODE_SYS, handle_sys); set_except_vector(EXCCODE_BP, handle_bp); - set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri : - (cpu_has_vtag_icache ? 
- handle_ri_rdhwr_vivt : handle_ri_rdhwr)); + + if (rdhwr_noopt) + set_except_vector(EXCCODE_RI, handle_ri); + else { + if (cpu_has_vtag_icache) + set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); + else if (current_cpu_type() == CPU_LOONGSON3) + set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); + else + set_except_vector(EXCCODE_RI, handle_ri_rdhwr); + } + set_except_vector(EXCCODE_CPU, handle_cpu); set_except_vector(EXCCODE_OV, handle_ov); set_except_vector(EXCCODE_TR, handle_tr); diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c index 3c3aa05891dd..95bec460b651 100644 --- a/arch/mips/lantiq/xway/sysctrl.c +++ b/arch/mips/lantiq/xway/sysctrl.c @@ -467,7 +467,7 @@ void __init ltq_soc_init(void) if (!np_xbar) panic("Failed to load xbar nodes from devicetree"); - if (of_address_to_resource(np_pmu, 0, &res_xbar)) + if (of_address_to_resource(np_xbar, 0, &res_xbar)) panic("Failed to get xbar resources"); if (!request_mem_region(res_xbar.start, resource_size(&res_xbar), res_xbar.name)) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index e7f798d55fbc..3fe99cb271a9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1562,6 +1562,7 @@ static void probe_vcache(void) vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; c->vcache.waybit = 0; + c->vcache.waysize = vcache_size / c->vcache.ways; pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); @@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void) /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */ scache_size *= 4; c->scache.waybit = 0; + c->scache.waysize = scache_size / c->scache.ways; pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); if (scache_size) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 9bfee8988eaf..4f642e07c2b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte, static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, struct uasm_label **l, unsigned int pte, - unsigned int ptr) + unsigned int ptr, + unsigned int flush) { #ifdef CONFIG_SMP UASM_i_SC(p, pte, 0, ptr); @@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r, #else UASM_i_SW(p, pte, 0, ptr); #endif + if (cpu_has_ftlb && flush) { + BUG_ON(!cpu_has_tlbinv); + + UASM_i_MFC0(p, ptr, C0_ENTRYHI); + uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); + UASM_i_MTC0(p, ptr, C0_ENTRYHI); + build_tlb_write_entry(p, l, r, tlb_indexed); + + uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV); + UASM_i_MTC0(p, ptr, C0_ENTRYHI); + build_huge_update_entries(p, pte, ptr); + build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0); + + return; + } + build_huge_update_entries(p, pte, ptr); build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0); } @@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void) uasm_l_tlbl_goaround2(&l, p); } uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID)); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); #endif uasm_l_nopage_tlbl(&l, p); @@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void) build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + 
build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1); #endif uasm_l_nopage_tlbs(&l, p); @@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void) build_tlb_probe_entry(&p); uasm_i_ori(&p, wr.r1, wr.r1, _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY); - build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2); + build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0); #endif uasm_l_nopage_tlbm(&l, p); diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c index c4ffd43d3996..48ce701557a4 100644 --- a/arch/mips/ralink/rt3883.c +++ b/arch/mips/ralink/rt3883.c @@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; -static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; +static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; static struct rt2880_pmx_func pci_func[] = { FUNC("pci-dev", 0, 40, 32), FUNC("pci-host2", 1, 40, 32), @@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = { FUNC("pci-fnc", 3, 40, 32) }; static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; -static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; +static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; static struct rt2880_pmx_group rt3883_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c index 367c5426157b..3901b80d4420 100644 --- a/arch/nios2/kernel/prom.c +++ b/arch/nios2/kernel/prom.c @@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) return alloc_bootmem_align(size, align); } +int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, + bool nomap) +{ + reserve_bootmem(base, size, BOOTMEM_DEFAULT); + return 0; +} + void __init early_init_devtree(void *params) { __be32 *dtb = (u32 *)__dtb_start; diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c index 6e57ffa5db27..6044d9be28b4 100644 --- a/arch/nios2/kernel/setup.c +++ b/arch/nios2/kernel/setup.c @@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p) } #endif /* CONFIG_BLK_DEV_INITRD */ + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + unflatten_and_copy_device_tree(); setup_cpuinfo(); diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S index f01188c044ee..85c28bb80fb7 100644 --- a/arch/parisc/lib/lusercopy.S +++ b/arch/parisc/lib/lusercopy.S @@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy) add dst,len,end /* short copy with less than 16 bytes? */ - cmpib,>>=,n 15,len,.Lbyte_loop + cmpib,COND(>>=),n 15,len,.Lbyte_loop /* same alignment? 
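 *
 * (Editor's note on the test below, a sketch of the idiom: XORing the
 * two pointers exposes whether they differ in their low bits. In C
 * terms,
 *
 *	if (((unsigned long)src ^ (unsigned long)dst) & 7)
 *		// low bits differ: the pointers can never be co-aligned
 *		// to 8 bytes, so take the unaligned copy path
 *
 * since only pointers with identical low bits can both be brought to
 * the same word boundary by copying leading bytes.)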
*/ xor src,dst,t0 @@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy) /* loop until we are 64-bit aligned */ .Lalign_loop64: extru dst,31,3,t1 - cmpib,=,n 0,t1,.Lcopy_loop_16 + cmpib,=,n 0,t1,.Lcopy_loop_16_start 20: ldb,ma 1(srcspc,src),t1 21: stb,ma t1,1(dstspc,dst) b .Lalign_loop64 @@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy) ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) +.Lcopy_loop_16_start: ldi 31,t0 .Lcopy_loop_16: cmpb,COND(>>=),n t0,len,.Lword_loop @@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy) /* loop until we are 32-bit aligned */ .Lalign_loop32: extru dst,31,2,t1 - cmpib,=,n 0,t1,.Lcopy_loop_4 + cmpib,=,n 0,t1,.Lcopy_loop_8 20: ldb,ma 1(srcspc,src),t1 21: stb,ma t1,1(dstspc,dst) b .Lalign_loop32 @@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy) ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) -.Lcopy_loop_4: +.Lcopy_loop_8: cmpib,COND(>>=),n 15,len,.Lbyte_loop 10: ldw 0(srcspc,src),t1 @@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy) ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) - b .Lcopy_loop_4 + b .Lcopy_loop_8 ldo -16(len),len .Lbyte_loop: @@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy) .Lunaligned_copy: /* align until dst is 32bit-word-aligned */ extru dst,31,2,t1 - cmpib,COND(=),n 0,t1,.Lcopy_dstaligned + cmpib,=,n 0,t1,.Lcopy_dstaligned 20: ldb 0(srcspc,src),t1 ldo 1(src),src 21: stb,ma t1,1(dstspc,dst) @@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy) cmpiclr,<> 1,t0,%r0 b,n .Lcase1 .Lcase0: - cmpb,= %r0,len,.Lcda_finish + cmpb,COND(=) %r0,len,.Lcda_finish nop 1: ldw,ma 4(srcspc,src), a3 @@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy) 1: ldw,ma 4(srcspc,src), a3 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) ldo -1(len),len - cmpb,=,n %r0,len,.Ldo0 + cmpb,COND(=),n %r0,len,.Ldo0 .Ldo4: 1: ldw,ma 4(srcspc,src), a0 ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) @@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy) 1: stw,ma t0, 4(dstspc,dst) ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) ldo -4(len),len - cmpb,<> %r0,len,.Ldo4 + cmpb,COND(<>) %r0,len,.Ldo4 nop .Ldo0: shrpw a2, a3, %sar, t0 @@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy) /* fault exception fixup handlers: */ #ifdef CONFIG_64BIT .Lcopy16_fault: -10: b .Lcopy_done - std,ma t1,8(dstspc,dst) + b .Lcopy_done +10: std,ma t1,8(dstspc,dst) ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) #endif .Lcopy8_fault: -10: b .Lcopy_done - stw,ma t1,4(dstspc,dst) + b .Lcopy_done +10: stw,ma t1,4(dstspc,dst) ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) .exit diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index 411994551afc..f058e0c3e4d4 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len) } if (len & ~VMX_ALIGN_MASK) { + preempt_disable(); pagefault_disable(); enable_kernel_altivec(); crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK); + disable_kernel_altivec(); pagefault_enable(); + preempt_enable(); } tail = len & VMX_ALIGN_MASK; diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index cbc7c42cdb74..ec7a8b099dd9 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs) nb = aligninfo[instr].len; flags = aligninfo[instr].flags; - /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ - if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { - nb = 8; - flags = LD+SW; - } else if (IS_XFORM(instruction) && - ((instruction >> 1) & 0x3ff) == 660) { - nb = 8; 
- flags = ST+SW; + /* + * Handle some cases which give overlaps in the DSISR values. + */ + if (IS_XFORM(instruction)) { + switch (get_xop(instruction)) { + case 532: /* ldbrx */ + nb = 8; + flags = LD+SW; + break; + case 660: /* stdbrx */ + nb = 8; + flags = ST+SW; + break; + case 20: /* lwarx */ + case 84: /* ldarx */ + case 116: /* lharx */ + case 276: /* lqarx */ + return 0; /* not emulated ever */ + } } /* Byteswap little endian loads and stores */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index ae179cb1bb3c..c119044cad0d 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -67,7 +67,7 @@ PPC64_CACHES: * flush all bytes from start through stop-1 inclusive */ -_GLOBAL(flush_icache_range) +_GLOBAL_TOC(flush_icache_range) BEGIN_FTR_SECTION PURGE_PREFETCHED_INS blr @@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range) * * flush all bytes from start to stop-1 inclusive */ -_GLOBAL(flush_dcache_range) +_GLOBAL_TOC(flush_dcache_range) /* * Flush the data cache to memory diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 9cfaa8b69b5f..f997154dfc41 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void) mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); } + /* + * Fixup HFSCR:TM based on CPU features. The bit is set by our + * early asm init because at that point we haven't updated our + * CPU features from firmware and device-tree. Here we have, + * so let's do it. + */ + if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP)) + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM); + /* Set IR and DR in PACA MSR */ get_paca()->kernel_msr = MSR_KERNEL; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 8c68145ba1bd..710e491206ed 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, /* start new resize */ resize = kzalloc(sizeof(*resize), GFP_KERNEL); + if (!resize) { + ret = -ENOMEM; + goto out; + } resize->order = shift; resize->kvm = kvm; INIT_WORK(&resize->work, resize_hpt_prepare_work); diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index cc332608e656..65bb8f33b399 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c @@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local) unsigned long psize = batch->psize; int ssize = batch->ssize; int i; + unsigned int use_local; + + use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && + mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); local_irq_save(flags); @@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local) } pte_iterate_hashed_end(); } - if (mmu_has_feature(MMU_FTR_TLBIEL) && - mmu_psize_defs[psize].tlbiel && local) { + if (use_local) { asm volatile("ptesync":::"memory"); for (i = 0; i < number; i++) { vpn = batch->vpn[i]; diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index fa95041fa9f6..33ca29333e18 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size) unsigned long decompress_kernel(void) { - unsigned long output_addr; - unsigned char *output; + void *output, *kernel_end; - output_addr = ((unsigned long) &_end + HEAP_SIZE + 
4095UL) & -4096UL; - check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); - memset(&_bss, 0, &_ebss - &_bss); - free_mem_ptr = (unsigned long)&_end; - free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; - output = (unsigned char *) output_addr; + output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE); + kernel_end = output + SZ__bss_start; + check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); #ifdef CONFIG_BLK_DEV_INITRD /* * Move the initrd right behind the end of the decompressed - * kernel image. + * kernel image. This also prevents initrd corruption caused by + * bss clearing since kernel_end will always be located behind the + * current bss section.. */ - if (INITRD_START && INITRD_SIZE && - INITRD_START < (unsigned long) output + SZ__bss_start) { - check_ipl_parmblock(output + SZ__bss_start, - INITRD_START + INITRD_SIZE); - memmove(output + SZ__bss_start, - (void *) INITRD_START, INITRD_SIZE); - INITRD_START = (unsigned long) output + SZ__bss_start; + if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { + check_ipl_parmblock(kernel_end, INITRD_SIZE); + memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); + INITRD_START = (unsigned long) kernel_end; } #endif + /* + * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be + * initialized afterwards since they reside in bss. + */ + memset(&_bss, 0, &_ebss - &_bss); + free_mem_ptr = (unsigned long) &_end; + free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; + puts("Uncompressing Linux... "); __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); puts("Ok, booting the kernel.\n"); diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 136932ff4250..3ea1554d04b3 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from, " jg 2b\n" \ ".popsection\n" \ EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ - : "=d" (__rc), "=Q" (*(to)) \ + : "=d" (__rc), "+Q" (*(to)) \ : "d" (size), "Q" (*(from)), \ "d" (__reg0), "K" (-EFAULT) \ : "cc"); \ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 47a973b5b4f1..5dab859b0d54 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void) { struct pcpu *pcpu = pcpu_devices; + WARN_ON(!cpu_present(0) || !cpu_online(0)); pcpu->state = CPU_STATE_CONFIGURED; - pcpu->address = stap(); pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix(); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); - set_cpu_present(0, true); - set_cpu_online(0, true); } void __init smp_cpus_done(unsigned int max_cpus) @@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus) void __init smp_setup_processor_id(void) { + pcpu_devices[0].address = stap(); S390_lowcore.cpu_nr = 0; S390_lowcore.spinlock_lockval = arch_spin_lockval(0); } diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index d55c829a5944..ddbffb715b40 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -168,8 +168,7 @@ union page_table_entry { unsigned long z : 1; /* Zero Bit */ unsigned long i : 1; /* Page-Invalid Bit */ unsigned long p : 1; /* DAT-Protection Bit */ - unsigned long co : 1; /* Change-Recording Override */ - unsigned long : 8; + unsigned long : 9; }; }; @@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, return PGM_PAGE_TRANSLATION; 
if (pte.z) return PGM_TRANSLATION_SPEC; - if (pte.co && !edat1) - return PGM_TRANSLATION_SPEC; dat_protection |= pte.p; raddr.pfra = pte.pfra; real_address: @@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg, rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val); if (!rc && pte.i) rc = PGM_PAGE_TRANSLATION; - if (!rc && (pte.z || (pte.co && sg->edat_level < 1))) + if (!rc && pte.z) rc = PGM_TRANSLATION_SPEC; shadow_page: pte.p |= dat_protection; diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index f294dd42fc7d..5961b2d8398a 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,6 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 +#define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 #define REAL_HPAGE_SIZE (_AC(1,UL) << REAL_HPAGE_SHIFT) @@ -27,7 +28,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 3 +#define HUGE_MAX_HSTATE 4 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8a598528ec1f..6fbd931f0570 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd) return pte_pfn(pte); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline unsigned long pmd_dirty(pmd_t pmd) +#define __HAVE_ARCH_PMD_WRITE +static inline unsigned long pmd_write(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_dirty(pte); + return pte_write(pte); } -static inline unsigned long pmd_young(pmd_t pmd) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline unsigned long pmd_dirty(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_young(pte); + return pte_dirty(pte); } -static inline unsigned long pmd_write(pmd_t pmd) +static inline unsigned long pmd_young(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); - return pte_write(pte); + return pte_young(pte); } static inline unsigned long pmd_trans_huge(pmd_t pmd) diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index 365d4cb267b4..dd27159819eb 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h @@ -18,12 +18,6 @@ #include <asm/signal.h> #include <asm/page.h> -/* - * The sparc has no problems with write protection - */ -#define wp_works_ok 1 -#define wp_works_ok__is_a_macro /* for versions in ksyms.c */ - /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too... * That one page is used to protect kernel from intruders, so that * we can make our access_ok test faster diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 6448cfc8292f..b58ee9018433 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h @@ -18,10 +18,6 @@ #include <asm/ptrace.h> #include <asm/page.h> -/* The sparc has no problems with write protection */ -#define wp_works_ok 1 -#define wp_works_ok__is_a_macro /* for versions in ksyms.c */ - /* * User lives in his very own context, and cannot reference us. 
Note * that TASK_SIZE is a misnomer, it really gives maximum user virtual diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 6aa3da152c20..44101196d02b 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -96,6 +96,7 @@ sparc64_boot: andn %g1, PSTATE_AM, %g1 wrpr %g1, 0x0, %pstate ba,a,pt %xcc, 1f + nop .globl prom_finddev_name, prom_chosen_path, prom_root_node .globl prom_getprop_name, prom_mmu_name, prom_peer_name @@ -613,6 +614,7 @@ niagara_tlb_fixup: nop ba,a,pt %xcc, 80f + nop niagara4_patch: call niagara4_patch_copyops nop @@ -622,6 +624,7 @@ niagara4_patch: nop ba,a,pt %xcc, 80f + nop niagara2_patch: call niagara2_patch_copyops @@ -632,6 +635,7 @@ niagara2_patch: nop ba,a,pt %xcc, 80f + nop niagara_patch: call niagara_patch_copyops diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S index 34b4933900bf..9276d2f0dd86 100644 --- a/arch/sparc/kernel/misctrap.S +++ b/arch/sparc/kernel/misctrap.S @@ -82,6 +82,7 @@ do_stdfmna: call handle_stdfmna add %sp, PTREGS_OFF, %o0 ba,a,pt %xcc, rtrap + nop .size do_stdfmna,.-do_stdfmna .type breakpoint_trap,#function diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 216948ca4382..709a82ebd294 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -237,6 +237,7 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 bne,pt %xcc, user_rtt_fill_32bit wrpr %g1, %cwp ba,a,pt %xcc, user_rtt_fill_64bit + nop user_rtt_fill_fixup_dax: ba,pt %xcc, user_rtt_fill_fixup_common diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S index 4a73009f66a5..d7e540842809 100644 --- a/arch/sparc/kernel/spiterrs.S +++ b/arch/sparc/kernel/spiterrs.S @@ -86,6 +86,7 @@ __spitfire_cee_trap_continue: rd %pc, %g7 ba,a,pt %xcc, 2f + nop 1: ba,pt %xcc, etrap_irq rd %pc, %g7 diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S index 6179e19bc9b9..c19f352f46c7 100644 --- a/arch/sparc/kernel/sun4v_tlb_miss.S +++ b/arch/sparc/kernel/sun4v_tlb_miss.S @@ -352,6 +352,7 @@ sun4v_mna: call sun4v_do_mna add %sp, PTREGS_OFF, %o0 ba,a,pt %xcc, rtrap + nop /* Privileged Action. 
*/ sun4v_privact: diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S index 5604a2b051d4..364af3250646 100644 --- a/arch/sparc/kernel/urtt_fill.S +++ b/arch/sparc/kernel/urtt_fill.S @@ -92,6 +92,7 @@ user_rtt_fill_fixup_common: call sun4v_data_access_exception nop ba,a,pt %xcc, rtrap + nop 1: call spitfire_data_access_exception nop diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S index 855019a8590e..1ee173cc3c39 100644 --- a/arch/sparc/kernel/winfixup.S +++ b/arch/sparc/kernel/winfixup.S @@ -152,6 +152,8 @@ fill_fixup_dax: call sun4v_data_access_exception nop ba,a,pt %xcc, rtrap + nop 1: call spitfire_data_access_exception nop ba,a,pt %xcc, rtrap + nop diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index c629dbd121b6..64dcd6cdb606 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S @@ -326,11 +326,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ blu 170f nop ba,a,pt %xcc, 180f + nop 4: /* 32 <= low bits < 48 */ blu 150f nop ba,a,pt %xcc, 160f + nop 5: /* 0 < low bits < 32 */ blu,a 6f cmp %g2, 8 @@ -338,6 +340,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ blu 130f nop ba,a,pt %xcc, 140f + nop 6: /* 0 < low bits < 16 */ bgeu 120f nop @@ -475,6 +478,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %o2, 85f sub %o0, %o1, GLOBAL_SPARE ba,a,pt %XCC, 90f + nop .align 64 75: /* 16 < len <= 64 */ diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index 75bb93b1437f..78ea962edcbe 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -530,4 +530,5 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bne,pt %icc, 1b EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) ba,a,pt %icc, .Lexit + nop .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/NG4memset.S b/arch/sparc/lib/NG4memset.S index 41da4bdd95cb..7c0c81f18837 100644 --- a/arch/sparc/lib/NG4memset.S +++ b/arch/sparc/lib/NG4memset.S @@ -102,4 +102,5 @@ NG4bzero: bne,pt %icc, 1b add %o0, 0x30, %o0 ba,a,pt %icc, .Lpostloop + nop .size NG4bzero,.-NG4bzero diff --git a/arch/sparc/lib/NGmemcpy.S b/arch/sparc/lib/NGmemcpy.S index d88c4ed50a00..cd654a719b27 100644 --- a/arch/sparc/lib/NGmemcpy.S +++ b/arch/sparc/lib/NGmemcpy.S @@ -394,6 +394,7 @@ FUNC_NAME: /* %i0=dst, %i1=src, %i2=len */ brz,pt %i2, 85f sub %o0, %i1, %i3 ba,a,pt %XCC, 90f + nop .align 64 70: /* 16 < len <= 64 */ diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 323bc6b6e3ad..ee5273ad918d 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { + case HPAGE_2GB_SHIFT: + hugepage_size = _PAGE_SZ2GB_4V; + pte_val(entry) |= _PAGE_PMD_HUGE; + break; case HPAGE_256MB_SHIFT: hugepage_size = _PAGE_SZ256MB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; switch (tte_szbits) { + case _PAGE_SZ2GB_4V: + shift = HPAGE_2GB_SHIFT; + break; case _PAGE_SZ256MB_4V: shift = HPAGE_256MB_SHIFT; break; @@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, if (!pmd) return NULL; - if (sz == PMD_SHIFT) + if (sz >= PMD_SIZE) pte = (pte_t *)pmd; else pte = pte_alloc_map(mm, pmd, addr); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index ccd455328989..0cda653ae007 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -337,6 +337,10 @@ static 
int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { + case HPAGE_2GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_2GB; + hv_pgsz_idx = HV_PGSZ_IDX_2GB; + break; case HPAGE_256MB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_256MB; hv_pgsz_idx = HV_PGSZ_IDX_256MB; @@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr) if ((long)addr < 0L) { unsigned long pa = __pa(addr); - if ((addr >> max_phys_bits) != 0UL) + if ((pa >> max_phys_bits) != 0UL) return false; return pfn_valid(pa >> PAGE_SHIFT); diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index def82f6d626f..8e76ebba2986 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -54,6 +54,7 @@ enum mbus_module srmmu_modtype; static unsigned int hwbug_bitmask; int vac_cache_size; +EXPORT_SYMBOL(vac_cache_size); int vac_line_size; extern struct resource sparc_iomap; diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index afda3bbf7854..ee8066c3d96c 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, if (pte_val(*pte) & _PAGE_VALID) { bool exec = pte_exec(*pte); - tlb_batch_add_one(mm, vaddr, exec, false); + tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT); } pte++; vaddr += PAGE_SIZE; @@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, pte_t orig_pte = __pte(pmd_val(orig)); bool exec = pte_exec(orig_pte); - tlb_batch_add_one(mm, addr, exec, true); + tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT); tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec, - true); + REAL_HPAGE_SHIFT); } else { tlb_batch_pmd_scan(mm, addr, orig); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 0a04811f06b7..bedf08b22a47 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb) spin_lock_irqsave(&mm->context.lock, flags); - if (tb->hugepage_shift < HPAGE_SHIFT) { + if (tb->hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) @@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, spin_lock_irqsave(&mm->context.lock, flags); - if (hugepage_shift < HPAGE_SHIFT) { + if (hugepage_shift < REAL_HPAGE_SHIFT) { base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; if (tlb_type == cheetah_plus || tlb_type == hypervisor) diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c index 7853b53959cd..3f9d1a83891a 100644 --- a/arch/x86/entry/vdso/vdso32-setup.c +++ b/arch/x86/entry/vdso/vdso32-setup.c @@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s) { vdso32_enabled = simple_strtoul(s, NULL, 0); - if (vdso32_enabled > 1) + if (vdso32_enabled > 1) { pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n"); + vdso32_enabled = 0; + } return 1; } @@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup); /* Register vsyscall32 into the ABI table */ #include <linux/sysctl.h> +static const int zero; +static const int one = 1; + static struct ctl_table abi_table2[] = { { .procname = "vsyscall32", .data = &vdso32_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = (int *)&zero, + .extra2 = (int *)&one, }, {} }; diff --git 
a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 81b321ace8e0..f924629836a8 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) cpuc->lbr_entries[i].to = msr_lastbranch.to; cpuc->lbr_entries[i].mispred = 0; cpuc->lbr_entries[i].predicted = 0; + cpuc->lbr_entries[i].in_tx = 0; + cpuc->lbr_entries[i].abort = 0; + cpuc->lbr_entries[i].cycles = 0; cpuc->lbr_entries[i].reserved = 0; } cpuc->lbr_stack.nr = i; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 9d49c18b5ea9..3762536619f8 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -287,7 +287,7 @@ struct task_struct; #define ARCH_DLINFO_IA32 \ do { \ - if (vdso32_enabled) { \ + if (VDSO_CURRENT_BASE) { \ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ } \ diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h index 2c1ebeb4d737..529bb4a6487a 100644 --- a/arch/x86/include/asm/pmem.h +++ b/arch/x86/include/asm/pmem.h @@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n) * @size: number of bytes to write back * * Write back a cache range using the CLWB (cache line write back) - * instruction. + * instruction. Note that @size is internally rounded up to be cache + * line size aligned. */ static inline void arch_wb_cache_pmem(void *addr, size_t size) { @@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size) clwb(p); } -/* - * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec - * iterators, so for other types (bvec & kvec) we must do a cache write-back. - */ -static inline bool __iter_needs_pmem_wb(struct iov_iter *i) -{ - return iter_is_iovec(i) == false; -} - /** * arch_copy_from_iter_pmem - copy data from an iterator to PMEM * @addr: PMEM destination address @@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes, /* TODO: skip the write-back by always using non-temporal stores */ len = copy_from_iter_nocache(addr, bytes, i); - if (__iter_needs_pmem_wb(i)) + /* + * In the iovec case on x86_64 copy_from_iter_nocache() uses + * non-temporal stores for the bulk of the transfer, but we need + * to manually flush if the transfer is unaligned. A cached + * memory copy is used when destination or size is not naturally + * aligned. That is: + * - Require 8-byte alignment when size is 8 bytes or larger. + * - Require 4-byte alignment when size is 4 bytes. + * + * In the non-iovec case the entire destination needs to be + * flushed. 
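The alignment rule spelled out in the arch_copy_from_iter_pmem() comment above decides when a manual cache write-back is needed around the non-temporal copy. A standalone sketch of the head-of-transfer check under those stated rules (IS_ALIGNED is reimplemented locally for the demo; the tail check in the patch is analogous and elided):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	/*
	 * Per the rule above: below 8 bytes, only a 4-byte store to a 4-byte
	 * aligned address uses non-temporal stores; at 8 bytes and beyond, the
	 * destination must be 8-byte aligned. Anything else goes through the
	 * cache, and that line must be written back explicitly.
	 */
	static bool head_needs_flush(uintptr_t dest, size_t bytes)
	{
		if (bytes < 8)
			return !IS_ALIGNED(dest, 4) || bytes != 4;
		return !IS_ALIGNED(dest, 8);
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       head_needs_flush(0x1000, 4),	/* 0: aligned 4-byte store */
		       head_needs_flush(0x1000, 3),	/* 1: odd size */
		       head_needs_flush(0x1003, 16));	/* 1: misaligned start */
		return 0;
	}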
+ */ + if (iter_is_iovec(i)) { + unsigned long flushed, dest = (unsigned long) addr; + + if (bytes < 8) { + if (!IS_ALIGNED(dest, 4) || (bytes != 4)) + arch_wb_cache_pmem(addr, 1); + } else { + if (!IS_ALIGNED(dest, 8)) { + dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); + arch_wb_cache_pmem(addr, 1); + } + + flushed = dest - (unsigned long) addr; + if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8)) + arch_wb_cache_pmem(addr + bytes - 1, 1); + } + } else arch_wb_cache_pmem(addr, bytes); return len; diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c index f369cb8db0d5..badd2b31a560 100644 --- a/arch/x86/kernel/cpu/intel_rdt_schemata.c +++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c @@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, } out: - rdtgroup_kn_unlock(of->kn); for_each_enabled_rdt_resource(r) { kfree(r->tmp_cbms); r->tmp_cbms = NULL; } + rdtgroup_kn_unlock(of->kn); return ret ?: nbytes; } diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8e9725c607ea..5accfbdee3f0 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -54,6 +54,8 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); +static int mce_chrdev_open_count; /* #times opened */ + #define mce_log_get_idx_check(p) \ ({ \ RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ @@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val, if (atomic_read(&num_notifiers) > 2) return NOTIFY_DONE; + /* Don't print when mcelog is running */ + if (mce_chrdev_open_count > 0) + return NOTIFY_DONE; + __print_mce(m); return NOTIFY_DONE; @@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c) */ static DEFINE_SPINLOCK(mce_chrdev_state_lock); -static int mce_chrdev_open_count; /* #times opened */ static int mce_chrdev_open_exclu; /* already open exclusive? */ static int mce_chrdev_open(struct inode *inode, struct file *file) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 396c042e9d0e..cc30a74e4adb 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where) task_pid_nr(current) > 1 ? 
KERN_INFO : KERN_EMERG, me->comm, me->pid, where, frame, regs->ip, regs->sp, regs->orig_ax); - print_vma_addr(" in ", regs->ip); + print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index ec1f756f9dc9..71beb28600d4 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from, if (from->si_signo == SIGSEGV) { if (from->si_code == SEGV_BNDERR) { - compat_uptr_t lower = (unsigned long)&to->si_lower; - compat_uptr_t upper = (unsigned long)&to->si_upper; + compat_uptr_t lower = (unsigned long)from->si_lower; + compat_uptr_t upper = (unsigned long)from->si_upper; put_user_ex(lower, &to->si_lower); put_user_ex(upper, &to->si_upper); } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 948443e115c1..4e496379a871 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx", tsk->comm, tsk->pid, str, regs->ip, regs->sp, error_code); - print_vma_addr(" in ", regs->ip); + print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } @@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code) pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx", tsk->comm, task_pid_nr(tsk), regs->ip, regs->sp, error_code); - print_vma_addr(" in ", regs->ip); + print_vma_addr(KERN_CONT " in ", regs->ip); pr_cont("\n"); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2ee00dbbbd51..259e9b28ccf8 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8198,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); case EXIT_REASON_PREEMPTION_TIMER: return false; + case EXIT_REASON_PML_FULL: + /* We don't expose PML support to L1. */ + return false; default: return true; } @@ -10267,6 +10270,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } + if (enable_pml) { + /* + * Conceptually we want to copy the PML address and index from + * vmcs01 here, and then back to vmcs01 on nested vmexit. But, + * since we always flush the log on each vmexit, this happens + * to be equivalent to simply resetting the fields in vmcs02. + */ + ASSERT(vmx->pml_pg); + vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); + vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); + } + if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 22af912d66d2..889e7619a091 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -643,21 +643,40 @@ void __init init_mem_mapping(void) * devmem_is_allowed() checks to see if /dev/mem access to a certain address * is valid. The argument is a physical page number. * - * - * On x86, access has to be given to the first megabyte of ram because that area - * contains BIOS code and data regions used by X and dosemu and similar apps. - * Access has to be given to non-kernel-ram areas as well, these contain the PCI - * mmio resources as well as potential bios/acpi data regions. + * On x86, access has to be given to the first megabyte of RAM because that + * area traditionally contains BIOS code and data regions used by X, dosemu, + * and similar apps. 
Since they map the entire memory range, the whole range + * must be allowed (for mapping), but any areas that would otherwise be + * disallowed are flagged as being "zero filled" instead of rejected. + * Access has to be given to non-kernel-ram areas as well, these contain the + * PCI mmio resources as well as potential bios/acpi data regions. */ int devmem_is_allowed(unsigned long pagenr) { - if (pagenr < 256) - return 1; - if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + if (page_is_ram(pagenr)) { + /* + * For disallowed memory regions in the low 1MB range, + * request that the page be shown as all zeros. + */ + if (pagenr < 256) + return 2; + + return 0; + } + + /* + * This must follow RAM test, since System RAM is considered a + * restricted resource under CONFIG_STRICT_IOMEM. + */ + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) { + /* Low 1MB bypasses iomem restrictions. */ + if (pagenr < 256) + return 1; + return 0; - if (!page_is_ram(pagenr)) - return 1; - return 0; + } + + return 1; } void free_init_pages(char *what, unsigned long begin, unsigned long end) diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 30031d5293c4..cdfe8c628959 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) return; } + /* No need to reserve regions that will never be freed. */ + if (md.attribute & EFI_MEMORY_RUNTIME) + return; + size += addr % EFI_PAGE_SIZE; size = round_up(size, EFI_PAGE_SIZE); addr = round_down(addr, EFI_PAGE_SIZE); diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 976b1d70edbc..4ddbfd57a7c8 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from, #define ARCH_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) +#ifdef CONFIG_MMU +static inline unsigned long ___pa(unsigned long va) +{ + unsigned long off = va - PAGE_OFFSET; + + if (off >= XCHAL_KSEG_SIZE) + off -= XCHAL_KSEG_SIZE; + + return off + PHYS_OFFSET; +} +#define __pa(x) ___pa((unsigned long)(x)) +#else #define __pa(x) \ ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET) +#endif #define __va(x) \ ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET)) #define pfn_valid(pfn) \ diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index cd400af4a6b2..6be7eb27fd29 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2) #define __NR_pkey_free 350 __SYSCALL(350, sys_pkey_free, 1) -#define __NR_syscall_count 351 +#define __NR_statx 351 +__SYSCALL(351, sys_statx, 5) + +#define __NR_syscall_count 352 /* * sysxtensa syscall handler diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index c82c43bff296..bae697a06a98 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs) static int show_trace_cb(struct stackframe *frame, void *data) { - if (kernel_text_address(frame->pc)) { - pr_cont(" [<%08lx>]", frame->pc); - print_symbol(" %s\n", frame->pc); - } + if (kernel_text_address(frame->pc)) + pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc); return 0; } diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 09af8ff18719..c974a1bbf4cb 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -171,7 +171,8 @@ void 
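The rewritten devmem_is_allowed() above turns a yes/no check into a tri-state: 0 rejects the page, 1 passes the access through, and 2 allows it but tells the /dev/mem code (patched further below in drivers/char/mem.c) to substitute zeros. A simplified userspace rendering of that contract (page_is_ram() and the iomem exclusivity check are mocked away; all names here are placeholders):

	#include <stdio.h>
	#include <string.h>

	/* 0 = reject, 1 = pass through, 2 = allowed but must read as zeros */
	static int page_access(unsigned long pfn, int is_ram)
	{
		if (is_ram)
			return pfn < 256 ? 2 : 0;	/* low-1MB RAM: show zeros */
		return 1;				/* non-RAM (MMIO/ACPI): allow */
	}

	int main(void)
	{
		char buf[8] = "secret!";
		int allowed = page_access(42, 1);	/* a low-1MB RAM page */

		if (!allowed)
			return 1;			/* -EPERM in the kernel */
		if (allowed == 2)
			memset(buf, 0, sizeof(buf));	/* clear_user() stand-in */
		printf("read back: \"%s\"\n", buf);	/* prints "" for case 2 */
		return 0;
	}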
blk_mq_sched_put_request(struct request *rq) void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) { - struct elevator_queue *e = hctx->queue->elevator; + struct request_queue *q = hctx->queue; + struct elevator_queue *e = q->elevator; const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request; bool did_work = false; LIST_HEAD(rq_list); @@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) */ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); - did_work = blk_mq_dispatch_rq_list(hctx, &rq_list); + did_work = blk_mq_dispatch_rq_list(q, &rq_list); } else if (!has_sched_dispatch) { blk_mq_flush_busy_ctxs(hctx, &rq_list); - blk_mq_dispatch_rq_list(hctx, &rq_list); + blk_mq_dispatch_rq_list(q, &rq_list); } /* @@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) if (!rq) break; list_add(&rq->queuelist, &rq_list); - } while (blk_mq_dispatch_rq_list(hctx, &rq_list)); + } while (blk_mq_dispatch_rq_list(q, &rq_list)); } } @@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, return true; } -static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) +static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) { if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (blk_mq_hctx_has_pending(hctx)) + if (blk_mq_hctx_has_pending(hctx)) { blk_mq_run_hw_queue(hctx, true); + return true; + } } + return false; } -void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx) -{ - struct request_queue *q = hctx->queue; - unsigned int i; +/** + * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list + * @pos: loop cursor. + * @skip: the list element that will not be examined. Iteration starts at + * @skip->next. + * @head: head of the list to examine. This list must have at least one + * element, namely @skip. + * @member: name of the list_head structure within typeof(*pos). + */ +#define list_for_each_entry_rcu_rr(pos, skip, head, member) \ + for ((pos) = (skip); \ + (pos = (pos)->member.next != (head) ? list_entry_rcu( \ + (pos)->member.next, typeof(*pos), member) : \ + list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \ + (pos) != (skip); ) - if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { - if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) { - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_sched_restart_hctx(hctx); +/* + * Called after a driver tag has been freed to check whether a hctx needs to + * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware + * queues in a round-robin fashion if the tag set of @hctx is shared with other + * hardware queues. 
+ */ +void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx) +{ + struct blk_mq_tags *const tags = hctx->tags; + struct blk_mq_tag_set *const set = hctx->queue->tag_set; + struct request_queue *const queue = hctx->queue, *q; + struct blk_mq_hw_ctx *hctx2; + unsigned int i, j; + + if (set->flags & BLK_MQ_F_TAG_SHARED) { + rcu_read_lock(); + list_for_each_entry_rcu_rr(q, queue, &set->tag_list, + tag_set_list) { + queue_for_each_hw_ctx(q, hctx2, i) + if (hctx2->tags == tags && + blk_mq_sched_restart_hctx(hctx2)) + goto done; + } + j = hctx->queue_num + 1; + for (i = 0; i < queue->nr_hw_queues; i++, j++) { + if (j == queue->nr_hw_queues) + j = 0; + hctx2 = queue->queue_hw_ctx[j]; + if (hctx2->tags == tags && + blk_mq_sched_restart_hctx(hctx2)) + break; } +done: + rcu_read_unlock(); } else { blk_mq_sched_restart_hctx(hctx); } @@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, } } -int blk_mq_sched_setup(struct request_queue *q) +static int blk_mq_sched_alloc_tags(struct request_queue *q, + struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) +{ + struct blk_mq_tag_set *set = q->tag_set; + int ret; + + hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, + set->reserved_tags); + if (!hctx->sched_tags) + return -ENOMEM; + + ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); + if (ret) + blk_mq_sched_free_tags(set, hctx, hctx_idx); + + return ret; +} + +static void blk_mq_sched_tags_teardown(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; - int ret, i; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_sched_free_tags(set, hctx, i); +} + +int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) +{ + struct elevator_queue *e = q->elevator; + + if (!e) + return 0; + + return blk_mq_sched_alloc_tags(q, hctx, hctx_idx); +} + +void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) +{ + struct elevator_queue *e = q->elevator; + + if (!e) + return; + + blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx); +} + +int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + int ret; + + if (!e) { + q->elevator = NULL; + return 0; + } /* * Default to 256, since we don't split into sync/async like the @@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q) */ q->nr_requests = 2 * BLKDEV_MAX_RQ; - /* - * We're switching to using an IO scheduler, so setup the hctx - * scheduler tags and switch the request map from the regular - * tags to scheduler tags. First allocate what we need, so we - * can safely fail and fallback, if needed. 
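The list_for_each_entry_rcu_rr() macro introduced above walks a circular list starting at the element after @skip and wrapping around, so every other element is visited exactly once. A plain userspace rendering of that traversal order, stripped of the RCU accessors and the list_head sentinel, with made-up names:

	#include <stdio.h>

	struct node {
		int id;
		struct node *next;	/* circular: the last node points at the first */
	};

	/* Visit every node except 'skip', starting at skip->next and wrapping. */
	static void visit_round_robin(struct node *skip)
	{
		struct node *pos;

		for (pos = skip->next; pos != skip; pos = pos->next)
			printf("visiting node %d\n", pos->id);
	}

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

		c.next = &a;		/* close the ring: a -> b -> c -> a */
		visit_round_robin(&b);	/* prints node 3, then node 1 */
		return 0;
	}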
- */ - ret = 0; queue_for_each_hw_ctx(q, hctx, i) { - hctx->sched_tags = blk_mq_alloc_rq_map(set, i, - q->nr_requests, set->reserved_tags); - if (!hctx->sched_tags) { - ret = -ENOMEM; - break; - } - ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests); + ret = blk_mq_sched_alloc_tags(q, hctx, i); if (ret) - break; + goto err; } - /* - * If we failed, free what we did allocate - */ - if (ret) { - queue_for_each_hw_ctx(q, hctx, i) { - if (!hctx->sched_tags) - continue; - blk_mq_sched_free_tags(set, hctx, i); - } - - return ret; - } + ret = e->ops.mq.init_sched(q, e); + if (ret) + goto err; return 0; + +err: + blk_mq_sched_tags_teardown(q); + q->elevator = NULL; + return ret; } -void blk_mq_sched_teardown(struct request_queue *q) +void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) { - struct blk_mq_tag_set *set = q->tag_set; - struct blk_mq_hw_ctx *hctx; - int i; - - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_sched_free_tags(set, hctx, i); + if (e->type->ops.mq.exit_sched) + e->type->ops.mq.exit_sched(e); + blk_mq_sched_tags_teardown(q); + q->elevator = NULL; } int blk_mq_sched_init(struct request_queue *q) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index a75b16b123f7..3a9e6e40558b 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, struct request **merged_request); bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio); bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); -void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx); +void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue, bool async, bool can_block); @@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx, struct list_head *rq_list, struct request *(*get_rq)(struct blk_mq_hw_ctx *)); -int blk_mq_sched_setup(struct request_queue *q); -void blk_mq_sched_teardown(struct request_queue *q); +int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); +void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); + +int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx); +void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx); int blk_mq_sched_init(struct request_queue *q); @@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } -/* - * Mark a hardware queue and the request queue it belongs to as needing a - * restart. 
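blk_mq_init_sched() above follows the standard kernel unwind idiom: allocate per-hctx scheduler tags in a loop, and on any failure branch to an err label that tears down everything set up so far and clears q->elevator. A generic, runnable sketch of that idiom (names are placeholders, not block-layer symbols):

	#include <stdlib.h>

	static int init_all(int n, int **slots)
	{
		int i;

		for (i = 0; i < n; i++) {
			slots[i] = malloc(sizeof(int));
			if (!slots[i])
				goto err;	/* unwind everything set up so far */
		}
		return 0;

	err:
		while (i--)
			free(slots[i]);
		return -1;			/* the kernel path returns -ENOMEM */
	}

	int main(void)
	{
		int *slots[4];
		int i, ret = init_all(4, slots);

		if (ret == 0)
			for (i = 0; i < 4; i++)
				free(slots[i]);
		return ret ? 1 : 0;
	}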
- */ -static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx) -{ - struct request_queue *q = hctx->queue; - - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); - if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) - set_bit(QUEUE_FLAG_RESTART, &q->queue_flags); -} - static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) { return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); diff --git a/block/blk-mq.c b/block/blk-mq.c index 6b6e7bc041db..572966f49596 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw, rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data); - blk_mq_put_ctx(alloc_data.ctx); blk_queue_exit(q); if (!rq) @@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag); if (sched_tag != -1) blk_mq_sched_completed_request(hctx, rq); - blk_mq_sched_restart_queues(hctx); + blk_mq_sched_restart(hctx); blk_queue_exit(q); } @@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT, }; - if (rq->tag != -1) { -done: - if (hctx) - *hctx = data.hctx; - return true; - } + if (rq->tag != -1) + goto done; if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag)) data.flags |= BLK_MQ_REQ_RESERVED; @@ -863,10 +858,12 @@ done: atomic_inc(&data.hctx->nr_active); } data.hctx->tags->rqs[rq->tag] = rq; - goto done; } - return false; +done: + if (hctx) + *hctx = data.hctx; + return rq->tag != -1; } static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, @@ -963,14 +960,17 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx) return true; } -bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) +bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list) { - struct request_queue *q = hctx->queue; + struct blk_mq_hw_ctx *hctx; struct request *rq; LIST_HEAD(driver_list); struct list_head *dptr; int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK; + if (list_empty(list)) + return false; + /* * Start off with dptr being NULL, so we start the first request * immediately, even if we have more pending. @@ -981,7 +981,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) * Now process all the entries, sending them to the driver. 
*/ errors = queued = 0; - while (!list_empty(list)) { + do { struct blk_mq_queue_data bd; rq = list_first_entry(list, struct request, queuelist); @@ -1052,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list) */ if (!dptr && list->next != list->prev) dptr = &driver_list; - } + } while (!list_empty(list)); hctx->dispatched[queued_to_index(queued)]++; @@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) return hctx->next_cpu; } -void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, + unsigned long msecs) { if (unlikely(blk_mq_hctx_stopped(hctx) || !blk_mq_hw_queue_mapped(hctx))) @@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) put_cpu(); } - kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); + if (msecs == 0) + kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->run_work); + else + kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), + &hctx->delayed_run_work, + msecs_to_jiffies(msecs)); +} + +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + __blk_mq_delay_run_hw_queue(hctx, true, msecs); +} +EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); + +void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) +{ + __blk_mq_delay_run_hw_queue(hctx, async, 0); } void blk_mq_run_hw_queues(struct request_queue *q, bool async) @@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work) __blk_mq_run_hw_queue(hctx); } +static void blk_mq_delayed_run_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work); + + __blk_mq_run_hw_queue(hctx); +} + static void blk_mq_delay_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; @@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, hctx->fq->flush_rq, hctx_idx, flush_start_tag + hctx_idx); + blk_mq_sched_exit_hctx(q, hctx, hctx_idx); + if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q, node = hctx->numa_node = set->numa_node; INIT_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn); INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); @@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q, set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) goto free_bitmap; + if (blk_mq_sched_init_hctx(q, hctx, hctx_idx)) + goto exit_hctx; + hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); if (!hctx->fq) - goto exit_hctx; + goto sched_exit_hctx; if (set->ops->init_request && set->ops->init_request(set->driver_data, @@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q, free_fq: kfree(hctx->fq); + sched_exit_hctx: + blk_mq_sched_exit_hctx(q, hctx, hctx_idx); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); @@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q) struct blk_mq_hw_ctx *hctx; unsigned int i; - blk_mq_sched_teardown(q); - /* hctx kobj stays in hctx */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx) @@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) return 0; } +static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) +{ 
+ if (set->ops->map_queues) + return set->ops->map_queues(set); + else + return blk_mq_map_queues(set); +} + /* * Alloc a tag set to be associated with one or more request queues. * May fail with EINVAL for various error conditions. May adjust the @@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (!set->mq_map) goto out_free_tags; - if (set->ops->map_queues) - ret = set->ops->map_queues(set); - else - ret = blk_mq_map_queues(set); + ret = blk_mq_update_queue_map(set); if (ret) goto out_free_mq_map; @@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) blk_mq_freeze_queue(q); set->nr_hw_queues = nr_hw_queues; + blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); diff --git a/block/blk-mq.h b/block/blk-mq.h index b79f9a7d8cf6..660a17e1d033 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); -bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *); +bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *); void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx); bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index c44b321335f3..37f0b3ad635e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj) if (q->elevator) { ioc_clear_queue(q); - elevator_exit(q->elevator); + elevator_exit(q, q->elevator); } blk_exit_rl(&q->root_rl); diff --git a/block/elevator.c b/block/elevator.c index 01139f549b5b..dbeecf7be719 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name) } } - if (e->uses_mq) { - err = blk_mq_sched_setup(q); - if (!err) - err = e->ops.mq.init_sched(q, e); - } else + if (e->uses_mq) + err = blk_mq_init_sched(q, e); + else err = e->ops.sq.elevator_init_fn(q, e); - if (err) { - if (e->uses_mq) - blk_mq_sched_teardown(q); + if (err) elevator_put(e); - } return err; } EXPORT_SYMBOL(elevator_init); -void elevator_exit(struct elevator_queue *e) +void elevator_exit(struct request_queue *q, struct elevator_queue *e) { mutex_lock(&e->sysfs_lock); if (e->uses_mq && e->type->ops.mq.exit_sched) - e->type->ops.mq.exit_sched(e); + blk_mq_exit_sched(q, e); else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn) e->type->ops.sq.elevator_exit_fn(e); mutex_unlock(&e->sysfs_lock); @@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e) } EXPORT_SYMBOL_GPL(elv_unregister); +static int elevator_switch_mq(struct request_queue *q, + struct elevator_type *new_e) +{ + int ret; + + blk_mq_freeze_queue(q); + blk_mq_quiesce_queue(q); + + if (q->elevator) { + if (q->elevator->registered) + elv_unregister_queue(q); + ioc_clear_queue(q); + elevator_exit(q, q->elevator); + } + + ret = blk_mq_init_sched(q, new_e); + if (ret) + goto out; + + if (new_e) { + ret = elv_register_queue(q); + if (ret) { + elevator_exit(q, q->elevator); + goto out; + } + } + + if (new_e) + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); + else + blk_add_trace_msg(q, "elv switch: none"); + +out: + 
blk_mq_unfreeze_queue(q); + blk_mq_start_stopped_hw_queues(q, true); + return ret; + +} + /* * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we @@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) bool old_registered = false; int err; - if (q->mq_ops) { - blk_mq_freeze_queue(q); - blk_mq_quiesce_queue(q); - } + if (q->mq_ops) + return elevator_switch_mq(q, new_e); /* * Turn on BYPASS and drain all requests w/ elevator private data. @@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) if (old) { old_registered = old->registered; - if (old->uses_mq) - blk_mq_sched_teardown(q); - - if (!q->mq_ops) - blk_queue_bypass_start(q); + blk_queue_bypass_start(q); /* unregister and clear all auxiliary data of the old elevator */ if (old_registered) @@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) } /* allocate, init and register new elevator */ - if (new_e) { - if (new_e->uses_mq) { - err = blk_mq_sched_setup(q); - if (!err) - err = new_e->ops.mq.init_sched(q, new_e); - } else - err = new_e->ops.sq.elevator_init_fn(q, new_e); - if (err) - goto fail_init; + err = new_e->ops.sq.elevator_init_fn(q, new_e); + if (err) + goto fail_init; - err = elv_register_queue(q); - if (err) - goto fail_register; - } else - q->elevator = NULL; + err = elv_register_queue(q); + if (err) + goto fail_register; /* done, kill the old one and finish */ if (old) { - elevator_exit(old); - if (!q->mq_ops) - blk_queue_bypass_end(q); + elevator_exit(q, old); + blk_queue_bypass_end(q); } - if (q->mq_ops) { - blk_mq_unfreeze_queue(q); - blk_mq_start_stopped_hw_queues(q, true); - } - - if (new_e) - blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); - else - blk_add_trace_msg(q, "elv switch: none"); + blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name); return 0; fail_register: - if (q->mq_ops) - blk_mq_sched_teardown(q); - elevator_exit(q->elevator); + elevator_exit(q, q->elevator); fail_init: /* switch failed, restore and re-register old elevator */ if (old) { q->elevator = old; elv_register_queue(q); - if (!q->mq_ops) - blk_queue_bypass_end(q); - } - if (q->mq_ops) { - blk_mq_unfreeze_queue(q); - blk_mq_start_stopped_hw_queues(q, true); + blk_queue_bypass_end(q); } return err; diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c index c86bae7b1d0f..ff096d9755b9 100644 --- a/drivers/acpi/acpica/utresrc.c +++ b/drivers/acpi/acpica/utresrc.c @@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, ACPI_FUNCTION_TRACE(ut_walk_aml_resources); - /* - * The absolute minimum resource template is one end_tag descriptor. - * However, we will treat a lone end_tag as just a simple buffer. 
- */ + /* The absolute minimum resource template is one end_tag descriptor */ + if (aml_length < sizeof(struct aml_resource_end_tag)) { return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); } @@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, /* Invoke the user function */ if (user_function) { - status = user_function(aml, length, offset, - resource_index, context); + status = + user_function(aml, length, offset, resource_index, + context); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } @@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state, *context = aml; } - /* Check if buffer is defined to be longer than the resource length */ - - if (aml_length > (offset + length)) { - return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); - } - /* Normal exit */ return_ACPI_STATUS(AE_OK); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index fb19e1cdb641..edc8663b5db3 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) return -ENODEV; /* - * If the device has a _HID (or _CID) returning a valid ACPI/PNP - * device ID, it is better to make it look less attractive here, so that - * the other device with the same _ADR value (that may not have a valid - * device ID) can be matched going forward. [This means a second spec - * violation in a row, so whatever we do here is best effort anyway.] + * If the device has a _HID returning a valid ACPI/PNP device ID, it is + * better to make it look less attractive here, so that the other device + * with the same _ADR value (that may not have a valid device ID) can be + * matched going forward. [This means a second spec violation in a row, + * so whatever we do here is best effort anyway.] */ - return sta_present && list_empty(&adev->pnp.ids) ? + return sta_present && !adev->pnp.type.platform_id ? 
FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 662036bdc65e..c8ea9d698cd0 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1) const struct nfit_set_info_map *map0 = m0; const struct nfit_set_info_map *map1 = m1; - return map0->region_offset - map1->region_offset; + if (map0->region_offset < map1->region_offset) + return -1; + else if (map0->region_offset > map1->region_offset) + return 1; + return 0; } /* Retrieve the nth entry referencing this spa */ diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 192691880d55..2433569b02ef 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device) return; device->flags.match_driver = true; - if (!ret) { - ret = device_attach(&device->dev); - if (ret < 0) - return; - - if (!ret && device->pnp.type.platform_id) - acpi_default_enumeration(device); + if (ret > 0) { + acpi_device_set_enumerated(device); + goto ok; } + ret = device_attach(&device->dev); + if (ret < 0) + return; + + if (ret > 0 || !device->pnp.type.platform_id) + acpi_device_set_enumerated(device); + else + acpi_default_enumeration(device); + ok: list_for_each_entry(child, &device->children, node) acpi_bus_attach(child); diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index 6c9aa95a9a05..49d705c9f0f7 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { &info, &info }; - /* SB600/700 don't have secondary port wired */ - if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) || - (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE)) - ppi[1] = &ata_dummy_port_info; - return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL, ATA_HOST_PARALLEL_SCAN); } diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 0636d84fbefe..f3f538eec7b3 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id, pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); } - /* enable IRQ on hotplug */ - pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); - if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { - dev_dbg(&pdev->dev, - "enabling SATA hotplug (0x%x)\n", - (int) tmp8); - tmp8 |= SATA_HOTPLUG; - pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); + if (board_id == vt6421) { + /* enable IRQ on hotplug */ + pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8); + if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) { + dev_dbg(&pdev->dev, + "enabling SATA hotplug (0x%x)\n", + (int) tmp8); + tmp8 |= SATA_HOTPLUG; + pci_write_config_byte(pdev, SVIA_MISC_3, tmp8); + } } /* diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index dceb5edd1e54..0c09d4256108 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { - copy_page(mem, cmem); + memcpy(mem, cmem, PAGE_SIZE); } else { struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); @@ -717,7 +717,7 @@ compress_again: if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { src = kmap_atomic(page); - copy_page(cmem, src); + memcpy(cmem, src, PAGE_SIZE); kunmap_atomic(src); } 
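The cmp_map() fix in the drivers/acpi/nfit/core.c hunk above exists because region_offset is a 64-bit quantity: returning the raw subtraction truncates the difference to int, which can drop the sign or collapse distinct values to "equal" and yield an inconsistent sort order. A standalone demonstration of the failure mode and the three-way fix:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdlib.h>

	static int cmp_sub(const void *a, const void *b)
	{
		uint64_t x = *(const uint64_t *)a;
		uint64_t y = *(const uint64_t *)b;

		return (int)(x - y);	/* truncated to 32 bits: sign can be lost */
	}

	static int cmp_3way(const void *a, const void *b)
	{
		uint64_t x = *(const uint64_t *)a;
		uint64_t y = *(const uint64_t *)b;

		return x < y ? -1 : x > y ? 1 : 0;
	}

	int main(void)
	{
		/* 0 and 1ULL << 32 differ only above bit 31, so cmp_sub()
		 * reports them as equal and the sort order is unreliable. */
		uint64_t v[] = { 1ULL << 32, 3, 0 };

		qsort(v, 3, sizeof(v[0]), cmp_sub);
		printf("subtraction: %llu %llu %llu\n", (unsigned long long)v[0],
		       (unsigned long long)v[1], (unsigned long long)v[2]);

		qsort(v, 3, sizeof(v[0]), cmp_3way);
		printf("three-way:   %llu %llu %llu\n", (unsigned long long)v[0],
		       (unsigned long long)v[1], (unsigned long long)v[2]);
		return 0;
	}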
else { memcpy(cmem, src, clen); @@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, } index = sector >> SECTORS_PER_PAGE_SHIFT; - offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; + offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; bv.bv_page = page; bv.bv_len = PAGE_SIZE; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 6d9cc2d39d22..7e4a9d1296bb 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) #endif #ifdef CONFIG_STRICT_DEVMEM +static inline int page_is_allowed(unsigned long pfn) +{ + return devmem_is_allowed(pfn); +} static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; @@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size) return 1; } #else +static inline int page_is_allowed(unsigned long pfn) +{ + return 1; +} static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; @@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf, while (count > 0) { unsigned long remaining; + int allowed; sz = size_inside_page(p, count); - if (!range_is_allowed(p >> PAGE_SHIFT, count)) + allowed = page_is_allowed(p >> PAGE_SHIFT); + if (!allowed) return -EPERM; + if (allowed == 2) { + /* Show zeros for restricted memory. */ + remaining = clear_user(buf, sz); + } else { + /* + * On ia64 if a page has been mapped somewhere as + * uncached, then it must also be accessed uncached + * by the kernel or data corruption may occur. + */ + ptr = xlate_dev_mem_ptr(p); + if (!ptr) + return -EFAULT; - /* - * On ia64 if a page has been mapped somewhere as uncached, then - * it must also be accessed uncached by the kernel or data - * corruption may occur. - */ - ptr = xlate_dev_mem_ptr(p); - if (!ptr) - return -EFAULT; + remaining = copy_to_user(buf, ptr, sz); + + unxlate_dev_mem_ptr(p, ptr); + } - remaining = copy_to_user(buf, ptr, sz); - unxlate_dev_mem_ptr(p, ptr); if (remaining) return -EFAULT; @@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf, #endif while (count > 0) { + int allowed; + sz = size_inside_page(p, count); - if (!range_is_allowed(p >> PAGE_SHIFT, sz)) + allowed = page_is_allowed(p >> PAGE_SHIFT); + if (!allowed) return -EPERM; - /* - * On ia64 if a page has been mapped somewhere as uncached, then - * it must also be accessed uncached by the kernel or data - * corruption may occur. - */ - ptr = xlate_dev_mem_ptr(p); - if (!ptr) { - if (written) - break; - return -EFAULT; - } + /* Skip actual writing when a page is marked as restricted. */ + if (allowed == 1) { + /* + * On ia64 if a page has been mapped somewhere as + * uncached, then it must also be accessed uncached + * by the kernel or data corruption may occur. 
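The one-character zram_rw_page() fix above is an operator-precedence repair: '<<' binds tighter than '&', so the old expression masked sector with (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT instead of shifting the masked sector. A runnable check (constants match a 4K-page, 512-byte-sector layout):

	#include <stdio.h>

	#define SECTOR_SHIFT		9
	#define SECTORS_PER_PAGE	8	/* 4096 / 512 */

	int main(void)
	{
		unsigned long sector = 13;

		/* Old: '&' applied after '<<', i.e. sector & 0xe00 */
		unsigned long buggy = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
		/* Fixed: byte offset of the sector within its page */
		unsigned long fixed = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

		printf("buggy=%#lx fixed=%#lx\n", buggy, fixed);	/* 0 vs 0xa00 */
		return 0;
	}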
+ */ + ptr = xlate_dev_mem_ptr(p); + if (!ptr) { + if (written) + break; + return -EFAULT; + } - copied = copy_from_user(ptr, buf, sz); - unxlate_dev_mem_ptr(p, ptr); - if (copied) { - written += sz - copied; - if (written) - break; - return -EFAULT; + copied = copy_from_user(ptr, buf, sz); + unxlate_dev_mem_ptr(p, ptr); + if (copied) { + written += sz - copied; + if (written) + break; + return -EFAULT; + } } buf += sz; diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e9b7e0b3cabe..87fe111d0be6 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev) vdev->config->reset(vdev); - virtqueue_disable_cb(portdev->c_ivq); + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); cancel_work_sync(&portdev->control_work); cancel_work_sync(&portdev->config_work); /* * Once more: if control_work_handler() was running, it would * enable the cb as the last step. */ - virtqueue_disable_cb(portdev->c_ivq); + if (use_multiport(portdev)) + virtqueue_disable_cb(portdev->c_ivq); remove_controlq_data(portdev); list_for_each_entry(port, &portdev->ports, list) { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bc96d423781a..0e3f6496524d 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled); *********************************************************************/ static enum cpuhp_state hp_online; +static int cpuhp_cpufreq_online(unsigned int cpu) +{ + cpufreq_online(cpu); + + return 0; +} + +static int cpuhp_cpufreq_offline(unsigned int cpu) +{ + cpufreq_offline(cpu); + + return 0; +} + /** * cpufreq_register_driver - register a CPU Frequency driver * @driver_data: A struct cpufreq_driver containing the values# @@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) } ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online", - cpufreq_online, - cpufreq_offline); + cpuhp_cpufreq_online, + cpuhp_cpufreq_offline); if (ret < 0) goto err_if_unreg; hp_online = ret; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 32100c4851dd..49cbdcba7883 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) ctx->dev = caam_jr_alloc(); if (IS_ERR(ctx->dev)) { - dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n"); + pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->dev); } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index fef39f9f41ee..5d7f73d60515 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) /* Try to run it through DECO0 */ ret = run_descriptor_deco0(ctrldev, desc, &status); - if (ret || status) { + if (ret || + (status && status != JRSTA_SSRC_JUMP_HALT_CC)) { dev_err(ctrldev, "Failed to deinstantiate RNG4 SH%d\n", sh_idx); @@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev) struct device *ctrldev; struct caam_drv_private *ctrlpriv; struct caam_ctrl __iomem *ctrl; - int ring; ctrldev = &pdev->dev; ctrlpriv = dev_get_drvdata(ctrldev); ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; - /* Remove platform devices for JobRs */ - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) - 
of_device_unregister(ctrlpriv->jrpdev[ring]); + /* Remove platform devices under the crypto node */ + of_platform_depopulate(ctrldev); /* De-initialize RNG state handles initialized by this driver. */ if (ctrlpriv->rng4_sh_init) @@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); #endif +static const struct of_device_id caam_match[] = { + { + .compatible = "fsl,sec-v4.0", + }, + { + .compatible = "fsl,sec4.0", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, caam_match); + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { - int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u64 caam_id; struct device *dev; struct device_node *nprop, *np; @@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev) goto iounmap_ctrl; } - /* - * Detect and enable JobRs - * First, find out how many ring spec'ed, allocate references - * for all, then go probe each one. - */ - rspec = 0; - for_each_available_child_of_node(nprop, np) - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) - rspec++; - - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec, - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL); - if (ctrlpriv->jrpdev == NULL) { - ret = -ENOMEM; + ret = of_platform_populate(nprop, caam_match, NULL, dev); + if (ret) { + dev_err(dev, "JR platform devices creation error\n"); goto iounmap_ctrl; } ring = 0; - ridx = 0; - ctrlpriv->total_jobrs = 0; for_each_available_child_of_node(nprop, np) if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { - ctrlpriv->jrpdev[ring] = - of_platform_device_create(np, NULL, dev); - if (!ctrlpriv->jrpdev[ring]) { - pr_warn("JR physical index %d: Platform device creation error\n", - ridx); - ridx++; - continue; - } ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) ((__force uint8_t *)ctrl + - (ridx + JR_BLOCK_NUMBER) * + (ring + JR_BLOCK_NUMBER) * BLOCK_OFFSET ); ctrlpriv->total_jobrs++; ring++; - ridx++; - } + } /* Check to see if QI present. If so, enable */ ctrlpriv->qi_present = @@ -847,17 +834,6 @@ disable_caam_ipg: return ret; } -static struct of_device_id caam_match[] = { - { - .compatible = "fsl,sec-v4.0", - }, - { - .compatible = "fsl,sec4.0", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, caam_match); - static struct platform_driver caam_driver = { .driver = { .name = "caam", diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e2bcacc1a921..dbed8baeebe5 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -66,7 +66,6 @@ struct caam_drv_private_jr { struct caam_drv_private { struct device *dev; - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */ struct platform_device *pdev; /* Physical-presence section */ diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig index 3e2ab3b14eea..9e95bf94eb13 100644 --- a/drivers/dax/Kconfig +++ b/drivers/dax/Kconfig @@ -2,6 +2,7 @@ menuconfig DEV_DAX tristate "DAX: direct access to differentiated memory" default m if NVDIMM_DAX depends on TRANSPARENT_HUGEPAGE + select SRCU help Support raw access to differentiated (persistence, bandwidth, latency...) 
memory via an mmap(2) capable character diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 80c6db279ae1..806f180c80d8 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -25,6 +25,7 @@ #include "dax.h" static dev_t dax_devt; +DEFINE_STATIC_SRCU(dax_srcu); static struct class *dax_class; static DEFINE_IDA(dax_minor_ida); static int nr_dax = CONFIG_NR_DEV_DAX; @@ -60,7 +61,7 @@ struct dax_region { * @region - parent region * @dev - device backing the character device * @cdev - core chardev data - * @alive - !alive + rcu grace period == no new mappings can be established + * @alive - !alive + srcu grace period == no new mappings can be established * @id - child id in the region * @num_resources - number of physical address extents in this device * @res - array of physical address ranges @@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) static int dax_dev_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size) { - int rc; + int rc, id; struct file *filp = vmf->vma->vm_file; struct dax_dev *dax_dev = filp->private_data; @@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf, ? "write" : "read", vmf->vma->vm_start, vmf->vma->vm_end); - rcu_read_lock(); + id = srcu_read_lock(&dax_srcu); switch (pe_size) { case PE_SIZE_PTE: rc = __dax_dev_pte_fault(dax_dev, vmf); @@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf, default: return VM_FAULT_FALLBACK; } - rcu_read_unlock(); + srcu_read_unlock(&dax_srcu, id); return rc; } @@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev) * Note, rcu is not protecting the liveness of dax_dev, rcu is * ensuring that any fault handlers that might have seen * dax_dev->alive == true, have completed. Any fault handlers - * that start after synchronize_rcu() has started will abort + * that start after synchronize_srcu() has started will abort * upon seeing dax_dev->alive == false. 
*/ dax_dev->alive = false; - synchronize_rcu(); + synchronize_srcu(&dax_srcu); unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1); cdev_del(cdev); device_unregister(dev); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index f72aaacbe023..512bdbc23bbb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -405,8 +405,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) || !exp_info->ops->map_dma_buf || !exp_info->ops->unmap_dma_buf || !exp_info->ops->release - || !exp_info->ops->kmap_atomic - || !exp_info->ops->kmap + || !exp_info->ops->map_atomic + || !exp_info->ops->map || !exp_info->ops->mmap)) { return ERR_PTR(-EINVAL); } @@ -872,7 +872,7 @@ void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num) { WARN_ON(!dmabuf); - return dmabuf->ops->kmap_atomic(dmabuf, page_num); + return dmabuf->ops->map_atomic(dmabuf, page_num); } EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic); @@ -889,8 +889,8 @@ void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num, { WARN_ON(!dmabuf); - if (dmabuf->ops->kunmap_atomic) - dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr); + if (dmabuf->ops->unmap_atomic) + dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr); } EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic); @@ -907,7 +907,7 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num) { WARN_ON(!dmabuf); - return dmabuf->ops->kmap(dmabuf, page_num); + return dmabuf->ops->map(dmabuf, page_num); } EXPORT_SYMBOL_GPL(dma_buf_kmap); @@ -924,8 +924,8 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num, { WARN_ON(!dmabuf); - if (dmabuf->ops->kunmap) - dmabuf->ops->kunmap(dmabuf, page_num, vaddr); + if (dmabuf->ops->unmap) + dmabuf->ops->unmap(dmabuf, page_num, vaddr); } EXPORT_SYMBOL_GPL(dma_buf_kunmap); diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 932742e4cf23..24c461dea7af 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si, status = __gop_query32(sys_table_arg, gop32, &info, &size, ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found)) { + if (status == EFI_SUCCESS && (!first_gop || conout_found) && + info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may * provide multiple GOP devices, not all of which are @@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si, status = __gop_query64(sys_table_arg, gop64, &info, &size, ¤t_fb_base); - if (status == EFI_SUCCESS && (!first_gop || conout_found)) { + if (status == EFI_SUCCESS && (!first_gop || conout_found) && + info->pixel_format != PIXEL_BLT_ONLY) { /* * Systems that use the UEFI Console Splitter may * provide multiple GOP devices, not all of which are diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 9b37a3692b3f..2bd683e2be02 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, goto fail_free_event; } + if (agpio->wake_capable == ACPI_WAKE_CAPABLE) + enable_irq_wake(irq); + list_add_tail(&event->node, &acpi_gpio->events); return AE_OK; @@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip) list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { struct gpio_desc *desc; + if 
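The dax.c conversion above moves from rcu_read_lock() to SRCU because the fault handler can sleep, which a plain RCU read-side section must not do; teardown then pairs "alive = false" with synchronize_srcu() so that no fault handler that observed alive == true can still be running afterwards. A kernel-style sketch of that pairing — illustrative only, not the driver's actual code, with memory-ordering annotations elided:

	#include <linux/errno.h>
	#include <linux/srcu.h>
	#include <linux/types.h>

	DEFINE_STATIC_SRCU(obj_srcu);

	static bool obj_alive = true;

	/* Reader: an SRCU section may sleep, unlike rcu_read_lock()/unlock(). */
	static int obj_use(void)
	{
		int idx, ret = -ENXIO;

		idx = srcu_read_lock(&obj_srcu);
		if (obj_alive)
			ret = 0;	/* safe to operate on the object here */
		srcu_read_unlock(&obj_srcu, idx);
		return ret;
	}

	/* Teardown: after synchronize_srcu(), no reader still sees alive == true. */
	static void obj_kill(void)
	{
		obj_alive = false;
		synchronize_srcu(&obj_srcu);
		/* now safe to unmap/unregister/free the object */
	}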
(irqd_is_wakeup_set(irq_get_irq_data(event->irq))) + disable_irq_wake(event->irq); + free_irq(event->irq, event); desc = event->desc; if (WARN_ON(IS_ERR(desc))) @@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev, } desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); - if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) + if (!IS_ERR(desc)) break; + if (PTR_ERR(desc) == -EPROBE_DEFER) + return ERR_CAST(desc); } /* Then from plain _CRS GPIOs */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 262056778f52..6a8129949333 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -32,7 +32,7 @@ #include <linux/wait.h> #include <linux/list.h> #include <linux/kref.h> -#include <linux/interval_tree.h> +#include <linux/rbtree.h> #include <linux/hashtable.h> #include <linux/dma-fence.h> @@ -122,14 +122,6 @@ extern int amdgpu_param_buf_per_se; /* max number of IP instances */ #define AMDGPU_MAX_SDMA_INSTANCES 2 -/* max number of VMHUB */ -#define AMDGPU_MAX_VMHUBS 2 -#define AMDGPU_MMHUB 0 -#define AMDGPU_GFXHUB 1 - -/* hardcode that limit for now */ -#define AMDGPU_VA_RESERVED_SIZE (8 << 20) - /* hard reset data */ #define AMDGPU_ASIC_RESET_DATA 0x39d5e86b @@ -312,12 +304,9 @@ struct amdgpu_gart_funcs { /* set pte flags based per asic */ uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev, uint32_t flags); -}; - -/* provided by the mc block */ -struct amdgpu_mc_funcs { /* adjust mc addr in fb for APU case */ u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr); + uint32_t (*get_invalidate_req)(unsigned int vm_id); }; /* provided by the ih block */ @@ -379,7 +368,10 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo_va_mapping { struct list_head list; - struct interval_tree_node it; + struct rb_node rb; + uint64_t start; + uint64_t last; + uint64_t __subtree_last; uint64_t offset; uint64_t flags; }; @@ -579,8 +571,6 @@ struct amdgpu_vmhub { uint32_t vm_context0_cntl; uint32_t vm_l2_pro_fault_status; uint32_t vm_l2_pro_fault_cntl; - uint32_t (*get_invalidate_req)(unsigned int vm_id); - uint32_t (*get_vm_protection_bits)(void); }; /* @@ -618,7 +608,6 @@ struct amdgpu_mc { u64 private_aperture_end; /* protects concurrent invalidation */ spinlock_t invalidate_lock; - const struct amdgpu_mc_funcs *mc_funcs; }; /* @@ -1712,6 +1701,12 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v); #define WREG32_FIELD(reg, field, val) \ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) +#define WREG32_FIELD_OFFSET(reg, offset, field, val) \ + WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + +#define WREG32_FIELD15(ip, idx, reg, field, val) \ + WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + /* * BIOS helpers. 
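 *
 * (Usage sketch for the WREG32_FIELD* helpers added just above; the
 * three calls below all appear verbatim later in this series:
 *
 *	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
 *	WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL, ring->pipe,
 *			    GENERIC2_INT_ENABLE, 1);
 *	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
 *
 * Each expands to a read-modify-write of the named register: read,
 * clear the field's mask, then or in the new value shifted into
 * place.)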
*/ @@ -1887,12 +1882,14 @@ void amdgpu_unregister_atpx_handler(void); bool amdgpu_has_atpx_dgpu_power_cntl(void); bool amdgpu_is_atpx_hybrid(void); bool amdgpu_atpx_dgpu_req_power_for_displays(void); +bool amdgpu_has_atpx(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } static inline bool amdgpu_is_atpx_hybrid(void) { return false; } static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } +static inline bool amdgpu_has_atpx(void) { return false; } #endif /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index f52b1bf3d3d9..ad4329922f79 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -754,6 +754,35 @@ union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9; }; +/* + * Return vram width from integrated system info table, if available, + * or 0 if not. + */ +int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + u16 data_offset, size; + union igp_info *igp_info; + u8 frev, crev; + + /* get any igp specific overrides */ + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *) + (mode_info->atom_context->bios + data_offset); + switch (crev) { + case 8: + case 9: + return igp_info->info_8.ucUMAChannelNumber * 64; + default: + return 0; + } + } + + return 0; +} + static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev, struct amdgpu_atom_ss *ss, int id) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 4e0f488487f3..38d0fe32e5cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -148,6 +148,8 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev); int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev); +int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev); + bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev, struct amdgpu_atom_ss *ss, int id, u32 clock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 0218cea6be4d..a6649874e6ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -237,7 +237,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, struct amdgpu_fpriv *fpriv = filp->driver_priv; union drm_amdgpu_bo_list *args = data; uint32_t handle = args->in.list_handle; - const void __user *uptr = (const void*)(long)args->in.bo_info_ptr; + const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr; struct drm_amdgpu_bo_list_entry *info; struct amdgpu_bo_list *list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 97f661372a1c..ec71b9320561 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -161,7 +161,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } /* get chunks */ - chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); + chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks); if (copy_from_user(chunk_array, 
chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; @@ -181,7 +181,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; + chunk_ptr = (void __user *)(uintptr_t)chunk_array[i]; if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { ret = -EFAULT; @@ -192,7 +192,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)(unsigned long)user_chunk.chunk_data; + cdata = (void __user *)(uintptr_t)user_chunk.chunk_data; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); if (p->chunks[i].kdata == NULL) { @@ -949,7 +949,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, } if ((chunk_ib->va_start + chunk_ib->ib_bytes) > - (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) { + (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) { DRM_ERROR("IB va_start+ib_bytes is invalid\n"); return -EINVAL; } @@ -960,7 +960,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return r; } - offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; + offset = m->start * AMDGPU_GPU_PAGE_SIZE; kptr += chunk_ib->va_start - offset; r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib); @@ -1242,6 +1242,7 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue; r = dma_fence_wait_timeout(fence, true, timeout); + dma_fence_put(fence); if (r < 0) return r; @@ -1339,7 +1340,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, if (fences == NULL) return -ENOMEM; - fences_user = (void __user *)(unsigned long)(wait->in.fences); + fences_user = (void __user *)(uintptr_t)(wait->in.fences); if (copy_from_user(fences, fences_user, sizeof(struct drm_amdgpu_fence) * fence_count)) { r = -EFAULT; @@ -1388,8 +1389,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, continue; list_for_each_entry(mapping, &lobj->bo_va->valids, list) { - if (mapping->it.start > addr || - addr > mapping->it.last) + if (mapping->start > addr || + addr > mapping->last) continue; *bo = lobj->bo_va->bo; @@ -1397,8 +1398,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, } list_for_each_entry(mapping, &lobj->bo_va->invalids, list) { - if (mapping->it.start > addr || - addr > mapping->it.last) + if (mapping->start > addr || + addr > mapping->last) continue; *bo = lobj->bo_va->bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 83dda05325b8..483660742f75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1040,43 +1040,60 @@ static bool amdgpu_check_pot_argument(int arg) return (arg & (arg - 1)) == 0; } -static void amdgpu_get_block_size(struct amdgpu_device *adev) -{ - /* from AI, asic starts to support multiple level VMPT */ - if (adev->asic_type >= CHIP_VEGA10) { - if (amdgpu_vm_block_size != 9) - dev_warn(adev->dev, - "Multi-VMPT limits block size to one page!\n"); - amdgpu_vm_block_size = 9; - return; - } +static void amdgpu_check_block_size(struct amdgpu_device *adev) +{ /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the * page table and the remaining bits are in the page directory */ - if (amdgpu_vm_block_size == -1) { - - /* Total bits covered by PD + PTs */ - unsigned bits = ilog2(amdgpu_vm_size) + 18; - - 
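/*
 * Worked example of this heuristic (which reappears unchanged in the
 * new amdgpu_vm_get_block_size() later in this patch): a 64 GB VM
 * space covers ilog2(64) + 18 = 24 bits of PD + PT; 64 GB > 8 GB, so
 * block_size = (24 + 3) / 2 = 13, i.e. 8K PTEs per page table with
 * 11 bits left for the directory.  An 8 GB space gives 21 bits and
 * block_size = 21 - 9 = 12, keeping the directory at 512 entries
 * (4 KB).
 */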
/* Make sure the PD is 4K in size up to 8GB address space. - Above that split equal between PD and PTs */ - if (amdgpu_vm_size <= 8) - amdgpu_vm_block_size = bits - 9; - else - amdgpu_vm_block_size = (bits + 3) / 2; + if (amdgpu_vm_block_size == -1) + return; - } else if (amdgpu_vm_block_size < 9) { + if (amdgpu_vm_block_size < 9) { dev_warn(adev->dev, "VM page table size (%d) too small\n", amdgpu_vm_block_size); - amdgpu_vm_block_size = 9; + goto def_value; } if (amdgpu_vm_block_size > 24 || (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) { dev_warn(adev->dev, "VM page table size (%d) too large\n", amdgpu_vm_block_size); - amdgpu_vm_block_size = 9; + goto def_value; } + + return; + +def_value: + amdgpu_vm_block_size = -1; +} + +static void amdgpu_check_vm_size(struct amdgpu_device *adev) +{ + if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { + dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", + amdgpu_vm_size); + goto def_value; + } + + if (amdgpu_vm_size < 1) { + dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", + amdgpu_vm_size); + goto def_value; + } + + /* + * Max GPUVM size for Cayman, SI, CI VI are 40 bits. + */ + if (amdgpu_vm_size > 1024) { + dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", + amdgpu_vm_size); + goto def_value; + } + + return; + +def_value: + amdgpu_vm_size = -1; } /** @@ -1108,28 +1125,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) } } - if (!amdgpu_check_pot_argument(amdgpu_vm_size)) { - dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", - amdgpu_vm_size); - amdgpu_vm_size = 8; - } + amdgpu_check_vm_size(adev); - if (amdgpu_vm_size < 1) { - dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", - amdgpu_vm_size); - amdgpu_vm_size = 8; - } - - /* - * Max GPUVM size for Cayman, SI and CI are 40 bits. 
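 *
 * (With this patch an out-of-range value no longer silently forces
 * 8 GB; amdgpu_vm_size is reset to -1 and amdgpu_vm_adjust_size()
 * later substitutes the per-ASIC default.  The power-of-two test
 * used above is the classic (arg & (arg - 1)) == 0 trick: 64 & 63
 * == 0, while 48 & 47 == 32.)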
- */ - if (amdgpu_vm_size > 1024) { - dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", - amdgpu_vm_size); - amdgpu_vm_size = 8; - } - - amdgpu_get_block_size(adev); + amdgpu_check_block_size(adev); if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || !amdgpu_check_pot_argument(amdgpu_vram_page_split))) { @@ -2249,9 +2247,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } r = amdgpu_resume(adev); - if (r) + if (r) { DRM_ERROR("amdgpu_resume failed (%d).\n", r); - + return r; + } amdgpu_fence_driver_resume(adev); if (resume) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index ce15721cadda..96926a221bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -614,6 +614,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); } + /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ + if (obj->import_attach) { + DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + return ERR_PTR(-EINVAL); + } + amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { drm_gem_object_unreference_unlocked(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 400917fd7486..4e0f7d2d87f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -86,7 +86,7 @@ int amdgpu_runtime_pm = -1; unsigned amdgpu_ip_block_mask = 0xffffffff; int amdgpu_bapm = -1; int amdgpu_deep_color = 0; -int amdgpu_vm_size = 64; +int amdgpu_vm_size = -1; int amdgpu_vm_block_size = -1; int amdgpu_vm_fault_stop = 0; int amdgpu_vm_debug = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index f85520d4e711..03a9c5cad222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -717,7 +717,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, switch (args->op) { case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { struct drm_amdgpu_gem_create_in info; - void __user *out = (void __user *)(long)args->value; + void __user *out = (void __user *)(uintptr_t)args->value; info.bo_size = robj->gem_base.size; info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; @@ -729,6 +729,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: + if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) { + r = -EINVAL; + amdgpu_bo_unreserve(robj); + break; + } if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) { r = -EPERM; amdgpu_bo_unreserve(robj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 13b487235a8b..a6b7e367a860 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -316,9 +316,10 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, return -EINVAL; if (!adev->irq.client[client_id].sources) { - adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID, - sizeof(struct amdgpu_irq_src), - GFP_KERNEL); + adev->irq.client[client_id].sources = + kcalloc(AMDGPU_MAX_IRQ_SRC_ID, + sizeof(struct amdgpu_irq_src *), + GFP_KERNEL); if (!adev->irq.client[client_id].sources) return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index dfb029ab3448..832be632478f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -36,12 +36,6 @@ #include <linux/pm_runtime.h> #include "amdgpu_amdkfd.h" -#if defined(CONFIG_VGA_SWITCHEROO) -bool amdgpu_has_atpx(void); -#else -static inline bool amdgpu_has_atpx(void) { return false; } -#endif - /** * amdgpu_driver_unload_kms - Main unload function for KMS. * @@ -243,7 +237,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct amdgpu_device *adev = dev->dev_private; struct drm_amdgpu_info *info = data; struct amdgpu_mode_info *minfo = &adev->mode_info; - void __user *out = (void __user *)(long)info->return_pointer; + void __user *out = (void __user *)(uintptr_t)info->return_pointer; uint32_t size = info->return_size; struct drm_crtc *crtc; uint32_t ui32 = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 7ea3cacf9f9f..38f739fb727b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -31,6 +31,7 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/mmu_notifier.h> +#include <linux/interval_tree.h> #include <drm/drmP.h> #include <drm/drm.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5aac350b007f..cb89fff863c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -122,20 +122,19 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, if (domain & AMDGPU_GEM_DOMAIN_VRAM) { unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; - unsigned lpfn = 0; - - /* This forces a reallocation if the flag wasn't set before */ - if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) - lpfn = adev->mc.real_vram_size >> PAGE_SHIFT; places[c].fpfn = 0; - places[c].lpfn = lpfn; + places[c].lpfn = 0; places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) places[c].lpfn = visible_pfn; else places[c].flags |= TTM_PL_FLAG_TOPDOWN; + + if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) + places[c].flags |= TTM_PL_FLAG_CONTIGUOUS; c++; } @@ -651,6 +650,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, if (WARN_ON_ONCE(min_offset > max_offset)) return -EINVAL; + /* A shared bo cannot be migrated to VRAM */ + if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) + return -EINVAL; + if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; @@ -928,8 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) size = bo->mem.num_pages << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT; /* TODO: figure out how to map scattered VRAM to the CPU */ - if ((offset + size) <= adev->mc.visible_vram_size && - (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) + if ((offset + size) <= adev->mc.visible_vram_size) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -937,7 +939,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) return -EINVAL; /* hurrah the memory is not visible ! 
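 *
 * With the CONTIGUOUS requirement dropped above, the fast path is
 * simply
 *
 *	(bo->mem.start << PAGE_SHIFT) + (bo->mem.num_pages << PAGE_SHIFT)
 *		<= adev->mc.visible_vram_size
 *
 * and the fallback below clamps every placement's lpfn to
 * visible_vram_size >> PAGE_SHIFT so TTM migrates the BO into the
 * CPU-visible window.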
*/ - abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; for (i = 0; i < abo->placement.num_placement; i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 4731015f6101..ed6e5799016e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -130,7 +130,7 @@ psp_cmd_submit_buf(struct psp_context *psp, while (*((unsigned int *)psp->fence_buf) != index) { msleep(1); - }; + } amdgpu_bo_free_kernel(&cmd_buf_bo, &cmd_buf_mc_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index a87de18160a8..ee9d0f346d75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -226,8 +226,8 @@ TRACE_EVENT(amdgpu_vm_bo_map, TP_fast_assign( __entry->bo = bo_va ? bo_va->bo : NULL; - __entry->start = mapping->it.start; - __entry->last = mapping->it.last; + __entry->start = mapping->start; + __entry->last = mapping->last; __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), @@ -250,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, TP_fast_assign( __entry->bo = bo_va->bo; - __entry->start = mapping->it.start; - __entry->last = mapping->it.last; + __entry->start = mapping->start; + __entry->last = mapping->last; __entry->offset = mapping->offset; __entry->flags = mapping->flags; ), @@ -270,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping, ), TP_fast_assign( - __entry->soffset = mapping->it.start; - __entry->eoffset = mapping->it.last + 1; + __entry->soffset = mapping->start; + __entry->eoffset = mapping->last + 1; __entry->flags = mapping->flags; ), TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 244bb9aacf86..35d53a0d9ba6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -529,40 +529,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ case TTM_PL_TT: break; case TTM_PL_VRAM: - if (mem->start == AMDGPU_BO_INVALID_OFFSET) - return -EINVAL; - mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) return -EINVAL; mem->bus.base = adev->mc.aper_base; mem->bus.is_iomem = true; -#ifdef __alpha__ - /* - * Alpha: use bus.addr to hold the ioremap() return, - * so we can modify bus.base below. - */ - if (mem->placement & TTM_PL_FLAG_WC) - mem->bus.addr = - ioremap_wc(mem->bus.base + mem->bus.offset, - mem->bus.size); - else - mem->bus.addr = - ioremap_nocache(mem->bus.base + mem->bus.offset, - mem->bus.size); - if (!mem->bus.addr) - return -ENOMEM; - - /* - * Alpha: Use just the bus offset plus - * the hose/domain memory base for bus.base. - * It then can be used to build PTEs for VRAM - * access, as done in ttm_bo_vm_fault(). 
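 *
 * (The new amdgpu_ttm_io_mem_pfn() helper added just below relies on
 * the kernel's do_div() semantics: the 64-bit dividend is replaced in
 * place by the quotient and the remainder is returned, so
 *
 *	uint64_t offset = page_offset;
 *	page_offset = do_div(offset, size);  /* remainder: page in node */
 *	mm += offset;                        /* quotient: node index */
 *
 * walks an array of equally sized drm_mm_nodes to the backing page.)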
- */ - mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + - adev->ddev->hose->dense_mem_base; -#endif break; default: return -EINVAL; @@ -574,6 +546,18 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } +static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset) +{ + struct drm_mm_node *mm = bo->mem.mm_node; + uint64_t size = mm->size; + uint64_t offset = page_offset; + + page_offset = do_div(offset, size); + mm += offset; + return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset; +} + /* * TTM backend functions. */ @@ -1089,6 +1073,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, + .io_mem_pfn = amdgpu_ttm_io_mem_pfn, }; int amdgpu_ttm_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 0b92dd0c1d70..2ca09f111f08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -741,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) start = amdgpu_bo_gpu_offset(bo); - end = (mapping->it.last + 1 - mapping->it.start); + end = (mapping->last + 1 - mapping->start); end = end * AMDGPU_GPU_PAGE_SIZE + start; - addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; + addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE; start += addr; amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0184197eb000..c853400805d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -595,13 +595,13 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, } if ((addr + (uint64_t)size) > - ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) { + (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n", addr, lo, hi); return -EINVAL; } - addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; + addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE; addr += amdgpu_bo_gpu_offset(bo); addr -= ((uint64_t)size) * ((uint64_t)index); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index ecef35a1fe33..ba8b8ae6234f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -122,9 +122,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) mutex_lock(&adev->virt.lock_kiq); amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_hdp_flush(ring); amdgpu_ring_emit_rreg(ring, reg); - amdgpu_ring_emit_hdp_invalidate(ring); amdgpu_fence_emit(ring, &f); amdgpu_ring_commit(ring); mutex_unlock(&adev->virt.lock_kiq); @@ -150,9 +148,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) mutex_lock(&adev->virt.lock_kiq); amdgpu_ring_alloc(ring, 32); - amdgpu_ring_emit_hdp_flush(ring); amdgpu_ring_emit_wreg(ring, reg, v); - amdgpu_ring_emit_hdp_invalidate(ring); amdgpu_fence_emit(ring, &f); amdgpu_ring_commit(ring); mutex_unlock(&adev->virt.lock_kiq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0235d7933efd..7ed5302b511a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -26,6 +26,7 @@ * Jerome Glisse */ #include 
<linux/dma-fence-array.h> +#include <linux/interval_tree_generic.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -51,6 +52,15 @@ * SI supports 16. */ +#define START(node) ((node)->start) +#define LAST(node) ((node)->last) + +INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last, + START, LAST, static, amdgpu_vm_it) + +#undef START +#undef LAST + /* Local structure. Encapsulate some VM table update parameters to reduce * the number of function parameters */ @@ -90,13 +100,14 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, if (level == 0) /* For the root directory */ return adev->vm_manager.max_pfn >> - (amdgpu_vm_block_size * adev->vm_manager.num_level); + (adev->vm_manager.block_size * + adev->vm_manager.num_level); else if (level == adev->vm_manager.num_level) /* For the page tables on the leaves */ - return AMDGPU_VM_PTE_COUNT; + return AMDGPU_VM_PTE_COUNT(adev); else /* Everything in between */ - return 1 << amdgpu_vm_block_size; + return 1 << adev->vm_manager.block_size; } /** @@ -261,7 +272,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, unsigned level) { unsigned shift = (adev->vm_manager.num_level - level) * - amdgpu_vm_block_size; + adev->vm_manager.block_size; unsigned pt_idx, from, to; int r; @@ -365,11 +376,19 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0); } -static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, - struct amdgpu_vm_id *id) +/** + * amdgpu_vm_had_gpu_reset - check if reset occured since last use + * + * @adev: amdgpu_device pointer + * @id: VMID structure + * + * Check if GPU reset occured since last use of the VMID. + */ +static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vm_id *id) { return id->current_gpu_reset_count != - atomic_read(&adev->gpu_reset_counter) ? 
true : false; + atomic_read(&adev->gpu_reset_counter); } /** @@ -455,7 +474,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Check all the prerequisites to using this VMID */ if (!id) continue; - if (amdgpu_vm_is_gpu_reset(adev, id)) + if (amdgpu_vm_had_gpu_reset(adev, id)) continue; if (atomic64_read(&id->owner) != vm->client_id) @@ -483,7 +502,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r) goto error; - id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); vm->ids[ring->idx] = id; @@ -504,9 +522,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, if (r) goto error; - dma_fence_put(id->first); - id->first = dma_fence_get(fence); - dma_fence_put(id->last_flush); id->last_flush = NULL; @@ -557,8 +572,8 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) { u64 addr = mc_addr; - if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr) - addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr); + if (adev->gart.gart_funcs->adjust_mc_addr) + addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr); return addr; } @@ -583,60 +598,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) id->gws_size != job->gws_size || id->oa_base != job->oa_base || id->oa_size != job->oa_size); + bool vm_flush_needed = job->vm_needs_flush || + amdgpu_vm_ring_has_compute_vm_bug(ring); + unsigned patch_offset = 0; int r; - if (job->vm_needs_flush || gds_switch_needed || - amdgpu_vm_is_gpu_reset(adev, id) || - amdgpu_vm_ring_has_compute_vm_bug(ring)) { - unsigned patch_offset = 0; + if (amdgpu_vm_had_gpu_reset(adev, id)) { + gds_switch_needed = true; + vm_flush_needed = true; + } - if (ring->funcs->init_cond_exec) - patch_offset = amdgpu_ring_init_cond_exec(ring); + if (!vm_flush_needed && !gds_switch_needed) + return 0; - if (ring->funcs->emit_pipeline_sync && - (job->vm_needs_flush || gds_switch_needed || - amdgpu_vm_ring_has_compute_vm_bug(ring))) - amdgpu_ring_emit_pipeline_sync(ring); + if (ring->funcs->init_cond_exec) + patch_offset = amdgpu_ring_init_cond_exec(ring); - if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || - amdgpu_vm_is_gpu_reset(adev, id))) { - struct dma_fence *fence; - u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr); + if (ring->funcs->emit_pipeline_sync) + amdgpu_ring_emit_pipeline_sync(ring); - trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id); - amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr); + if (ring->funcs->emit_vm_flush && vm_flush_needed) { + u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr); + struct dma_fence *fence; - r = amdgpu_fence_emit(ring, &fence); - if (r) - return r; + trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr); - mutex_lock(&adev->vm_manager.lock); - dma_fence_put(id->last_flush); - id->last_flush = fence; - mutex_unlock(&adev->vm_manager.lock); - } + r = amdgpu_fence_emit(ring, &fence); + if (r) + return r; - if (gds_switch_needed) { - id->gds_base = job->gds_base; - id->gds_size = job->gds_size; - id->gws_base = job->gws_base; - id->gws_size = job->gws_size; - id->oa_base = job->oa_base; - id->oa_size = job->oa_size; - amdgpu_ring_emit_gds_switch(ring, job->vm_id, - job->gds_base, job->gds_size, - job->gws_base, job->gws_size, - job->oa_base, job->oa_size); - } + mutex_lock(&adev->vm_manager.lock); + dma_fence_put(id->last_flush); + id->last_flush = fence; + 
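/* last_flush is swapped under vm_manager.lock so a concurrent
 * flush cannot leak the previous fence; the early return added
 * above lets submissions that need neither a VM flush nor a GDS
 * switch skip this path entirely. */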
mutex_unlock(&adev->vm_manager.lock); + } - if (ring->funcs->patch_cond_exec) - amdgpu_ring_patch_cond_exec(ring, patch_offset); + if (gds_switch_needed) { + id->gds_base = job->gds_base; + id->gds_size = job->gds_size; + id->gws_base = job->gws_base; + id->gws_size = job->gws_size; + id->oa_base = job->oa_base; + id->oa_size = job->oa_size; + amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base, + job->gds_size, job->gws_base, + job->gws_size, job->oa_base, + job->oa_size); + } - /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ - if (ring->funcs->emit_switch_buffer) { - amdgpu_ring_emit_switch_buffer(ring); - amdgpu_ring_emit_switch_buffer(ring); - } + if (ring->funcs->patch_cond_exec) + amdgpu_ring_patch_cond_exec(ring, patch_offset); + + /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ + if (ring->funcs->emit_switch_buffer) { + amdgpu_ring_emit_switch_buffer(ring); + amdgpu_ring_emit_switch_buffer(ring); } return 0; } @@ -960,7 +977,7 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p, unsigned idx, level = p->adev->vm_manager.num_level; while (entry->entries) { - idx = addr >> (amdgpu_vm_block_size * level--); + idx = addr >> (p->adev->vm_manager.block_size * level--); idx %= amdgpu_bo_size(entry->bo) / 8; entry = &entry->entries[idx]; } @@ -987,7 +1004,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags) { - const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; + struct amdgpu_device *adev = params->adev; + const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; uint64_t cur_pe_start, cur_nptes, cur_dst; uint64_t addr; /* next GPU address to be updated */ @@ -1011,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else - nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); cur_pe_start = amdgpu_bo_gpu_offset(pt); cur_pe_start += (addr & mask) * 8; @@ -1039,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else - nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); next_pe_start = amdgpu_bo_gpu_offset(pt); next_pe_start += (addr & mask) * 8; @@ -1186,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, * reserve space for one command every (1 << BLOCK_SIZE) * entries or 2k dwords (whatever is smaller) */ - ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1; + ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1; /* padding, etc. 
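 *
 * (Worked example for the ncmds budget above: with block_size 13,
 * min(13, 11u) = 11, so one command is reserved per 2048 PTEs;
 * updating 100000 PTEs gives ncmds = (100000 >> 11) + 1 = 49.)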
*/ ndw = 64; @@ -1301,7 +1319,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct drm_mm_node *nodes, struct dma_fence **fence) { - uint64_t pfn, src = 0, start = mapping->it.start; + uint64_t pfn, src = 0, start = mapping->start; int r; /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here @@ -1353,7 +1371,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } addr += pfn << PAGE_SHIFT; - last = min((uint64_t)mapping->it.last, start + max_entries - 1); + last = min((uint64_t)mapping->last, start + max_entries - 1); r = amdgpu_vm_bo_update_mapping(adev, exclusive, src, pages_addr, vm, start, last, flags, addr, @@ -1368,7 +1386,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } start = last + 1; - } while (unlikely(start != mapping->it.last + 1)); + } while (unlikely(start != mapping->last + 1)); return 0; } @@ -1518,7 +1536,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, if (fence) dma_fence_wait(fence, false); - amdgpu_vm_prt_put(cb->adev); + amdgpu_vm_prt_put(adev); } else { cb->adev = adev; if (!fence || dma_fence_add_callback(fence, &cb->cb, @@ -1724,9 +1742,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, uint64_t saddr, uint64_t offset, uint64_t size, uint64_t flags) { - struct amdgpu_bo_va_mapping *mapping; + struct amdgpu_bo_va_mapping *mapping, *tmp; struct amdgpu_vm *vm = bo_va->vm; - struct interval_tree_node *it; uint64_t eaddr; /* validate the parameters */ @@ -1743,14 +1760,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; - it = interval_tree_iter_first(&vm->va, saddr, eaddr); - if (it) { - struct amdgpu_bo_va_mapping *tmp; - tmp = container_of(it, struct amdgpu_bo_va_mapping, it); + tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); + if (tmp) { /* bo and tmp overlap, invalid addr */ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " - "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, - tmp->it.start, tmp->it.last + 1); + "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr, + tmp->start, tmp->last + 1); return -EINVAL; } @@ -1759,13 +1774,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, return -ENOMEM; INIT_LIST_HEAD(&mapping->list); - mapping->it.start = saddr; - mapping->it.last = eaddr; + mapping->start = saddr; + mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; list_add(&mapping->list, &bo_va->invalids); - interval_tree_insert(&mapping->it, &vm->va); + amdgpu_vm_it_insert(mapping, &vm->va); if (flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); @@ -1823,13 +1838,13 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; - mapping->it.start = saddr; - mapping->it.last = eaddr; + mapping->start = saddr; + mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; list_add(&mapping->list, &bo_va->invalids); - interval_tree_insert(&mapping->it, &vm->va); + amdgpu_vm_it_insert(mapping, &vm->va); if (flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); @@ -1860,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, saddr /= AMDGPU_GPU_PAGE_SIZE; list_for_each_entry(mapping, &bo_va->valids, list) { - if (mapping->it.start == saddr) + if (mapping->start == saddr) break; } @@ -1868,7 +1883,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, valid = false; list_for_each_entry(mapping, &bo_va->invalids, list) { - if (mapping->it.start == saddr) + if (mapping->start == saddr) break; } 
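The interval-tree conversion running through this file replaces the embedded struct interval_tree_node with bare start/last fields plus an INTERVAL_TREE_DEFINE() instantiation. A minimal self-contained sketch of the same pattern (struct and prefix names here are placeholders; the macro is the real one from <linux/interval_tree_generic.h>):

	#include <linux/interval_tree_generic.h>
	#include <linux/rbtree.h>

	struct my_mapping {
		struct rb_node rb;
		u64 start, last;	/* inclusive range [start, last] */
		u64 __subtree_last;	/* maintained by the generated code */
	};

	#define START(n) ((n)->start)
	#define LAST(n)  ((n)->last)

	/* Generates static my_it_insert(), my_it_remove(),
	 * my_it_iter_first() and my_it_iter_next() over an rb_root. */
	INTERVAL_TREE_DEFINE(struct my_mapping, rb, u64, __subtree_last,
			     START, LAST, static, my_it)

This drops one level of indirection (no more container_of() from the iterator) and lets the rbtree walk in amdgpu_vm_fini() use the mapping's own rb node directly.
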
@@ -1877,7 +1892,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, } list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (valid) @@ -1905,7 +1920,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, uint64_t saddr, uint64_t size) { struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; - struct interval_tree_node *it; LIST_HEAD(removed); uint64_t eaddr; @@ -1927,43 +1941,42 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, INIT_LIST_HEAD(&after->list); /* Now gather all removed mappings */ - it = interval_tree_iter_first(&vm->va, saddr, eaddr); - while (it) { - tmp = container_of(it, struct amdgpu_bo_va_mapping, it); - it = interval_tree_iter_next(it, saddr, eaddr); - + tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); + while (tmp) { /* Remember mapping split at the start */ - if (tmp->it.start < saddr) { - before->it.start = tmp->it.start; - before->it.last = saddr - 1; + if (tmp->start < saddr) { + before->start = tmp->start; + before->last = saddr - 1; before->offset = tmp->offset; before->flags = tmp->flags; list_add(&before->list, &tmp->list); } /* Remember mapping split at the end */ - if (tmp->it.last > eaddr) { - after->it.start = eaddr + 1; - after->it.last = tmp->it.last; + if (tmp->last > eaddr) { + after->start = eaddr + 1; + after->last = tmp->last; after->offset = tmp->offset; - after->offset += after->it.start - tmp->it.start; + after->offset += after->start - tmp->start; after->flags = tmp->flags; list_add(&after->list, &tmp->list); } list_del(&tmp->list); list_add(&tmp->list, &removed); + + tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); } /* And free them up */ list_for_each_entry_safe(tmp, next, &removed, list) { - interval_tree_remove(&tmp->it, &vm->va); + amdgpu_vm_it_remove(tmp, &vm->va); list_del(&tmp->list); - if (tmp->it.start < saddr) - tmp->it.start = saddr; - if (tmp->it.last > eaddr) - tmp->it.last = eaddr; + if (tmp->start < saddr) + tmp->start = saddr; + if (tmp->last > eaddr) + tmp->last = eaddr; list_add(&tmp->list, &vm->freed); trace_amdgpu_vm_bo_unmap(NULL, tmp); @@ -1971,7 +1984,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, /* Insert partial mapping before the range */ if (!list_empty(&before->list)) { - interval_tree_insert(&before->it, &vm->va); + amdgpu_vm_it_insert(before, &vm->va); if (before->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); } else { @@ -1980,7 +1993,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, /* Insert partial mapping after the range */ if (!list_empty(&after->list)) { - interval_tree_insert(&after->it, &vm->va); + amdgpu_vm_it_insert(after, &vm->va); if (after->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); } else { @@ -2014,13 +2027,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); trace_amdgpu_vm_bo_unmap(bo_va, mapping); list_add(&mapping->list, &vm->freed); } list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); amdgpu_vm_free_mapping(adev, vm, mapping, bo_va->last_pt_update); } @@ -2051,6 +2064,44 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, } } +static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) +{ + /* Total 
bits covered by PD + PTs */ + unsigned bits = ilog2(vm_size) + 18; + + /* Make sure the PD is 4K in size up to 8GB address space. + Above that split equal between PD and PTs */ + if (vm_size <= 8) + return (bits - 9); + else + return ((bits + 3) / 2); +} + +/** + * amdgpu_vm_adjust_size - adjust vm size and block size + * + * @adev: amdgpu_device pointer + * @vm_size: the default vm size if it's set auto + */ +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size) +{ + /* adjust vm size firstly */ + if (amdgpu_vm_size == -1) + adev->vm_manager.vm_size = vm_size; + else + adev->vm_manager.vm_size = amdgpu_vm_size; + + /* block size depends on vm size */ + if (amdgpu_vm_block_size == -1) + adev->vm_manager.block_size = + amdgpu_vm_get_block_size(adev->vm_manager.vm_size); + else + adev->vm_manager.block_size = amdgpu_vm_block_size; + + DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + adev->vm_manager.vm_size, adev->vm_manager.block_size); +} + /** * amdgpu_vm_init - initialize a vm instance * @@ -2062,7 +2113,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) { const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, - AMDGPU_VM_PTE_COUNT * 8); + AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned ring_instance; struct amdgpu_ring *ring; struct amd_sched_rq *rq; @@ -2162,9 +2213,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) if (!RB_EMPTY_ROOT(&vm->va)) { dev_err(adev->dev, "still active bo inside vm\n"); } - rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) { + rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) { list_del(&mapping->list); - interval_tree_remove(&mapping->it, &vm->va); + amdgpu_vm_it_remove(mapping, &vm->va); kfree(mapping); } list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { @@ -2227,7 +2278,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_NUM_VM; ++i) { struct amdgpu_vm_id *id = &adev->vm_manager.ids[i]; - dma_fence_put(adev->vm_manager.ids[i].first); amdgpu_sync_free(&adev->vm_manager.ids[i].active); dma_fence_put(id->flushed_updates); dma_fence_put(id->last_flush); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index fbe17bf73a00..d9e57290dc71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF /* number of entries in page table */ -#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) +#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size) /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 @@ -76,6 +76,14 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 +/* max number of VMHUB */ +#define AMDGPU_MAX_VMHUBS 2 +#define AMDGPU_GFXHUB 0 +#define AMDGPU_MMHUB 1 + +/* hardcode that limit for now */ +#define AMDGPU_VA_RESERVED_SIZE (8 << 20) + struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; @@ -123,7 +131,6 @@ struct amdgpu_vm { struct amdgpu_vm_id { struct list_head list; - struct dma_fence *first; struct amdgpu_sync active; struct dma_fence *last_flush; atomic64_t owner; @@ -155,6 +162,8 @@ struct amdgpu_vm_manager { uint64_t max_pfn; uint32_t num_level; + uint64_t vm_size; + uint32_t block_size; /* vram base address for page table entry */ u64 vram_base_offset; /* is vm enabled? 
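 *
 * (vm_size and block_size above are filled in by the new
 * amdgpu_vm_adjust_size(): the module parameters win when set,
 * otherwise vm_size takes the per-ASIC default and block_size comes
 * from amdgpu_vm_get_block_size(), e.g. 13 for a 64 GB space.)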
*/ @@ -225,5 +234,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, uint64_t saddr, uint64_t size); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 9e577e3d3147..a4831fe0223b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -93,7 +93,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, const struct ttm_place *place, struct ttm_mem_reg *mem) { - struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo); struct amdgpu_vram_mgr *mgr = man->priv; struct drm_mm *mm = &mgr->mm; struct drm_mm_node *nodes; @@ -106,8 +105,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (!lpfn) lpfn = man->size; - if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS || - place->lpfn || amdgpu_vram_page_split == -1) { + if (place->flags & TTM_PL_FLAG_CONTIGUOUS || + amdgpu_vram_page_split == -1) { pages_per_node = ~0ul; num_nodes = 1; } else { @@ -124,12 +123,14 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (place->flags & TTM_PL_FLAG_TOPDOWN) mode = DRM_MM_INSERT_HIGH; + mem->start = 0; pages_left = mem->num_pages; spin_lock(&mgr->lock); for (i = 0; i < num_nodes; ++i) { unsigned long pages = min(pages_left, pages_per_node); uint32_t alignment = mem->page_alignment; + unsigned long start; if (pages == pages_per_node) alignment = pages_per_node; @@ -141,11 +142,19 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (unlikely(r)) goto error; + /* Calculate a virtual BO start address to easily check if + * everything is CPU accessible. + */ + start = nodes[i].start + nodes[i].size; + if (start > mem->num_pages) + start -= mem->num_pages; + else + start = 0; + mem->start = max(mem->start, start); pages_left -= pages; } spin_unlock(&mgr->lock); - mem->start = num_nodes == 1 ? 
nodes[0].start : AMDGPU_BO_INVALID_OFFSET; mem->mm_node = nodes; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index daf003dd2351..ba98d35340a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1090,23 +1090,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm) a.full = dfixed_const(available_bandwidth); b.full = dfixed_const(wm->num_heads); a.full = dfixed_div(a, b); + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); + tmp = min(dfixed_trunc(a), tmp); - b.full = dfixed_const(mc_latency + 512); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(b, c); - - c.full = dfixed_const(dmif_size); - b.full = dfixed_div(c, b); - - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); - - b.full = dfixed_const(1000); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(c, b); - c.full = dfixed_const(wm->bytes_per_pixel); - b.full = dfixed_mul(b, c); - - lb_fill_bw = min(tmp, dfixed_trunc(b)); + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); b.full = dfixed_const(1000); @@ -1214,14 +1201,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, { struct drm_display_mode *mode = &amdgpu_crtc->base.mode; struct dce10_wm_params wm_low, wm_high; - u32 pixel_period; + u32 active_time; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - pixel_period = 1000000 / (u32)mode->clock; - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { @@ -1236,7 +1223,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, wm_high.disp_clk = mode->clock; wm_high.src_width = mode->crtc_hdisplay; - wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.active_time = active_time; wm_high.blank_time = line_time - wm_high.active_time; wm_high.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -1275,7 +1262,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, wm_low.disp_clk = mode->clock; wm_low.src_width = mode->crtc_hdisplay; - wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.active_time = active_time; wm_low.blank_time = line_time - wm_low.active_time; wm_low.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 3a7296724457..e59bc42df18c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1059,23 +1059,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) a.full = dfixed_const(available_bandwidth); b.full = dfixed_const(wm->num_heads); a.full = dfixed_div(a, b); + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); + tmp = min(dfixed_trunc(a), tmp); - b.full = dfixed_const(mc_latency + 512); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(b, c); - - c.full = dfixed_const(dmif_size); - b.full = dfixed_div(c, b); - - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); - - b.full = dfixed_const(1000); - c.full = 
dfixed_const(wm->disp_clk); - b.full = dfixed_div(c, b); - c.full = dfixed_const(wm->bytes_per_pixel); - b.full = dfixed_mul(b, c); - - lb_fill_bw = min(tmp, dfixed_trunc(b)); + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); b.full = dfixed_const(1000); @@ -1183,14 +1170,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, { struct drm_display_mode *mode = &amdgpu_crtc->base.mode; struct dce10_wm_params wm_low, wm_high; - u32 pixel_period; + u32 active_time; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - pixel_period = 1000000 / (u32)mode->clock; - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { @@ -1205,7 +1192,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, wm_high.disp_clk = mode->clock; wm_high.src_width = mode->crtc_hdisplay; - wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.active_time = active_time; wm_high.blank_time = line_time - wm_high.active_time; wm_high.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -1244,7 +1231,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, wm_low.disp_clk = mode->clock; wm_low.src_width = mode->crtc_hdisplay; - wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.active_time = active_time; wm_low.blank_time = line_time - wm_low.active_time; wm_low.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 8ccada5d6f39..307269bda4fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -861,23 +861,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm) a.full = dfixed_const(available_bandwidth); b.full = dfixed_const(wm->num_heads); a.full = dfixed_div(a, b); + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); + tmp = min(dfixed_trunc(a), tmp); - b.full = dfixed_const(mc_latency + 512); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(b, c); - - c.full = dfixed_const(dmif_size); - b.full = dfixed_div(c, b); - - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); - - b.full = dfixed_const(1000); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(c, b); - c.full = dfixed_const(wm->bytes_per_pixel); - b.full = dfixed_mul(b, c); - - lb_fill_bw = min(tmp, dfixed_trunc(b)); + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); b.full = dfixed_const(1000); @@ -986,7 +973,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, struct drm_display_mode *mode = &amdgpu_crtc->base.mode; struct dce6_wm_params wm_low, wm_high; u32 dram_channels; - u32 pixel_period; + u32 active_time; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; u32 priority_a_mark = 0, priority_b_mark = 0; @@ -996,8 +983,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, fixed20_12 a, b, c; if (amdgpu_crtc->base.enabled && num_heads && mode) { - 
pixel_period = 1000000 / (u32)mode->clock; - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; @@ -1016,7 +1003,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, wm_high.disp_clk = mode->clock; wm_high.src_width = mode->crtc_hdisplay; - wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.active_time = active_time; wm_high.blank_time = line_time - wm_high.active_time; wm_high.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -1043,7 +1030,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, wm_low.disp_clk = mode->clock; wm_low.src_width = mode->crtc_hdisplay; - wm_low.active_time = mode->crtc_hdisplay * pixel_period; + wm_low.active_time = active_time; wm_low.blank_time = line_time - wm_low.active_time; wm_low.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 6943f2641c90..6df7a28e8aac 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -974,23 +974,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm) a.full = dfixed_const(available_bandwidth); b.full = dfixed_const(wm->num_heads); a.full = dfixed_div(a, b); + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); + tmp = min(dfixed_trunc(a), tmp); - b.full = dfixed_const(mc_latency + 512); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(b, c); - - c.full = dfixed_const(dmif_size); - b.full = dfixed_div(c, b); - - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); - - b.full = dfixed_const(1000); - c.full = dfixed_const(wm->disp_clk); - b.full = dfixed_div(c, b); - c.full = dfixed_const(wm->bytes_per_pixel); - b.full = dfixed_mul(b, c); - - lb_fill_bw = min(tmp, dfixed_trunc(b)); + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); b.full = dfixed_const(1000); @@ -1098,14 +1085,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, { struct drm_display_mode *mode = &amdgpu_crtc->base.mode; struct dce8_wm_params wm_low, wm_high; - u32 pixel_period; + u32 active_time; u32 line_time = 0; u32 latency_watermark_a = 0, latency_watermark_b = 0; u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - pixel_period = 1000000 / (u32)mode->clock; - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { @@ -1120,7 +1107,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, wm_high.disp_clk = mode->clock; wm_high.src_width = mode->crtc_hdisplay; - wm_high.active_time = mode->crtc_hdisplay * pixel_period; + wm_high.active_time = active_time; wm_high.blank_time = line_time - wm_high.active_time; wm_high.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -1159,7 +1146,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, wm_low.disp_clk = mode->clock; wm_low.src_width = mode->crtc_hdisplay; - wm_low.active_time = 
mode->crtc_hdisplay * pixel_period; + wm_low.active_time = active_time; wm_low.blank_time = line_time - wm_low.active_time; wm_low.interlaced = false; if (mode->flags & DRM_MODE_FLAG_INTERLACE) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e0fa0d30e162..dad8a4cd1b37 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4565,6 +4565,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); for (i = 0; i < adev->gfx.num_compute_rings; i++) adev->gfx.compute_ring[i].ready = false; + adev->gfx.kiq.ring.ready = false; } udelay(50); } @@ -4721,14 +4722,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring) mqd->cp_hqd_eop_control = tmp; /* enable doorbell? */ - tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); - - if (ring->use_doorbell) - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, - DOORBELL_EN, 1); - else - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, - DOORBELL_EN, 0); + tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL), + CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, + ring->use_doorbell ? 1 : 0); mqd->cp_hqd_pq_doorbell_control = tmp; @@ -4816,13 +4813,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct vi_mqd *mqd = ring->mqd_ptr; - uint32_t tmp; int j; /* disable wptr polling */ - tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); - tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); - WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); + WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0); WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi); @@ -4834,10 +4828,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring) WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); /* disable the queue if it's active */ - if (RREG32(mmCP_HQD_ACTIVE) & 1) { + if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); for (j = 0; j < adev->usec_timeout; j++) { - if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) + if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) break; udelay(1); } @@ -4894,11 +4888,8 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring) /* activate the queue */ WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active); - if (ring->use_doorbell) { - tmp = RREG32(mmCP_PQ_STATUS); - tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); - WREG32(mmCP_PQ_STATUS, tmp); - } + if (ring->use_doorbell) + WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1); return 0; } @@ -5471,19 +5462,18 @@ static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev, { int i; + mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) { - u32 tmp; - tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST); - tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST, - DEQUEUE_REQ, 2); - WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp); + WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2); for (i = 0; i < adev->usec_timeout; i++) { if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK)) break; udelay(1); } } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); } static int gfx_v8_0_pre_soft_reset(void *handle) @@ -5589,11 +5579,13 @@ static int gfx_v8_0_soft_reset(void *handle) static void gfx_v8_0_init_hqd(struct amdgpu_device *adev, 
struct amdgpu_ring *ring) { + mutex_lock(&adev->srbm_mutex); vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); WREG32(mmCP_HQD_PQ_RPTR, 0); WREG32(mmCP_HQD_PQ_WPTR, 0); vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); } static int gfx_v8_0_post_soft_reset(void *handle) @@ -6986,40 +6978,24 @@ static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev, unsigned int type, enum amdgpu_interrupt_state state) { - uint32_t tmp, target; struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ); - if (ring->me == 1) - target = mmCP_ME1_PIPE0_INT_CNTL; - else - target = mmCP_ME2_PIPE0_INT_CNTL; - target += ring->pipe; - switch (type) { case AMDGPU_CP_KIQ_IRQ_DRIVER0: - if (state == AMDGPU_IRQ_STATE_DISABLE) { - tmp = RREG32(mmCPC_INT_CNTL); - tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, - GENERIC2_INT_ENABLE, 0); - WREG32(mmCPC_INT_CNTL, tmp); - - tmp = RREG32(target); - tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, - GENERIC2_INT_ENABLE, 0); - WREG32(target, tmp); - } else { - tmp = RREG32(mmCPC_INT_CNTL); - tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, - GENERIC2_INT_ENABLE, 1); - WREG32(mmCPC_INT_CNTL, tmp); - - tmp = RREG32(target); - tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, - GENERIC2_INT_ENABLE, 1); - WREG32(target, tmp); - } + WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); + if (ring->me == 1) + WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL, + ring->pipe, + GENERIC2_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); + else + WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL, + ring->pipe, + GENERIC2_INT_ENABLE, + state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); break; default: BUG(); /* kiq only support GENERIC2_INT now */ @@ -7159,8 +7135,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_kiq, - .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, - .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate, .test_ring = gfx_v8_0_ring_test_ring, .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 669bb98fc45d..a447b70841c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1288,9 +1288,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) u32 tmp; int i; - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL)); - tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL), tmp); + WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); gfx_v9_0_tiling_mode_table_init(adev); @@ -1395,13 +1393,9 @@ void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev) { - u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET)); - - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp); + WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); udelay(50); - tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp); + WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); udelay(50); } @@ -1410,10 +1404,8 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) #ifdef AMDGPU_RLC_DEBUG_RETRY u32 rlc_ucode_ver; 
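gfx_v8_0_inactive_hqd() and gfx_v8_0_init_hqd() above both gain adev->srbm_mutex, and the first also gains the previously missing deselect. SRBM banking is device-global state, so the idiom is select, program, restore, all under one lock:

mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
/* ... program the mmCP_HQD_* registers of this queue ... */
vi_srbm_select(adev, 0, 0, 0, 0);   /* restore the default bank */
mutex_unlock(&adev->srbm_mutex);

Without the restore, the next unlocked register access would land in whichever queue's bank happened to be selected last.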
#endif - u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL)); - tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp); + WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); /* carrizo do enable cp interrupt after cp inited */ if (!(adev->flags & AMD_IS_APU)) @@ -1497,14 +1489,10 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) int i; u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)); - if (enable) { - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0); - } else { - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); - tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); + tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); + if (!enable) { for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].ready = false; } @@ -2020,13 +2008,10 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; - uint32_t tmp; int j; /* disable wptr polling */ - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL)); - tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp); + WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR), mqd->cp_hqd_eop_base_addr_lo); @@ -2118,11 +2103,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), mqd->cp_hqd_active); - if (ring->use_doorbell) { - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS)); - tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp); - } + if (ring->use_doorbell) + WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); return 0; } @@ -2366,177 +2348,6 @@ static int gfx_v9_0_wait_for_idle(void *handle) return -ETIMEDOUT; } -static void gfx_v9_0_print_status(void *handle) -{ - int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - dev_info(adev->dev, "GFX 9.x registers\n"); - dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS))); - dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2))); - dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0))); - dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1))); - dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2))); - dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3))); - dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STAT))); - dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1))); - dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2))); - dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3))); - dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT))); - 
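gfx_v9_0.c switches to the SOC15 flavour of the same read-modify-write helper: the register offset is first resolved through the per-IP base-address tables. An approximate shape, reusing REG_SET_FIELD from the sketch above and assuming SOC15_REG_OFFSET() from the driver headers:

#define WREG32_FIELD15(ip, idx, reg, field, val)                        \
    WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg),                          \
           REG_SET_FIELD(RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)),    \
                         reg, field, val))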
dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1))); - dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS))); - dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT))); - dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1))); - dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS))); - - for (i = 0; i < 32; i++) { - dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", - i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_TILE_MODE0 ) + i*4)); - } - for (i = 0; i < 16; i++) { - dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", - i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_MACROTILE_MODE0) + i*4)); - } - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - dev_info(adev->dev, " se: %d\n", i); - gfx_v9_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); - dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG))); - dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG_1))); - } - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - - dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))); - - dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEQ_THRESHOLDS))); - dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSX_DEBUG_1))); - dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX))); - dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL))); - dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG))); - dev_info(adev->dev, " DB_DEBUG=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG))); - dev_info(adev->dev, " DB_DEBUG2=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))); - dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG3))); - dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL))); - dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1))); - dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE))); - dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_NUM_INSTANCES))); - dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PERFMON_CNTL))); - dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FORCE_EOV_MAX_CNTS))); - dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION))); - dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_GS_VERTEX_REUSE))); - dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE))); - dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_CL_ENHANCE))); - dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE))); - - dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL))); - dev_info(adev->dev, " 
CP_MAX_CONTEXT=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT))); - dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID))); - - dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_SEM_WAIT_TIMER))); - - dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY))); - dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID))); - dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL))); - dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR))); - dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR))); - dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI))); - dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL))); - dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE))); - dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI))); - dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL))); - - dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_ADDR))); - dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_UMSK))); - - dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0))); - dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL))); - dev_info(adev->dev, " RLC_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL))); - dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL))); - dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_INIT))); - dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_MAX))); - dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_INIT_CU_MASK))); - dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_PARAMS))); - dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL))); - dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_UCODE_CNTL))); - - dev_info(adev->dev, " RLC_GPM_GENERAL_6=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6))); - dev_info(adev->dev, " RLC_GPM_GENERAL_12=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12))); - dev_info(adev->dev, " RLC_GPM_TIMER_INT_3=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3))); - mutex_lock(&adev->srbm_mutex); - for (i = 0; i < 16; i++) { - soc15_grbm_select(adev, 0, 0, 0, i); - dev_info(adev->dev, " VM %d:\n", i); - dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))); - dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", - RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES))); - } - soc15_grbm_select(adev, 0, 0, 0, 0); - mutex_unlock(&adev->srbm_mutex); -} - static int gfx_v9_0_soft_reset(void *handle) { u32 grbm_soft_reset = 0; @@ -2569,8 +2380,7 @@ static int gfx_v9_0_soft_reset(void *handle) GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); - if (grbm_soft_reset ) { - 
gfx_v9_0_print_status((void *)adev); + if (grbm_soft_reset) { /* stop the rlc */ gfx_v9_0_rlc_stop(adev); @@ -2596,7 +2406,6 @@ static int gfx_v9_0_soft_reset(void *handle) /* Wait a little for things to settle down */ udelay(50); - gfx_v9_0_print_status((void *)adev); } return 0; } @@ -3148,6 +2957,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->idx; unsigned i; @@ -3157,7 +2967,6 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; - uint32_t req = hub->get_invalidate_req(vm_id); gfx_v9_0_write_data_to_reg(ring, usepfp, true, hub->ctx0_ptb_addr_lo32 @@ -3376,21 +3185,12 @@ static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl); - break; case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); - cp_int_cntl = - REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - TIME_STAMP_INT_ENABLE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl); + WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, + TIME_STAMP_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); break; default: break; @@ -3446,20 +3246,12 @@ static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl); - break; case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_REG_INT_ENABLE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl); + WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, + PRIV_REG_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); break; default: break; @@ -3473,21 +3265,12 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, unsigned type, enum amdgpu_interrupt_state state) { - u32 cp_int_cntl; - switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl); - break; case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)); - cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, - PRIV_INSTR_INT_ENABLE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl); - break; + WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, + PRIV_INSTR_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 
1 : 0); default: break; } @@ -3759,8 +3542,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence_kiq, - .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, - .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate, .test_ring = gfx_v9_0_ring_test_ring, .test_ib = gfx_v9_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, @@ -3975,9 +3756,7 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring) ring->pipe, ring->queue, 0); /* disable wptr polling */ - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL)); - tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp); + WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); /* write the EOP addr */ BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */ @@ -4121,11 +3900,8 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring) amdgpu_bo_kunmap(ring->mqd_obj); amdgpu_bo_unreserve(ring->mqd_obj); - if (use_doorbell) { - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS)); - tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp); - } + if (use_doorbell) + WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 30ef3126c8a9..005075ff00f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -222,7 +222,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, - amdgpu_vm_block_size - 9); + adev->vm_manager.block_size - 9); WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp); WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0); WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0); @@ -299,36 +299,6 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp); } -static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id) -{ - u32 req = 0; - - /* invalidate using legacy mode on vm_id*/ - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - PER_VMID_INVALIDATE_REQ, 1 << vm_id); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); - - return req; -} - -static uint32_t gfxhub_v1_0_get_vm_protection_bits(void) -{ - return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - 
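The three gfx_v9_0 interrupt-state setters reworked above now rely on deliberate case fall-through: DISABLE and ENABLE share one register write whose value is derived from state. The pattern in miniature:

switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:  /* deliberate fall-through */
case AMDGPU_IRQ_STATE_ENABLE:
    WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
                   state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
    break;
default:
    break;
}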
VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); -} - static int gfxhub_v1_0_early_init(void *handle) { return 0; @@ -361,9 +331,6 @@ static int gfxhub_v1_0_sw_init(void *handle) hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL); - hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req; - hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index d9586601a437..631aef38126d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -543,7 +543,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) WREG32(mmVM_CONTEXT1_CNTL, VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK | (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) | - ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); + ((adev->vm_manager.block_size - 9) + << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT)); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) gmc_v6_0_set_fault_enable_default(adev, false); else @@ -848,7 +849,8 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - adev->vm_manager.max_pfn = amdgpu_vm_size << 18; + amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; adev->mc.mc_mask = 0xffffffffffULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0c0a6015cca5..92abe12d92bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -37,6 +37,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "amdgpu_atombios.h" + static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); static int gmc_v7_0_wait_for_idle(void *handle); @@ -325,48 +327,51 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) */ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) { - u32 tmp; - int chansize, numchan; - - /* Get VRAM informations */ - tmp = RREG32(mmMC_ARB_RAMCFG); - if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { - chansize = 64; - } else { - chansize = 32; - } - tmp = RREG32(mmMC_SHARED_CHMAP); - switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { - case 0: - default: - numchan = 1; - break; - case 1: - numchan = 2; - break; - case 2: - numchan = 4; - break; - case 3: - numchan = 8; - break; - case 4: - numchan = 3; - break; - case 5: - numchan = 6; - break; - case 6: - numchan = 10; - break; - case 7: - numchan = 12; - break; - case 8: - numchan = 16; - break; + adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev); + if (!adev->mc.vram_width) { + u32 tmp; + int chansize, numchan; + + /* Get VRAM informations */ + tmp = RREG32(mmMC_ARB_RAMCFG); + if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { + chansize = 64; + } else { + chansize = 32; + } + tmp = RREG32(mmMC_SHARED_CHMAP); + switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 4; + break; + case 3: + numchan = 8; + break; + case 4: + numchan = 3; + break; + case 5: + numchan = 6; + break; + case 6: + numchan = 10; + break; + case 7: + numchan = 12; + break; + case 8: + numchan = 16; + break; + } + adev->mc.vram_width = numchan * chansize; } - adev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? 
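gmc_v7_0_mc_init() above (and the identical gmc_v8_0 hunk below) now asks the VBIOS for the VRAM width first and only decodes MC_SHARED_CHMAP/MC_ARB_RAMCFG when that returns 0. The fallback switch is just a lookup; a self-contained sketch, with made-up helper names:

#include <stdint.h>

/* NOOFCHAN encoding -> channel count, as in the switch above */
static const int numchan_map[9] = { 1, 2, 4, 8, 3, 6, 10, 12, 16 };

static int vram_width_fallback(uint32_t noofchan, int chansize_is_64)
{
    int numchan = noofchan < 9 ? numchan_map[noofchan] : 1;
    return numchan * (chansize_is_64 ? 64 : 32);
}
/* e.g. NOOFCHAN == 3 with 64-bit channels -> 8 * 64 = a 512-bit bus */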
*/ adev->mc.aper_base = pci_resource_start(adev->pdev, 0); adev->mc.aper_size = pci_resource_len(adev->pdev, 0); @@ -639,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, - amdgpu_vm_block_size - 9); + adev->vm_manager.block_size - 9); WREG32(mmVM_CONTEXT1_CNTL, tmp); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) gmc_v7_0_set_fault_enable_default(adev, false); @@ -998,7 +1003,8 @@ static int gmc_v7_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - adev->vm_manager.max_pfn = amdgpu_vm_size << 18; + amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask * This is the max address of the GPU's diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d19d1c5e2847..f2ccefc66fd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -38,6 +38,8 @@ #include "vid.h" #include "vi.h" +#include "amdgpu_atombios.h" + static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); @@ -487,48 +489,51 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) */ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) { - u32 tmp; - int chansize, numchan; - - /* Get VRAM informations */ - tmp = RREG32(mmMC_ARB_RAMCFG); - if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { - chansize = 64; - } else { - chansize = 32; - } - tmp = RREG32(mmMC_SHARED_CHMAP); - switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { - case 0: - default: - numchan = 1; - break; - case 1: - numchan = 2; - break; - case 2: - numchan = 4; - break; - case 3: - numchan = 8; - break; - case 4: - numchan = 3; - break; - case 5: - numchan = 6; - break; - case 6: - numchan = 10; - break; - case 7: - numchan = 12; - break; - case 8: - numchan = 16; - break; + adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev); + if (!adev->mc.vram_width) { + u32 tmp; + int chansize, numchan; + + /* Get VRAM informations */ + tmp = RREG32(mmMC_ARB_RAMCFG); + if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { + chansize = 64; + } else { + chansize = 32; + } + tmp = RREG32(mmMC_SHARED_CHMAP); + switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 4; + break; + case 3: + numchan = 8; + break; + case 4: + numchan = 3; + break; + case 5: + numchan = 6; + break; + case 6: + numchan = 10; + break; + case 7: + numchan = 12; + break; + case 8: + numchan = 16; + break; + } + adev->mc.vram_width = numchan * chansize; } - adev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? 
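The gmc_v6/v7 sw_init hunks around this point (gmc_v8 follows below) stop reading the amdgpu_vm_size module parameter directly and instead clamp a per-device value via amdgpu_vm_adjust_size(adev, 64). The << 18 that follows converts GiB to a count of 4 KiB page frames; verifying the arithmetic stands alone:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t vm_size_gb = 64;             /* default handed to amdgpu_vm_adjust_size() */
    uint64_t max_pfn = vm_size_gb << 18;

    /* GiB -> bytes is << 30, bytes -> 4 KiB pages is >> 12, net << 18 */
    assert(max_pfn == ((vm_size_gb << 30) >> 12));
    return 0;
}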
*/ adev->mc.aper_base = pci_resource_start(adev->pdev, 0); adev->mc.aper_size = pci_resource_len(adev->pdev, 0); @@ -848,7 +853,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, - amdgpu_vm_block_size - 9); + adev->vm_manager.block_size - 9); WREG32(mmVM_CONTEXT1_CNTL, tmp); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) gmc_v8_0_set_fault_enable_default(adev, false); @@ -1082,7 +1087,8 @@ static int gmc_v8_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - adev->vm_manager.max_pfn = amdgpu_vm_size << 18; + amdgpu_vm_adjust_size(adev, 64); + adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask * This is the max address of the GPU's diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index df69aae99df4..3b045e0b114e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -75,11 +75,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_vmhub *hub; u32 tmp, reg, bits, i; + bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; + switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ hub = &adev->vmhub[AMDGPU_MMHUB]; - bits = hub->get_vm_protection_bits(); for (i = 0; i< 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); @@ -89,7 +96,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, /* GFX HUB */ hub = &adev->vmhub[AMDGPU_GFXHUB]; - bits = hub->get_vm_protection_bits(); for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); @@ -100,7 +106,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ hub = &adev->vmhub[AMDGPU_MMHUB]; - bits = hub->get_vm_protection_bits(); for (i = 0; i< 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); @@ -110,7 +115,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, /* GFX HUB */ hub = &adev->vmhub[AMDGPU_GFXHUB]; - bits = hub->get_vm_protection_bits(); for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); @@ -129,8 +133,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB]; - struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB]; + struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src]; uint32_t status = 0; u64 addr; @@ -138,13 +141,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, addr |= ((u64)entry->src_data[1] & 0xf) << 44; if (!amdgpu_sriov_vf(adev)) { - if (entry->vm_id_src) { - status = RREG32(mmhub->vm_l2_pro_fault_status); - WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1); - } else { - status = 
RREG32(gfxhub->vm_l2_pro_fault_status); - WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1); - } + status = RREG32(hub->vm_l2_pro_fault_status); + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); } if (printk_ratelimit()) { @@ -175,6 +173,25 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs; } +static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id) +{ + u32 req = 0; + + /* invalidate using legacy mode on vm_id*/ + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, + PER_VMID_INVALIDATE_REQ, 1 << vm_id); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); + req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, + CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); + + return req; +} + /* * GART * VMID 0 is the physical GPU addresses as used by the kernel. @@ -204,7 +221,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &adev->vmhub[i]; - u32 tmp = hub->get_invalidate_req(vmid); + u32 tmp = gmc_v9_0_get_invalidate_req(vmid); WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); @@ -337,30 +354,23 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, return pte_flag; } -static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = { - .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb, - .set_pte_pde = gmc_v9_0_gart_set_pte_pde, - .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags -}; - -static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev) -{ - if (adev->gart.gart_funcs == NULL) - adev->gart.gart_funcs = &gmc_v9_0_gart_funcs; -} - static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr) { return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start; } -static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = { +static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = { + .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb, + .set_pte_pde = gmc_v9_0_gart_set_pte_pde, + .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, .adjust_mc_addr = gmc_v9_0_adjust_mc_addr, + .get_invalidate_req = gmc_v9_0_get_invalidate_req, }; -static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev) +static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev) { - adev->mc.mc_funcs = &gmc_v9_0_mc_funcs; + if (adev->gart.gart_funcs == NULL) + adev->gart.gart_funcs = &gmc_v9_0_gart_funcs; } static int gmc_v9_0_early_init(void *handle) @@ -368,7 +378,6 @@ static int gmc_v9_0_early_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gmc_v9_0_set_gart_funcs(adev); - gmc_v9_0_set_mc_funcs(adev); gmc_v9_0_set_irq_funcs(adev); return 0; @@ -511,7 +520,12 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev) * amdkfd will use VMIDs 8-15 */ adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS; - adev->vm_manager.num_level = 3; + + /* TODO: fix num_level for APU when updating vm size and block size */ + if (adev->flags & AMD_IS_APU) + adev->vm_manager.num_level = 1; + else + adev->vm_manager.num_level = 3; amdgpu_vm_manager_init(adev); /* base offset of vram pages */ @@ -543,9 +557,20 @@ static int gmc_v9_0_sw_init(void *handle) if (adev->flags & 
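Two consolidations meet in the gmc_v9_0.c hunks here: the fault handler now picks the hub directly with adev->vmhub[entry->vm_id_src] instead of an if/else pair, and get_invalidate_req() moves out of the per-hub vtable into the chip's gart_funcs. Every emit_vm_flush path touched later in this patch (gfx, sdma, uvd, vce) therefore builds the request once and replays it to each hub, roughly (a fragment, with adev/vmid/eng in scope as in the driver):

uint32_t req = adev->gart.gart_funcs->get_invalidate_req(vmid);
unsigned int i;

for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
    struct amdgpu_vmhub *hub = &adev->vmhub[i];

    /* same request value everywhere; only the register bank differs */
    WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, req);
}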
AMD_IS_APU) { adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + amdgpu_vm_adjust_size(adev, 64); } else { /* XXX Don't know how to get VRAM type yet. */ adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM; + /* + * To fulfill 4-level page support, + * vm size is 256TB (48bit), maximum size of Vega10, + * block size 512 (9bit) + */ + adev->vm_manager.vm_size = 1U << 18; + adev->vm_manager.block_size = 9; + DRM_INFO("vm size is %llu GB, block size is %u-bit\n", + adev->vm_manager.vm_size, + adev->vm_manager.block_size); } /* This interrupt is VMC page fault.*/ @@ -557,14 +582,7 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; - /* Because of four level VMPTs, vm size is at least 512GB. - * The maximum size is 256TB (48bit). - */ - if (amdgpu_vm_size < 512) { - DRM_WARN("VM size is at least 512GB!\n"); - amdgpu_vm_size = 512; - } - adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18; + adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18; /* Set the internal MC address mask * This is the max address of the GPU's diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 266a0f47a908..62684510ddcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -242,7 +242,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev) EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, - amdgpu_vm_block_size - 9); + adev->vm_manager.block_size - 9); WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp); WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0); WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0); @@ -317,36 +317,6 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp); } -static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id) -{ - u32 req = 0; - - /* invalidate using legacy mode on vm_id*/ - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - PER_VMID_INVALIDATE_REQ, 1 << vm_id); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, - CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); - - return req; -} - -static uint32_t mmhub_v1_0_get_vm_protection_bits(void) -{ - return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | - VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); -} - static int mmhub_v1_0_early_init(void *handle) { return 0; @@ -379,9 +349,6 @@ static int mmhub_v1_0_sw_init(void *handle) hub->vm_l2_pro_fault_cntl = SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL); - hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req; - 
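The dGPU branch above hard-codes vm_size = 1 << 18 GiB and block_size = 9 for the 4-level page tables. The numbers are self-consistent; a quick standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t vm_size = 1ULL << 18;      /* GiB: 256 TiB, a 48-bit VA space */
    uint64_t max_pfn = vm_size << 18;   /* 4 KiB page frames */

    assert((vm_size << 30) == (1ULL << 48));    /* 2^18 GiB == 2^48 bytes */
    assert((max_pfn << 12) == (1ULL << 48));    /* same space counted in pages */
    /* block_size 9 -> 2^9 = 512 PTEs per page-table block */
    return 0;
}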
hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index cfd5e54777bb..1493301b6a94 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -28,6 +28,7 @@ #include "vega10/GC/gc_9_0_offset.h" #include "vega10/GC/gc_9_0_sh_mask.h" #include "soc15.h" +#include "vega10_ih.h" #include "soc15_common.h" #include "mxgpu_ai.h" @@ -133,7 +134,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev) return r; } -static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event) +static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event) { int r = 0, timeout = AI_MAILBOX_TIMEDOUT; @@ -172,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_FINI_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { - r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); if (r) return r; } @@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, return 0; } +static int xgpu_ai_request_reset(struct amdgpu_device *adev) +{ + return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); +} + static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev, bool init) { @@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev, return r; } +static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("get ack intr and do nothing.\n"); + return 0; +} + +static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static void xgpu_ai_mailbox_flr_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + + /* wait until RCV_MSG become 3 */ + if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { + pr_err("failed to receive FLR_CMPL\n"); + return; + } + + /* Trigger recovery due to world switch failure */ + amdgpu_sriov_gpu_reset(adev, false); +} + +static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ?
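The FLR plumbing added here keeps the interrupt handler minimal: the receive IRQ only schedules flr_work, and the work item does the slow mailbox poll (bounded by AI_MAILBOX_TIMEDOUT) and the reset from process context, where sleeping is allowed. The shape of the split, with hypothetical *_sketch names and the real bodies elided:

static void flr_work_sketch(struct work_struct *work)
{
    struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
    struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

    /* may sleep: poll for IDH_FLR_NOTIFICATION_CMPL, then reset */
}

static int rcv_irq_sketch(struct amdgpu_device *adev,
                          struct amdgpu_irq_src *source,
                          struct amdgpu_iv_entry *entry)
{
    if (!xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION))
        schedule_work(&adev->virt.flr_work);    /* defer, don't poll here */
    return 0;
}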
1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + int r; + + /* see what event we get */ + r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); + + /* only handle FLR_NOTIFY now */ + if (!r) + schedule_work(&adev->virt.flr_work); + + return 0; +} + +static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = { + .set = xgpu_ai_set_mailbox_ack_irq, + .process = xgpu_ai_mailbox_ack_irq, +}; + +static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = { + .set = xgpu_ai_set_mailbox_rcv_irq, + .process = xgpu_ai_mailbox_rcv_irq, +}; + +void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->virt.ack_irq.num_types = 1; + adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs; + adev->virt.rcv_irq.num_types = 1; + adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs; +} + +int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + return 0; +} + +int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work); + + return 0; +} + +void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev) +{ + amdgpu_irq_put(adev, &adev->virt.ack_irq, 0); + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); +} + const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, + .reset_gpu = xgpu_ai_request_reset, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h index bf8ab8fd4367..9aefc44d2c34 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h @@ -24,7 +24,7 @@ #ifndef __MXGPU_AI_H__ #define __MXGPU_AI_H__ -#define AI_MAILBOX_TIMEDOUT 150000 +#define AI_MAILBOX_TIMEDOUT 5000 enum idh_request { IDH_REQ_GPU_INIT_ACCESS = 1, @@ -44,4 +44,9 @@ enum idh_event { extern const struct amdgpu_virt_ops xgpu_ai_virt_ops; +void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev); +int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev); +int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev); +void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 5191c45ffdf3..c3588d1c7cb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -491,7 +491,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp, ucode_size = ucode->ucode_size; ucode_mem = (uint32_t *)ucode->kaddr; - while (!ucode_size) { + while (ucode_size) { fw_sram_reg_val = RREG32(fw_sram_data_reg_offset); if (*ucode_mem != fw_sram_reg_val) @@ -508,14 +508,10 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp, bool psp_v3_1_smu_reload_quirk(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; - uint32_t reg, reg_val; + uint32_t reg; - 
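The one-character change in psp_v3_1_compare_sram_data() above is a real fix: with while (!ucode_size) the comparison loop never executed for a non-empty ucode, so the SRAM check always reported a match. The fixed loop, with the advance/decrement tail that the hunk context does not show (assumed to follow the usual shape of the function):

while (ucode_size) {
    fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);

    if (*ucode_mem != fw_sram_reg_val)
        return false;   /* mismatch against the on-chip copy */

    ucode_mem++;
    ucode_size--;
}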
reg_val = (smnMP1_FIRMWARE_FLAGS & 0xffffffff) | 0x03b00000; - WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg_val); + reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000; + WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg); reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)); - if ((reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> - MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) - return true; - - return false; + return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 2dd2b20d727e..21f38d882335 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1039,6 +1039,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->idx; unsigned i; @@ -1048,7 +1049,6 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring, for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; - uint32_t req = hub->get_invalidate_req(vm_id); amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index bb14a45997b5..385de8617075 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -106,6 +106,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) if (adev->asic_type == CHIP_VEGA10) nbio_pcie_id = &nbio_v6_1_pcie_index_data; + else + BUG(); address = nbio_pcie_id->index_offset; data = nbio_pcie_id->data_offset; @@ -125,6 +127,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) if (adev->asic_type == CHIP_VEGA10) nbio_pcie_id = &nbio_v6_1_pcie_index_data; + else + BUG(); address = nbio_pcie_id->index_offset; data = nbio_pcie_id->data_offset; @@ -493,7 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block); amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_ip_block_add(adev, &vega10_ih_ip_block); - amdgpu_ip_block_add(adev, &psp_v3_1_ip_block); + if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1) + amdgpu_ip_block_add(adev, &psp_v3_1_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) @@ -558,6 +563,7 @@ static int soc15_common_early_init(void *handle) if (amdgpu_sriov_vf(adev)) { amdgpu_virt_init_setting(adev); + xgpu_ai_mailbox_set_irq_funcs(adev); } /* @@ -610,8 +616,23 @@ static int soc15_common_early_init(void *handle) return 0; } +static int soc15_common_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_ai_mailbox_get_irq(adev); + + return 0; +} + static int soc15_common_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_ai_mailbox_add_irq_id(adev); + return 0; } @@ -642,6 +663,8 @@ static int soc15_common_hw_fini(void *handle) /* disable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, false); + if (amdgpu_sriov_vf(adev)) + xgpu_ai_mailbox_put_irq(adev); return 0; } @@ -855,7 +878,7 @@ static int soc15_common_set_powergating_state(void *handle, const struct 
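The soc15.c changes above thread the SR-IOV mailbox through the standard IP-block lifecycle; a comment-form map of who calls what (SR-IOV paths only):

/*
 * soc15_common_sw_init()   -> xgpu_ai_mailbox_add_irq_id()  register IV sources 135/138
 * soc15_common_late_init() -> xgpu_ai_mailbox_get_irq()     enable both, INIT_WORK(flr_work)
 * soc15_common_hw_fini()   -> xgpu_ai_mailbox_put_irq()     disable both again
 */

Note also the get path's unwind in mxgpu_ai.c: if enabling ack_irq fails, rcv_irq is put back before returning, so a half-initialized state never leaks out of xgpu_ai_mailbox_get_irq().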
amd_ip_funcs soc15_common_ip_funcs = { .name = "soc15_common", .early_init = soc15_common_early_init, - .late_init = NULL, + .late_init = soc15_common_late_init, .sw_init = soc15_common_sw_init, .sw_fini = soc15_common_sw_fini, .hw_init = soc15_common_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 9a4129d881aa..8ab0f78794a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -135,12 +135,9 @@ static int uvd_v4_2_sw_fini(void *handle) if (r) return r; - r = amdgpu_uvd_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_uvd_sw_fini(adev); } + static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, bool enable); /** @@ -230,11 +227,7 @@ static int uvd_v4_2_suspend(void *handle) if (r) return r; - r = amdgpu_uvd_suspend(adev); - if (r) - return r; - - return r; + return amdgpu_uvd_suspend(adev); } static int uvd_v4_2_resume(void *handle) @@ -246,11 +239,7 @@ static int uvd_v4_2_resume(void *handle) if (r) return r; - r = uvd_v4_2_hw_init(adev); - if (r) - return r; - - return r; + return uvd_v4_2_hw_init(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index e448f7d86bc0..bb6d46e168a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -131,11 +131,7 @@ static int uvd_v5_0_sw_fini(void *handle) if (r) return r; - r = amdgpu_uvd_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_uvd_sw_fini(adev); } /** @@ -228,11 +224,7 @@ static int uvd_v5_0_suspend(void *handle) return r; uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE); - r = amdgpu_uvd_suspend(adev); - if (r) - return r; - - return r; + return amdgpu_uvd_suspend(adev); } static int uvd_v5_0_resume(void *handle) @@ -244,11 +236,7 @@ static int uvd_v5_0_resume(void *handle) if (r) return r; - r = uvd_v5_0_hw_init(adev); - if (r) - return r; - - return r; + return uvd_v5_0_hw_init(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 5679a4249bd9..31db356476f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -134,11 +134,7 @@ static int uvd_v6_0_sw_fini(void *handle) if (r) return r; - r = amdgpu_uvd_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_uvd_sw_fini(adev); } /** @@ -230,11 +226,8 @@ static int uvd_v6_0_suspend(void *handle) return r; /* Skip this for APU for now */ - if (!(adev->flags & AMD_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) r = amdgpu_uvd_suspend(adev); - if (r) - return r; - } return r; } @@ -250,11 +243,7 @@ static int uvd_v6_0_resume(void *handle) if (r) return r; } - r = uvd_v6_0_hw_init(adev); - if (r) - return r; - - return r; + return uvd_v6_0_hw_init(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 13f52e0af9b8..9bcf01469282 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -438,11 +438,7 @@ static int uvd_v7_0_sw_fini(void *handle) for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.ring_enc[i]); - r = amdgpu_uvd_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_uvd_sw_fini(adev); } /** @@ -547,11 +543,8 @@ static int uvd_v7_0_suspend(void *handle) return r; /* Skip this for APU for now */ - if (!(adev->flags & AMD_IS_APU)) { + if (!(adev->flags & AMD_IS_APU)) r = amdgpu_uvd_suspend(adev); - if (r) - 
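The uvd_v4_2/uvd_v5_0 cleanups above (and the uvd_v6/v7 and vce ones that follow) are all one mechanical transform, shown once here for reference:

/* before: the check is dead weight, r is returned either way */
r = amdgpu_uvd_sw_fini(adev);
if (r)
    return r;
return r;

/* after */
return amdgpu_uvd_sw_fini(adev);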
return r; - } return r; } @@ -567,11 +560,7 @@ static int uvd_v7_0_resume(void *handle) if (r) return r; } - r = uvd_v7_0_hw_init(adev); - if (r) - return r; - - return r; + return uvd_v7_0_hw_init(adev); } /** @@ -1045,6 +1034,7 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring, static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vm_id, uint64_t pd_addr) { + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); uint32_t data0, data1, mask; unsigned eng = ring->idx; unsigned i; @@ -1055,7 +1045,6 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; - uint32_t req = hub->get_invalidate_req(vm_id); data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2; data1 = upper_32_bits(pd_addr); @@ -1091,6 +1080,7 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vm_id, uint64_t pd_addr) { + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->idx; unsigned i; @@ -1100,7 +1090,6 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; - uint32_t req = hub->get_invalidate_req(vm_id); amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 49a6c45e65be..47f70827195b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -451,11 +451,7 @@ static int vce_v2_0_sw_fini(void *handle) if (r) return r; - r = amdgpu_vce_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_vce_sw_fini(adev); } static int vce_v2_0_hw_init(void *handle) @@ -495,11 +491,7 @@ static int vce_v2_0_suspend(void *handle) if (r) return r; - r = amdgpu_vce_suspend(adev); - if (r) - return r; - - return r; + return amdgpu_vce_suspend(adev); } static int vce_v2_0_resume(void *handle) @@ -511,11 +503,7 @@ static int vce_v2_0_resume(void *handle) if (r) return r; - r = vce_v2_0_hw_init(adev); - if (r) - return r; - - return r; + return vce_v2_0_hw_init(adev); } static int vce_v2_0_soft_reset(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index db0adac073c6..fb0819359909 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -417,11 +417,7 @@ static int vce_v3_0_sw_fini(void *handle) if (r) return r; - r = amdgpu_vce_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_vce_sw_fini(adev); } static int vce_v3_0_hw_init(void *handle) @@ -471,11 +467,7 @@ static int vce_v3_0_suspend(void *handle) if (r) return r; - r = amdgpu_vce_suspend(adev); - if (r) - return r; - - return r; + return amdgpu_vce_suspend(adev); } static int vce_v3_0_resume(void *handle) @@ -487,11 +479,7 @@ static int vce_v3_0_resume(void *handle) if (r) return r; - r = vce_v3_0_hw_init(adev); - if (r) - return r; - - return r; + return vce_v3_0_hw_init(adev); } static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index becc5f744a98..edde5fe938d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -527,11 +527,7 @@ static int vce_v4_0_sw_fini(void 
*handle) if (r) return r; - r = amdgpu_vce_sw_fini(adev); - if (r) - return r; - - return r; + return amdgpu_vce_sw_fini(adev); } static int vce_v4_0_hw_init(void *handle) @@ -584,11 +580,7 @@ static int vce_v4_0_suspend(void *handle) if (r) return r; - r = amdgpu_vce_suspend(adev); - if (r) - return r; - - return r; + return amdgpu_vce_suspend(adev); } static int vce_v4_0_resume(void *handle) @@ -600,11 +592,7 @@ static int vce_v4_0_resume(void *handle) if (r) return r; - r = vce_v4_0_hw_init(adev); - if (r) - return r; - - return r; + return vce_v4_0_hw_init(adev); } static void vce_v4_0_mc_resume(struct amdgpu_device *adev) @@ -985,6 +973,7 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring) static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vm_id, uint64_t pd_addr) { + uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id); unsigned eng = ring->idx; unsigned i; @@ -994,7 +983,6 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring, for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[i]; - uint32_t req = hub->get_invalidate_req(vm_id); amdgpu_ring_write(ring, VCE_CMD_REG_WRITE); amdgpu_ring_write(ring, diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index b3a86e0e96e6..5f2ab9c1609a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -362,7 +362,89 @@ #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 #define PACKET3_SWITCH_BUFFER 0x8B #define PACKET3_SET_RESOURCES 0xA0 +/* 1. header + * 2. CONTROL + * 3. QUEUE_MASK_LO [31:0] + * 4. QUEUE_MASK_HI [31:0] + * 5. GWS_MASK_LO [31:0] + * 6. GWS_MASK_HI [31:0] + * 7. OAC_MASK [15:0] + * 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0] + */ +# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0) +# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16) +# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29) #define PACKET3_MAP_QUEUES 0xA2 +/* 1. header + * 2. CONTROL + * 3. CONTROL2 + * 4. MQD_ADDR_LO [31:0] + * 5. MQD_ADDR_HI [31:0] + * 6. WPTR_ADDR_LO [31:0] + * 7. WPTR_ADDR_HI [31:0] + */ +/* CONTROL */ +# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4) +# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8) +# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21) +# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24) +# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26) +# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29) +/* CONTROL2 */ +# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1) +# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2) +# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 26) +# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 29) +# define PACKET3_MAP_QUEUES_ME(x) ((x) << 31) +#define PACKET3_UNMAP_QUEUES 0xA3 +/* 1. header + * 2. CONTROL + * 3. CONTROL2 + * 4. CONTROL3 + * 5. CONTROL4 + * 6. 
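The new PACKET3_* helpers in the vid.h hunk here build command DWORDs by shifting each field into position. A hypothetical MAP_QUEUES CONTROL word, purely to show the composition (all field values below are made up; what each value means is defined by the CP microcode, not by this header):

uint32_t control = PACKET3_MAP_QUEUES_QUEUE_SEL(0) |
                   PACKET3_MAP_QUEUES_VMID(0) |
                   PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                   PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                   PACKET3_MAP_QUEUES_ENGINE_SEL(0) |
                   PACKET3_MAP_QUEUES_NUM_QUEUES(1);

The shift positions (4, 8, 21, 24, 26, 29) keep the fields disjoint, so the ORs cannot clobber one another.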
CONTROL5 + */ +/* CONTROL */ +# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0) + /* 0 - PREEMPT_QUEUES + * 1 - RESET_QUEUES + * 2 - DISABLE_PROCESS_QUEUES + * 3 - PREEMPT_QUEUES_NO_UNMAP + */ +# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4) +# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26) +# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29) +/* CONTROL2a */ +# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0) +/* CONTROL2b */ +# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2) +/* CONTROL3a */ +# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2) +/* CONTROL3b */ +# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0) +/* CONTROL4 */ +# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2) +/* CONTROL5 */ +# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2) +#define PACKET3_QUERY_STATUS 0xA4 +/* 1. header + * 2. CONTROL + * 3. CONTROL2 + * 4. ADDR_LO [31:0] + * 5. ADDR_HI [31:0] + * 6. DATA_LO [31:0] + * 7. DATA_HI [31:0] + */ +/* CONTROL */ +# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0) +# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28) +# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30) +/* CONTROL2a */ +# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0) +/* CONTROL2b */ +# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2) +# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25) + #define VCE_CMD_NO_OP 0x00000000 #define VCE_CMD_END 0x00000001 diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index dfd4fe6f0578..9da5b0bb66d8 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -493,8 +493,10 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, { enum amd_pm_state_type ps; - if (input == NULL) - return -EINVAL; + if (input == NULL) { + ret = -EINVAL; + break; + } ps = *(unsigned long *)input; data.requested_ui_label = power_state_convert(ps); @@ -539,15 +541,19 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) switch (state->classification.ui_label) { case PP_StateUILabel_Battery: pm_type = POWER_STATE_TYPE_BATTERY; + break; case PP_StateUILabel_Balanced: pm_type = POWER_STATE_TYPE_BALANCED; + break; case PP_StateUILabel_Performance: pm_type = POWER_STATE_TYPE_PERFORMANCE; + break; default: if (state->classification.flags & PP_StateClassificationFlag_Boot) pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; else pm_type = POWER_STATE_TYPE_DEFAULT; + break; } mutex_unlock(&pp_handle->pp_lock); @@ -894,7 +900,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) mutex_lock(&pp_handle->pp_lock); ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); - mutex_lock(&pp_handle->pp_lock); + mutex_unlock(&pp_handle->pp_lock); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h index 8e53d3a5e725..6a907c93fd9c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h @@ -250,6 +250,29 @@ typedef struct _ATOM_Vega10_Fan_Table { USHORT usFanStartTemperature; } ATOM_Vega10_Fan_Table; +typedef struct _ATOM_Vega10_Fan_Table_V2 { + UCHAR ucRevId; + USHORT usFanOutputSensitivity; + USHORT usFanAcousticLimitRpm; + USHORT usThrottlingRPM; + USHORT usTargetTemperature; + USHORT usMinimumPWMLimit; + USHORT usTargetGfxClk; + USHORT usFanGainEdge; + USHORT usFanGainHotspot; + USHORT usFanGainLiquid; + USHORT usFanGainVrVddc; + 
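The PACKET3_* helpers above are plain shift macros: each field is OR-ed into its slot of a packet CONTROL dword. A hedged illustration of how a caller might assemble the MAP_QUEUES CONTROL pair; all field values and the doorbell/pipe variables are made up for the example, not taken from the driver:

    uint32_t doorbell = 0x40, pipe = 0;

    uint32_t control = PACKET3_MAP_QUEUES_QUEUE_SEL(0) |
                       PACKET3_MAP_QUEUES_VMID(0) |
                       PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                       PACKET3_MAP_QUEUES_ENGINE_SEL(0) |
                       PACKET3_MAP_QUEUES_NUM_QUEUES(1);
    uint32_t control2 = PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell) |
                        PACKET3_MAP_QUEUES_PIPE(pipe) |
                        PACKET3_MAP_QUEUES_ME(0);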
USHORT usFanGainVrMvdd; + USHORT usFanGainPlx; + USHORT usFanGainHbm; + UCHAR ucEnableZeroRPM; + USHORT usFanStopTemperature; + USHORT usFanStartTemperature; + UCHAR ucFanParameters; + UCHAR ucFanMinRPM; + UCHAR ucFanMaxRPM; +} ATOM_Vega10_Fan_Table_V2; + typedef struct _ATOM_Vega10_Thermal_Controller { UCHAR ucRevId; UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/ @@ -305,6 +328,33 @@ typedef struct _ATOM_Vega10_PowerTune_Table { USHORT usTemperatureLimitTedge; } ATOM_Vega10_PowerTune_Table; +typedef struct _ATOM_Vega10_PowerTune_Table_V2 +{ + UCHAR ucRevId; + USHORT usSocketPowerLimit; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usTdcLimit; + USHORT usEdcLimit; + USHORT usSoftwareShutdownTemp; + USHORT usTemperatureLimitHotSpot; + USHORT usTemperatureLimitLiquid1; + USHORT usTemperatureLimitLiquid2; + USHORT usTemperatureLimitHBM; + USHORT usTemperatureLimitVrSoc; + USHORT usTemperatureLimitVrMem; + USHORT usTemperatureLimitPlx; + USHORT usLoadLineResistance; + UCHAR ucLiquid1_I2C_address; + UCHAR ucLiquid2_I2C_address; + UCHAR ucLiquid_I2C_Line; + UCHAR ucVr_I2C_address; + UCHAR ucVr_I2C_Line; + UCHAR ucPlx_I2C_address; + UCHAR ucPlx_I2C_Line; + USHORT usTemperatureLimitTedge; +} ATOM_Vega10_PowerTune_Table_V2; + typedef struct _ATOM_Vega10_Hard_Limit_Record { ULONG ulSOCCLKLimit; ULONG ulGFXCLKLimit; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 518634f995e7..8b55ae01132d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -116,14 +116,16 @@ static int init_thermal_controller( const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) { const ATOM_Vega10_Thermal_Controller *thermal_controller; - const ATOM_Vega10_Fan_Table *fan_table; + const Vega10_PPTable_Generic_SubTable_Header *header; + const ATOM_Vega10_Fan_Table *fan_table_v1; + const ATOM_Vega10_Fan_Table_V2 *fan_table_v2; thermal_controller = (ATOM_Vega10_Thermal_Controller *) (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usThermalControllerOffset)); PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0), - "Thermal controller table not set!", return -1); + "Thermal controller table not set!", return -EINVAL); hwmgr->thermal_controller.ucType = thermal_controller->ucType; hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine; @@ -142,6 +144,9 @@ static int init_thermal_controller( hwmgr->thermal_controller.fanInfo.ulMaxRPM = thermal_controller->ucFanMaxRPM * 100UL; + hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay + = 100000; + set_hw_cap( hwmgr, ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, @@ -150,54 +155,101 @@ static int init_thermal_controller( if (!powerplay_table->usFanTableOffset) return 0; - fan_table = (const ATOM_Vega10_Fan_Table *) + header = (const Vega10_PPTable_Generic_SubTable_Header *) (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usFanTableOffset)); - PP_ASSERT_WITH_CODE((fan_table->ucRevId >= 8), - "Invalid Input Fan Table!", return -1); + if (header->ucRevId == 10) { + fan_table_v1 = (ATOM_Vega10_Fan_Table *)header; - hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay - = 100000; - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_MicrocodeFanControl); - - hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = - 
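The pptable parser below stops hard-coding a single fan-table layout: it reads a generic subtable header first and dispatches on its revision id, so the v1 and v2 structures can live behind the same usFanTableOffset. A minimal sketch of that dispatch, assuming hypothetical parse_fan_table_v1()/parse_fan_table_v2() helpers:

    const Vega10_PPTable_Generic_SubTable_Header *header =
            (const void *)((const uint8_t *)powerplay_table +
                           le16_to_cpu(powerplay_table->usFanTableOffset));

    if (header->ucRevId == 10)
            parse_fan_table_v1((const ATOM_Vega10_Fan_Table *)header);
    else if (header->ucRevId > 10)
            parse_fan_table_v2((const ATOM_Vega10_Fan_Table_V2 *)header);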
le16_to_cpu(fan_table->usFanOutputSensitivity); - hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = - le16_to_cpu(fan_table->usFanRPMMax); - hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = - le16_to_cpu(fan_table->usThrottlingRPM); - hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = - le32_to_cpu((uint32_t)(fan_table->usFanAcousticLimit)); - hwmgr->thermal_controller.advanceFanControlParameters.usTMax = - le16_to_cpu(fan_table->usTargetTemperature); - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = - le16_to_cpu(fan_table->usMinimumPWMLimit); - hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = - le32_to_cpu((uint32_t)(fan_table->usTargetGfxClk)); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = - le16_to_cpu(fan_table->usFanGainEdge); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = - le16_to_cpu(fan_table->usFanGainHotspot); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = - le16_to_cpu(fan_table->usFanGainLiquid); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = - le16_to_cpu(fan_table->usFanGainVrVddc); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = - le16_to_cpu(fan_table->usFanGainVrMvdd); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = - le16_to_cpu(fan_table->usFanGainPlx); - hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = - le16_to_cpu(fan_table->usFanGainHbm); - - hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = - fan_table->ucEnableZeroRPM; - hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = - le16_to_cpu(fan_table->usFanStopTemperature); - hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = - le16_to_cpu(fan_table->usFanStartTemperature); + PP_ASSERT_WITH_CODE((fan_table_v1->ucRevId >= 8), + "Invalid Input Fan Table!", return -EINVAL); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table_v1->usFanOutputSensitivity); + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + le16_to_cpu(fan_table_v1->usFanRPMMax); + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = + le16_to_cpu(fan_table_v1->usThrottlingRPM); + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = + le16_to_cpu(fan_table_v1->usFanAcousticLimit); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table_v1->usTargetTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table_v1->usMinimumPWMLimit); + hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = + le16_to_cpu(fan_table_v1->usTargetGfxClk); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = + le16_to_cpu(fan_table_v1->usFanGainEdge); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = + le16_to_cpu(fan_table_v1->usFanGainHotspot); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = + le16_to_cpu(fan_table_v1->usFanGainLiquid); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = + le16_to_cpu(fan_table_v1->usFanGainVrVddc); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = + 
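Every multi-byte field pulled out of the ATOM table goes through le16_to_cpu(), because the firmware blob is little-endian regardless of the host CPU; on a big-endian machine a raw load would silently byte-swap the values. The idiom in isolation:

    /* fan_table_v1 points into a little-endian firmware image. */
    uint16_t rpm_max = le16_to_cpu(fan_table_v1->usFanRPMMax);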
le16_to_cpu(fan_table_v1->usFanGainVrMvdd); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = + le16_to_cpu(fan_table_v1->usFanGainPlx); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = + le16_to_cpu(fan_table_v1->usFanGainHbm); + + hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = + fan_table_v1->ucEnableZeroRPM; + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = + le16_to_cpu(fan_table_v1->usFanStopTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = + le16_to_cpu(fan_table_v1->usFanStartTemperature); + } else if (header->ucRevId > 10) { + fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header; + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + fan_table_v2->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v2->ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v2->ucFanMaxRPM * 100UL; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table_v2->usFanOutputSensitivity); + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + fan_table_v2->ucFanMaxRPM * 100UL; + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = + le16_to_cpu(fan_table_v2->usThrottlingRPM); + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = + le16_to_cpu(fan_table_v2->usFanAcousticLimitRpm); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table_v2->usTargetTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table_v2->usMinimumPWMLimit); + hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = + le16_to_cpu(fan_table_v2->usTargetGfxClk); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = + le16_to_cpu(fan_table_v2->usFanGainEdge); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = + le16_to_cpu(fan_table_v2->usFanGainHotspot); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = + le16_to_cpu(fan_table_v2->usFanGainLiquid); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = + le16_to_cpu(fan_table_v2->usFanGainVrVddc); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = + le16_to_cpu(fan_table_v2->usFanGainVrMvdd); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = + le16_to_cpu(fan_table_v2->usFanGainPlx); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = + le16_to_cpu(fan_table_v2->usFanGainHbm); + + hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = + fan_table_v2->ucEnableZeroRPM; + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = + le16_to_cpu(fan_table_v2->usFanStopTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = + le16_to_cpu(fan_table_v2->usFanStartTemperature); + } return 0; } @@ -261,6 +313,48 @@ static int get_mm_clock_voltage_table( return 0; } +static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda) +{ + switch(line){ + case Vega10_I2CLineID_DDC1: + *scl = Vega10_I2C_DDC1CLK; + *sda = Vega10_I2C_DDC1DATA; + break; + case Vega10_I2CLineID_DDC2: + *scl = 
Vega10_I2C_DDC2CLK; + *sda = Vega10_I2C_DDC2DATA; + break; + case Vega10_I2CLineID_DDC3: + *scl = Vega10_I2C_DDC3CLK; + *sda = Vega10_I2C_DDC3DATA; + break; + case Vega10_I2CLineID_DDC4: + *scl = Vega10_I2C_DDC4CLK; + *sda = Vega10_I2C_DDC4DATA; + break; + case Vega10_I2CLineID_DDC5: + *scl = Vega10_I2C_DDC5CLK; + *sda = Vega10_I2C_DDC5DATA; + break; + case Vega10_I2CLineID_DDC6: + *scl = Vega10_I2C_DDC6CLK; + *sda = Vega10_I2C_DDC6DATA; + break; + case Vega10_I2CLineID_SCLSDA: + *scl = Vega10_I2C_SCL; + *sda = Vega10_I2C_SDA; + break; + case Vega10_I2CLineID_DDCVGA: + *scl = Vega10_I2C_DDCVGACLK; + *sda = Vega10_I2C_DDCVGADATA; + break; + default: + *scl = 0; + *sda = 0; + break; + } +} + static int get_tdp_table( struct pp_hwmgr *hwmgr, struct phm_tdp_table **info_tdp_table, @@ -268,59 +362,99 @@ static int get_tdp_table( { uint32_t table_size; struct phm_tdp_table *tdp_table; - - const ATOM_Vega10_PowerTune_Table *power_tune_table = - (ATOM_Vega10_PowerTune_Table *)table; - - table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table); - hwmgr->dyn_state.cac_dtp_table = (struct phm_cac_tdp_table *) - kzalloc(table_size, GFP_KERNEL); - - if (!hwmgr->dyn_state.cac_dtp_table) - return -ENOMEM; + uint8_t scl; + uint8_t sda; + const ATOM_Vega10_PowerTune_Table *power_tune_table; + const ATOM_Vega10_PowerTune_Table_V2 *power_tune_table_v2; table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table); + tdp_table = kzalloc(table_size, GFP_KERNEL); - if (!tdp_table) { - kfree(hwmgr->dyn_state.cac_dtp_table); - hwmgr->dyn_state.cac_dtp_table = NULL; + if (!tdp_table) return -ENOMEM; - } - tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit); - tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit); - tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit); - tdp_table->usSoftwareShutdownTemp = - le16_to_cpu(power_tune_table->usSoftwareShutdownTemp); - tdp_table->usTemperatureLimitTedge = - le16_to_cpu(power_tune_table->usTemperatureLimitTedge); - tdp_table->usTemperatureLimitHotspot = - le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot); - tdp_table->usTemperatureLimitLiquid1 = - le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1); - tdp_table->usTemperatureLimitLiquid2 = - le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2); - tdp_table->usTemperatureLimitHBM = - le16_to_cpu(power_tune_table->usTemperatureLimitHBM); - tdp_table->usTemperatureLimitVrVddc = - le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc); - tdp_table->usTemperatureLimitVrMvdd = - le16_to_cpu(power_tune_table->usTemperatureLimitVrMem); - tdp_table->usTemperatureLimitPlx = - le16_to_cpu(power_tune_table->usTemperatureLimitPlx); - tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address; - tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address; - tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL; - tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA; - tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address; - tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL; - tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA; - tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address; - tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL; - tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA; - - hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance; + if (table->ucRevId == 5) { + 
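get_scl_sda_value() above is a pure mapping from an I2C line id to its (SCL, SDA) pin pair. An equivalent table-driven sketch (not the code the driver uses) makes the pairing explicit while keeping the default of zeroing both pins for unknown ids:

    static const struct { uint8_t id, scl, sda; } i2c_lines[] = {
            { Vega10_I2CLineID_DDC1,   Vega10_I2C_DDC1CLK,   Vega10_I2C_DDC1DATA },
            { Vega10_I2CLineID_DDC2,   Vega10_I2C_DDC2CLK,   Vega10_I2C_DDC2DATA },
            { Vega10_I2CLineID_DDC3,   Vega10_I2C_DDC3CLK,   Vega10_I2C_DDC3DATA },
            { Vega10_I2CLineID_DDC4,   Vega10_I2C_DDC4CLK,   Vega10_I2C_DDC4DATA },
            { Vega10_I2CLineID_DDC5,   Vega10_I2C_DDC5CLK,   Vega10_I2C_DDC5DATA },
            { Vega10_I2CLineID_DDC6,   Vega10_I2C_DDC6CLK,   Vega10_I2C_DDC6DATA },
            { Vega10_I2CLineID_SCLSDA, Vega10_I2C_SCL,       Vega10_I2C_SDA },
            { Vega10_I2CLineID_DDCVGA, Vega10_I2C_DDCVGACLK, Vega10_I2C_DDCVGADATA },
    };

    static void get_scl_sda_value_tbl(uint8_t line, uint8_t *scl, uint8_t *sda)
    {
            size_t i;

            *scl = *sda = 0;
            for (i = 0; i < ARRAY_SIZE(i2c_lines); i++) {
                    if (i2c_lines[i].id == line) {
                            *scl = i2c_lines[i].scl;
                            *sda = i2c_lines[i].sda;
                            return;
                    }
            }
    }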
power_tune_table = (ATOM_Vega10_PowerTune_Table *)table; + tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit); + tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit); + tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit); + tdp_table->usSoftwareShutdownTemp = + le16_to_cpu(power_tune_table->usSoftwareShutdownTemp); + tdp_table->usTemperatureLimitTedge = + le16_to_cpu(power_tune_table->usTemperatureLimitTedge); + tdp_table->usTemperatureLimitHotspot = + le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot); + tdp_table->usTemperatureLimitLiquid1 = + le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1); + tdp_table->usTemperatureLimitLiquid2 = + le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2); + tdp_table->usTemperatureLimitHBM = + le16_to_cpu(power_tune_table->usTemperatureLimitHBM); + tdp_table->usTemperatureLimitVrVddc = + le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc); + tdp_table->usTemperatureLimitVrMvdd = + le16_to_cpu(power_tune_table->usTemperatureLimitVrMem); + tdp_table->usTemperatureLimitPlx = + le16_to_cpu(power_tune_table->usTemperatureLimitPlx); + tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address; + tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address; + tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL; + tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA; + tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address; + tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL; + tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA; + tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address; + tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL; + tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA; + hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance; + } else { + power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table; + tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit); + tdp_table->usTDC = le16_to_cpu(power_tune_table_v2->usTdcLimit); + tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v2->usEdcLimit); + tdp_table->usSoftwareShutdownTemp = + le16_to_cpu(power_tune_table_v2->usSoftwareShutdownTemp); + tdp_table->usTemperatureLimitTedge = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitTedge); + tdp_table->usTemperatureLimitHotspot = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitHotSpot); + tdp_table->usTemperatureLimitLiquid1 = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid1); + tdp_table->usTemperatureLimitLiquid2 = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid2); + tdp_table->usTemperatureLimitHBM = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitHBM); + tdp_table->usTemperatureLimitVrVddc = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrSoc); + tdp_table->usTemperatureLimitVrMvdd = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrMem); + tdp_table->usTemperatureLimitPlx = + le16_to_cpu(power_tune_table_v2->usTemperatureLimitPlx); + tdp_table->ucLiquid1_I2C_address = power_tune_table_v2->ucLiquid1_I2C_address; + tdp_table->ucLiquid2_I2C_address = power_tune_table_v2->ucLiquid2_I2C_address; + + get_scl_sda_value(power_tune_table_v2->ucLiquid_I2C_Line, &scl, &sda); + + tdp_table->ucLiquid_I2C_Line = scl; + tdp_table->ucLiquid_I2C_LineSDA = sda; + + tdp_table->ucVr_I2C_address = 
power_tune_table_v2->ucVr_I2C_address; + + get_scl_sda_value(power_tune_table_v2->ucVr_I2C_Line, &scl, &sda); + + tdp_table->ucVr_I2C_Line = scl; + tdp_table->ucVr_I2C_LineSDA = sda; + tdp_table->ucPlx_I2C_address = power_tune_table_v2->ucPlx_I2C_address; + + get_scl_sda_value(power_tune_table_v2->ucPlx_I2C_Line, &scl, &sda); + + tdp_table->ucPlx_I2C_Line = scl; + tdp_table->ucPlx_I2C_LineSDA = sda; + + hwmgr->platform_descriptor.LoadLineSlope = + power_tune_table_v2->usLoadLineResistance; + } *info_tdp_table = tdp_table; @@ -836,7 +970,7 @@ static int init_dpm_2_parameters( (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usVddcLookupTableOffset)); result = get_vddc_lookup_table(hwmgr, - &pp_table_info->vddc_lookup_table, vddc_table, 16); + &pp_table_info->vddc_lookup_table, vddc_table, 8); } if (powerplay_table->usVddmemLookupTableOffset) { @@ -845,7 +979,7 @@ static int init_dpm_2_parameters( (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usVddmemLookupTableOffset)); result = get_vddc_lookup_table(hwmgr, - &pp_table_info->vddmem_lookup_table, vdd_mem_table, 16); + &pp_table_info->vddmem_lookup_table, vdd_mem_table, 4); } if (powerplay_table->usVddciLookupTableOffset) { @@ -854,7 +988,7 @@ static int init_dpm_2_parameters( (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usVddciLookupTableOffset)); result = get_vddc_lookup_table(hwmgr, - &pp_table_info->vddci_lookup_table, vddci_table, 16); + &pp_table_info->vddci_lookup_table, vddci_table, 4); } return result; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h index 995d133ba6aa..d83ed2af7aa3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h @@ -26,6 +26,34 @@ #include "hwmgr.h" +enum Vega10_I2CLineID { + Vega10_I2CLineID_DDC1 = 0x90, + Vega10_I2CLineID_DDC2 = 0x91, + Vega10_I2CLineID_DDC3 = 0x92, + Vega10_I2CLineID_DDC4 = 0x93, + Vega10_I2CLineID_DDC5 = 0x94, + Vega10_I2CLineID_DDC6 = 0x95, + Vega10_I2CLineID_SCLSDA = 0x96, + Vega10_I2CLineID_DDCVGA = 0x97 +}; + +#define Vega10_I2C_DDC1DATA 0 +#define Vega10_I2C_DDC1CLK 1 +#define Vega10_I2C_DDC2DATA 2 +#define Vega10_I2C_DDC2CLK 3 +#define Vega10_I2C_DDC3DATA 4 +#define Vega10_I2C_DDC3CLK 5 +#define Vega10_I2C_SDA 40 +#define Vega10_I2C_SCL 41 +#define Vega10_I2C_DDC4DATA 65 +#define Vega10_I2C_DDC4CLK 66 +#define Vega10_I2C_DDC5DATA 0x48 +#define Vega10_I2C_DDC5CLK 0x49 +#define Vega10_I2C_DDC6DATA 0x4a +#define Vega10_I2C_DDC6CLK 0x4b +#define Vega10_I2C_DDCVGADATA 0x4c +#define Vega10_I2C_DDCVGACLK 0x4d + extern const struct pp_table_func vega10_pptable_funcs; extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr); extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index, diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h index aee021451d35..2037910adcb1 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h @@ -30,7 +30,9 @@ * SMU TEAM: Always increment the interface version if * any structure is changed in this file */ -#define SMU9_DRIVER_IF_VERSION 0xa +#define SMU9_DRIVER_IF_VERSION 0xB + +#define PPTABLE_V10_SMU_VERSION 1 #define NUM_GFXCLK_DPM_LEVELS 8 #define NUM_UVD_DPM_LEVELS 8 @@ -87,6 +89,11 @@ typedef struct { int32_t a0; int32_t a1; int32_t a2; + + uint8_t 
a0_shift; + uint8_t a1_shift; + uint8_t a2_shift; + uint8_t padding; } GbVdroopTable_t; typedef struct { @@ -293,7 +300,9 @@ typedef struct { uint16_t Platform_sigma; uint16_t PSM_Age_CompFactor; - uint32_t Reserved[20]; + uint32_t DpmLevelPowerDelta; + + uint32_t Reserved[19]; /* Padding - ignore */ uint32_t MmHubPadding[7]; /* SMU internal use */ @@ -350,8 +359,8 @@ typedef struct { typedef struct { uint16_t avgPsmCount[30]; uint16_t minPsmCount[30]; - uint16_t avgPsmVoltage[30]; /* in mV with 2 fractional bits */ - uint16_t minPsmVoltage[30]; /* in mV with 2 fractional bits */ + float avgPsmVoltage[30]; + float minPsmVoltage[30]; uint32_t MmHubPadding[7]; /* SMU internal use */ } AvfsDebugTable_t; @@ -414,5 +423,45 @@ typedef struct { #define UCLK_SWITCH_SLOW 0 #define UCLK_SWITCH_FAST 1 +/* GFX DIDT Configuration */ +#define SQ_Enable_MASK 0x1 +#define SQ_IR_MASK 0x2 +#define SQ_PCC_MASK 0x4 +#define SQ_EDC_MASK 0x8 + +#define TCP_Enable_MASK 0x100 +#define TCP_IR_MASK 0x200 +#define TCP_PCC_MASK 0x400 +#define TCP_EDC_MASK 0x800 + +#define TD_Enable_MASK 0x10000 +#define TD_IR_MASK 0x20000 +#define TD_PCC_MASK 0x40000 +#define TD_EDC_MASK 0x80000 + +#define DB_Enable_MASK 0x1000000 +#define DB_IR_MASK 0x2000000 +#define DB_PCC_MASK 0x4000000 +#define DB_EDC_MASK 0x8000000 + +#define SQ_Enable_SHIFT 0 +#define SQ_IR_SHIFT 1 +#define SQ_PCC_SHIFT 2 +#define SQ_EDC_SHIFT 3 + +#define TCP_Enable_SHIFT 8 +#define TCP_IR_SHIFT 9 +#define TCP_PCC_SHIFT 10 +#define TCP_EDC_SHIFT 11 + +#define TD_Enable_SHIFT 16 +#define TD_IR_SHIFT 17 +#define TD_PCC_SHIFT 18 +#define TD_EDC_SHIFT 19 + +#define DB_Enable_SHIFT 24 +#define DB_IR_SHIFT 25 +#define DB_PCC_SHIFT 26 +#define DB_EDC_SHIFT 27 #endif diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index f9d665550d3e..9446a673d469 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -16,6 +16,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <linux/clk.h> +#include <linux/pm_runtime.h> #include <video/videomode.h> #include "malidp_drv.h" @@ -35,13 +36,6 @@ static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc, long rate, req_rate = mode->crtc_clock * 1000; if (req_rate) { - rate = clk_round_rate(hwdev->mclk, req_rate); - if (rate < req_rate) { - DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n", - mode->crtc_clock); - return false; - } - rate = clk_round_rate(hwdev->pxlclk, req_rate); if (rate != req_rate) { DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n", @@ -58,9 +52,14 @@ static void malidp_crtc_enable(struct drm_crtc *crtc) struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; struct videomode vm; + int err = pm_runtime_get_sync(crtc->dev->dev); - drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm); + if (err < 0) { + DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err); + return; + } + drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm); clk_prepare_enable(hwdev->pxlclk); /* We rely on firmware to set mclk to a sensible level. 
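Each GFX DIDT block above gets a matching *_MASK/*_SHIFT pair (e.g. SQ_PCC_MASK = 0x4 with SQ_PCC_SHIFT = 2), which supports the usual read-modify-write idiom on one bit-field. A small sketch under that assumption:

    static inline uint32_t didt_get_sq_pcc(uint32_t cfg)
    {
            return (cfg & SQ_PCC_MASK) >> SQ_PCC_SHIFT;
    }

    static inline uint32_t didt_set_sq_pcc(uint32_t cfg, uint32_t val)
    {
            return (cfg & ~SQ_PCC_MASK) | ((val << SQ_PCC_SHIFT) & SQ_PCC_MASK);
    }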
*/ @@ -75,10 +74,254 @@ static void malidp_crtc_disable(struct drm_crtc *crtc) { struct malidp_drm *malidp = crtc_to_malidp_device(crtc); struct malidp_hw_device *hwdev = malidp->dev; + int err; drm_crtc_vblank_off(crtc); hwdev->enter_config_mode(hwdev); clk_disable_unprepare(hwdev->pxlclk); + + err = pm_runtime_put(crtc->dev->dev); + if (err < 0) { + DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err); + } +} + +static const struct gamma_curve_segment { + u16 start; + u16 end; +} segments[MALIDP_COEFFTAB_NUM_COEFFS] = { + /* sector 0 */ + { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 }, + { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, + { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 }, + { 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 }, + /* sector 1 */ + { 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 }, + /* sector 2 */ + { 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 }, + /* sector 3 */ + { 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 }, + /* sector 4 */ + { 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 }, + /* sector 5 */ + { 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 }, + /* sector 6 */ + { 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 }, + { 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 }, + { 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 }, + { 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 }, + { 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 }, + { 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 }, + { 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 }, +}; + +#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff))) + +static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob, + u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS]) +{ + struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data; + int i; + + for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) { + u32 a, b, delta_in, out_start, out_end; + + delta_in = segments[i].end - segments[i].start; + /* DP has 12-bit internal precision for its LUTs. */ + out_start = drm_color_lut_extract(lut[segments[i].start].green, + 12); + out_end = drm_color_lut_extract(lut[segments[i].end].green, 12); + a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in; + b = out_start; + coeffs[i] = DE_COEFTAB_DATA(a, b); + } +} + +/* + * Check if there is a new gamma LUT and if it is of an acceptable size. Also, + * reject any LUTs that use distinct red, green, and blue curves. + */ +static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_crtc_state *mc = to_malidp_crtc_state(state); + struct drm_color_lut *lut; + size_t lut_size; + int i; + + if (!state->color_mgmt_changed || !state->gamma_lut) + return 0; + + if (crtc->state->gamma_lut && + (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id)) + return 0; + + if (state->gamma_lut->length % sizeof(struct drm_color_lut)) + return -EINVAL; + + lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut); + if (lut_size != MALIDP_GAMMA_LUT_SIZE) + return -EINVAL; + + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < lut_size; ++i) + if (!((lut[i].red == lut[i].green) && + (lut[i].red == lut[i].blue))) + return -EINVAL; + + if (!state->mode_changed) { + int ret; + + state->mode_changed = true; + /* + * Kerneldoc for drm_atomic_helper_check_modeset mandates that + * it be invoked when the driver sets ->mode_changed. 
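Each generated gamma coefficient packs a slope/offset pair per segment: b is the 12-bit output level at the segment start, and a is the output delta per input step scaled by 256, i.e. an 8.8 fixed-point slope. Worked through for an identity LUT (lut[i].green extracting to i over the 4096-entry range) on segment { 256, 319 }:

    u32 delta_in = 319 - 256;                       /* 63 input steps */
    u32 a = ((319 - 256) * 256) / delta_in;         /* 256: unity slope in 8.8 */
    u32 coeff = DE_COEFTAB_DATA(a, 256);            /* 0x01000100 */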
Since + * changing the gamma LUT doesn't depend on any external + * resources, it is safe to call it only once. + */ + ret = drm_atomic_helper_check_modeset(crtc->dev, state->state); + if (ret) + return ret; + } + + malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs); + return 0; +} + +/* + * Check if there is a new CTM and if it contains valid input. Valid here means + * that the number is inside the representable range for a Q3.12 number, + * excluding truncating the fractional part of the input data. + * + * The COLORADJ registers can be changed atomically. + */ +static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_crtc_state *mc = to_malidp_crtc_state(state); + struct drm_color_ctm *ctm; + int i; + + if (!state->color_mgmt_changed) + return 0; + + if (!state->ctm) + return 0; + + if (crtc->state->ctm && (crtc->state->ctm->base.id == + state->ctm->base.id)) + return 0; + + /* + * The size of the ctm is checked in + * drm_atomic_replace_property_blob_from_id. + */ + ctm = (struct drm_color_ctm *)state->ctm->data; + for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) { + /* Convert from S31.32 to Q3.12. */ + s64 val = ctm->matrix[i]; + u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) & + GENMASK_ULL(14, 0); + + /* + * Convert to 2s complement and check the destination's top bit + * for overflow. NB: Can't check before converting or it'd + * incorrectly reject the case: + * sign == 1 + * mag == 0x2000 + */ + if (val & BIT_ULL(63)) + mag = ~mag + 1; + if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14))) + return -EINVAL; + mc->coloradj_coeffs[i] = mag; + } + + return 0; +} + +static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct malidp_crtc_state *cs = to_malidp_crtc_state(state); + struct malidp_se_config *s = &cs->scaler_config; + struct drm_plane *plane; + struct videomode vm; + const struct drm_plane_state *pstate; + u32 h_upscale_factor = 0; /* U16.16 */ + u32 v_upscale_factor = 0; /* U16.16 */ + u8 scaling = cs->scaled_planes_mask; + int ret; + + if (!scaling) { + s->scale_enable = false; + goto mclk_calc; + } + + /* The scaling engine can only handle one plane at a time. */ + if (scaling & (scaling - 1)) + return -EINVAL; + + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { + struct malidp_plane *mp = to_malidp_plane(plane); + u32 phase; + + if (!(mp->layer->id & scaling)) + continue; + + /* + * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h] + * to get the U16.16 result. + */ + h_upscale_factor = div_u64((u64)pstate->crtc_w << 32, + pstate->src_w); + v_upscale_factor = div_u64((u64)pstate->crtc_h << 32, + pstate->src_h); + + s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 || + (v_upscale_factor >> 16) >= 2); + + s->input_w = pstate->src_w >> 16; + s->input_h = pstate->src_h >> 16; + s->output_w = pstate->crtc_w; + s->output_h = pstate->crtc_h; + +#define SE_N_PHASE 4 +#define SE_SHIFT_N_PHASE 12 + /* Calculate initial_phase and delta_phase for horizontal. */ + phase = s->input_w; + s->h_init_phase = + ((phase << SE_N_PHASE) / s->output_w + 1) / 2; + + phase = s->input_w; + phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE); + s->h_delta_phase = phase / s->output_w; + + /* Same for vertical. 
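The CTM check converts each S31.32 matrix entry to hardware Q3.12 by dropping the sign bit, shifting out 20 of the fractional bits, and keeping a 15-bit magnitude, then verifies the sign survived the narrowing. Worked through for the identity coefficient 1.0:

    s64 val = 1LL << 32;    /* 1.0 in S31.32 */
    u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) & GENMASK_ULL(14, 0);
    /* mag == 0x1000, i.e. 1.0 in Q3.12; bit 14 is clear, matching val >= 0 */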
*/ + phase = s->input_h; + s->v_init_phase = + ((phase << SE_N_PHASE) / s->output_h + 1) / 2; + + phase = s->input_h; + phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE); + s->v_delta_phase = phase / s->output_h; +#undef SE_N_PHASE +#undef SE_SHIFT_N_PHASE + s->plane_src_id = mp->layer->id; + } + + s->scale_enable = true; + s->hcoeff = malidp_se_select_coeffs(h_upscale_factor); + s->vcoeff = malidp_se_select_coeffs(v_upscale_factor); + +mclk_calc: + drm_display_mode_to_videomode(&state->adjusted_mode, &vm); + ret = hwdev->se_calc_mclk(hwdev, s, &vm); + if (ret < 0) + return -EINVAL; + return 0; } static int malidp_crtc_atomic_check(struct drm_crtc *crtc, @@ -90,6 +333,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, const struct drm_plane_state *pstate; u32 rot_mem_free, rot_mem_usable; int rotated_planes = 0; + int ret; /* * check if there is enough rotation memory available for planes @@ -156,7 +400,11 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc, } } - return 0; + ret = malidp_crtc_atomic_check_gamma(crtc, state); + ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state); + ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state); + + return ret; } static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { @@ -166,6 +414,60 @@ static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = { .atomic_check = malidp_crtc_atomic_check, }; +static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct malidp_crtc_state *state, *old_state; + + if (WARN_ON(!crtc->state)) + return NULL; + + old_state = to_malidp_crtc_state(crtc->state); + state = kmalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + memcpy(state->gamma_coeffs, old_state->gamma_coeffs, + sizeof(state->gamma_coeffs)); + memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs, + sizeof(state->coloradj_coeffs)); + memcpy(&state->scaler_config, &old_state->scaler_config, + sizeof(state->scaler_config)); + state->scaled_planes_mask = 0; + + return &state->base; +} + +static void malidp_crtc_reset(struct drm_crtc *crtc) +{ + struct malidp_crtc_state *state = NULL; + + if (crtc->state) { + state = to_malidp_crtc_state(crtc->state); + __drm_atomic_helper_crtc_destroy_state(crtc->state); + } + + kfree(state); + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + crtc->state = &state->base; + crtc->state->crtc = crtc; + } +} + +static void malidp_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct malidp_crtc_state *mali_state = NULL; + + if (state) { + mali_state = to_malidp_crtc_state(state); + __drm_atomic_helper_crtc_destroy_state(state); + } + + kfree(mali_state); +} + static int malidp_crtc_enable_vblank(struct drm_crtc *crtc) { struct malidp_drm *malidp = crtc_to_malidp_device(crtc); @@ -186,12 +488,13 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc) } static const struct drm_crtc_funcs malidp_crtc_funcs = { + .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .reset = malidp_crtc_reset, + .atomic_duplicate_state = malidp_crtc_duplicate_state, + .atomic_destroy_state = malidp_crtc_destroy_state, .enable_vblank = 
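The scaling check computes its upscale factors in U16.16 by dividing the U32.32-shifted destination size by the U16.16 source size from the plane state. A worked example for a 2x horizontal upscale:

    /* crtc_w = 1920 output pixels; src_w = 960 pixels in U16.16 form */
    u32 h_up = div_u64((u64)1920 << 32, 960u << 16);
    /* h_up == 0x20000 (2.0 in U16.16), so (h_up >> 16) >= 2 enables the enhancer */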
malidp_crtc_enable_vblank, .disable_vblank = malidp_crtc_disable_vblank, }; @@ -223,11 +526,17 @@ int malidp_crtc_init(struct drm_device *drm) ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, &malidp_crtc_funcs, NULL); + if (ret) + goto crtc_cleanup_planes; - if (!ret) { - drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs); - return 0; - } + drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs); + drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE); + /* No inverse-gamma: it is per-plane. */ + drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE); + + malidp_se_set_enh_coeffs(malidp->dev); + + return 0; crtc_cleanup_planes: malidp_de_planes_destroy(drm); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 898c2b58e73d..0d3eb537d08b 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -13,9 +13,11 @@ #include <linux/module.h> #include <linux/clk.h> #include <linux/component.h> +#include <linux/console.h> #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/of_reserved_mem.h> +#include <linux/pm_runtime.h> #include <drm/drmP.h> #include <drm/drm_atomic.h> @@ -32,6 +34,131 @@ #define MALIDP_CONF_VALID_TIMEOUT 250 +static void malidp_write_gamma_table(struct malidp_hw_device *hwdev, + u32 data[MALIDP_COEFFTAB_NUM_COEFFS]) +{ + int i; + /* Update all channels with a single gamma curve. */ + const u32 gamma_write_mask = GENMASK(18, 16); + /* + * Always write an entire table, so the address field in + * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask + * directly. + */ + malidp_hw_write(hwdev, gamma_write_mask, + hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); + for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) + malidp_hw_write(hwdev, data[i], + hwdev->map.coeffs_base + + MALIDP_COEF_TABLE_DATA); +} + +static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + + if (!crtc->state->color_mgmt_changed) + return; + + if (!crtc->state->gamma_lut) { + malidp_hw_clearbits(hwdev, + MALIDP_DISP_FUNC_GAMMA, + MALIDP_DE_DISPLAY_FUNC); + } else { + struct malidp_crtc_state *mc = + to_malidp_crtc_state(crtc->state); + + if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id != + old_state->gamma_lut->base.id)) + malidp_write_gamma_table(hwdev, mc->gamma_coeffs); + + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA, + MALIDP_DE_DISPLAY_FUNC); + } +} + +static +void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + int i; + + if (!crtc->state->color_mgmt_changed) + return; + + if (!crtc->state->ctm) { + malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ, + MALIDP_DE_DISPLAY_FUNC); + } else { + struct malidp_crtc_state *mc = + to_malidp_crtc_state(crtc->state); + + if (!old_state->ctm || (crtc->state->ctm->base.id != + old_state->ctm->base.id)) + for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i) + malidp_hw_write(hwdev, + mc->coloradj_coeffs[i], + hwdev->map.coeffs_base + + MALIDP_COLOR_ADJ_COEF + 4 * i); + + malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ, + MALIDP_DE_DISPLAY_FUNC); + } +} + +static void malidp_atomic_commit_se_config(struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct 
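malidp_write_gamma_table() below uses a classic index/data register pair: one write to the address register selects the table and channel mask, then back-to-back writes to the data register stream the coefficients while the hardware auto-increments the address. The pattern in isolation, with hypothetical register offsets standing in for the malidp register map:

    #define TAB_ADDR 0x00   /* hypothetical index register */
    #define TAB_DATA 0x04   /* hypothetical auto-incrementing data register */

    static void write_table(void __iomem *base, const u32 *coef, int n)
    {
            int i;

            writel(GENMASK(18, 16), base + TAB_ADDR); /* all channels, offset 0 */
            for (i = 0; i < n; i++)
                    writel(coef[i], base + TAB_DATA);
    }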
malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state); + struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state); + struct malidp_drm *malidp = crtc_to_malidp_device(crtc); + struct malidp_hw_device *hwdev = malidp->dev; + struct malidp_se_config *s = &cs->scaler_config; + struct malidp_se_config *old_s = &old_cs->scaler_config; + u32 se_control = hwdev->map.se_base + + ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? + 0x10 : 0xC); + u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL; + u32 scr = se_control + MALIDP_SE_SCALING_CONTROL; + u32 val; + + /* Set SE_CONTROL */ + if (!s->scale_enable) { + val = malidp_hw_read(hwdev, se_control); + val &= ~MALIDP_SE_SCALING_EN; + malidp_hw_write(hwdev, val, se_control); + return; + } + + hwdev->se_set_scaling_coeffs(hwdev, s, old_s); + val = malidp_hw_read(hwdev, se_control); + val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN; + + val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK); + val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0; + + val |= MALIDP_SE_RGBO_IF_EN; + malidp_hw_write(hwdev, val, se_control); + + /* Set IN_SIZE & OUT_SIZE. */ + val = MALIDP_SE_SET_V_SIZE(s->input_h) | + MALIDP_SE_SET_H_SIZE(s->input_w); + malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE); + val = MALIDP_SE_SET_V_SIZE(s->output_h) | + MALIDP_SE_SET_H_SIZE(s->output_w); + malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE); + + /* Set phase regs. */ + malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH); + malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH); + malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH); + malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH); +} + /* * set the "config valid" bit and wait until the hardware acts on it */ @@ -66,10 +193,12 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) struct drm_pending_vblank_event *event; struct drm_device *drm = state->dev; struct malidp_drm *malidp = drm->dev_private; - int ret = malidp_set_and_wait_config_valid(drm); - if (ret) - DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + if (malidp->crtc.enabled) { + /* only set config_valid if the CRTC is enabled */ + if (malidp_set_and_wait_config_valid(drm)) + DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n"); + } event = malidp->crtc.state->event; if (event) { @@ -88,15 +217,30 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) static void malidp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + int i; + + pm_runtime_get_sync(drm->dev); drm_atomic_helper_commit_modeset_disables(drm, state); - drm_atomic_helper_commit_modeset_enables(drm, state); + + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + malidp_atomic_commit_update_gamma(crtc, old_crtc_state); + malidp_atomic_commit_update_coloradj(crtc, old_crtc_state); + malidp_atomic_commit_se_config(crtc, old_crtc_state); + } + drm_atomic_helper_commit_planes(drm, state, 0); + drm_atomic_helper_commit_modeset_enables(drm, state); + malidp_atomic_commit_hw_done(state); drm_atomic_helper_wait_for_vblanks(drm, state); + pm_runtime_put(drm->dev); + drm_atomic_helper_cleanup_planes(drm, state); } @@ -277,8 +421,65 @@ static bool malidp_has_sufficient_address_space(const struct resource *res, return true; } +static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + 
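malidp_set_and_wait_config_valid(), referenced above, is a flag-plus-waitqueue handshake: the CPU latches the config-valid bit, then sleeps until the IRQ handler observes the hardware consuming it and wakes the queue. A hedged sketch of that shape using the driver's wq/config_valid fields, with the hardware latch itself elided:

    static int set_and_wait(struct malidp_drm *malidp)
    {
            long ret;

            atomic_set(&malidp->config_valid, 0);
            /* hardware-specific: set the CONFIG_VALID bit here */

            ret = wait_event_interruptible_timeout(malidp->wq,
                            atomic_read(&malidp->config_valid) == 1,
                            msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
            if (ret > 0)
                    return 0;
            return ret ? (int)ret : -ETIMEDOUT;
    }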
struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + + return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id); +} + +DEVICE_ATTR_RO(core_id); + +static int malidp_init_sysfs(struct device *dev) +{ + int ret = device_create_file(dev, &dev_attr_core_id); + + if (ret) + DRM_ERROR("failed to create device file for core_id\n"); + + return ret; +} + +static void malidp_fini_sysfs(struct device *dev) +{ + device_remove_file(dev, &dev_attr_core_id); +} + #define MAX_OUTPUT_CHANNELS 3 +static int malidp_runtime_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + /* we can only suspend if the hardware is in config mode */ + WARN_ON(!hwdev->in_config_mode(hwdev)); + + hwdev->pm_suspended = true; + clk_disable_unprepare(hwdev->mclk); + clk_disable_unprepare(hwdev->aclk); + clk_disable_unprepare(hwdev->pclk); + + return 0; +} + +static int malidp_runtime_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + struct malidp_hw_device *hwdev = malidp->dev; + + clk_prepare_enable(hwdev->pclk); + clk_prepare_enable(hwdev->aclk); + clk_prepare_enable(hwdev->mclk); + hwdev->pm_suspended = false; + + return 0; +} + static int malidp_bind(struct device *dev) { struct resource *res; @@ -307,7 +508,6 @@ static int malidp_bind(struct device *dev) memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); malidp->dev = hwdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hwdev->regs = devm_ioremap_resource(dev, res); if (IS_ERR(hwdev->regs)) @@ -340,14 +540,17 @@ static int malidp_bind(struct device *dev) goto alloc_fail; } - /* Enable APB clock in order to get access to the registers */ - clk_prepare_enable(hwdev->pclk); - /* - * Enable AXI clock and main clock so that prefetch can start once - * the registers are set - */ - clk_prepare_enable(hwdev->aclk); - clk_prepare_enable(hwdev->mclk); + drm->dev_private = malidp; + dev_set_drvdata(dev, drm); + + /* Enable power management */ + pm_runtime_enable(dev); + + /* Resume device to enable the clocks */ + if (pm_runtime_enabled(dev)) + pm_runtime_get_sync(dev); + else + malidp_runtime_pm_resume(dev); dev_id = of_match_device(malidp_drm_of_match, dev); if (!dev_id) { @@ -376,6 +579,8 @@ static int malidp_bind(struct device *dev) DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16, (version >> 12) & 0xf, (version >> 8) & 0xf); + malidp->core_id = version; + /* set the number of lines used for output of RGB data */ ret = of_property_read_u8_array(dev->of_node, "arm,malidp-output-port-lines", @@ -387,13 +592,15 @@ static int malidp_bind(struct device *dev) out_depth = (out_depth << 8) | (output_width[i] & 0xf); malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); - drm->dev_private = malidp; - dev_set_drvdata(dev, drm); atomic_set(&malidp->config_valid, 0); init_waitqueue_head(&malidp->wq); ret = malidp_init(drm); if (ret < 0) + goto query_hw_fail; + + ret = malidp_init_sysfs(dev); + if (ret) goto init_fail; /* Set the CRTC's port so that the encoder component can find it */ @@ -416,6 +623,7 @@ static int malidp_bind(struct device *dev) DRM_ERROR("failed to initialise vblank\n"); goto vblank_fail; } + pm_runtime_put(dev); drm_mode_config_reset(drm); @@ -441,7 +649,9 @@ register_fail: drm_fbdev_cma_fini(malidp->fbdev); malidp->fbdev = NULL; } + drm_kms_helper_poll_fini(drm); fbdev_fail: 
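The new runtime-PM hooks keep a strict mirror: resume ungates the APB register clock first (pclk), then the AXI and main clocks, while suspend gates them in exactly the reverse order. Callers then bracket hardware access with get/put so the hooks only fire on idle transitions. The caller-side idiom, sketched:

    int ret = pm_runtime_get_sync(dev);     /* resume hw + hold a usage ref */

    if (ret < 0)
            return ret;
    /* ... touch registers while the clocks are guaranteed on ... */
    pm_runtime_put(dev);                    /* drop ref; may runtime-suspend */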
+ pm_runtime_get_sync(dev); drm_vblank_cleanup(drm); vblank_fail: malidp_se_irq_fini(drm); @@ -452,14 +662,17 @@ irq_init_fail: bind_fail: of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; - malidp_fini(drm); init_fail: + malidp_fini_sysfs(dev); + malidp_fini(drm); +query_hw_fail: + pm_runtime_put(dev); + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + else + malidp_runtime_pm_suspend(dev); drm->dev_private = NULL; dev_set_drvdata(dev, NULL); -query_hw_fail: - clk_disable_unprepare(hwdev->mclk); - clk_disable_unprepare(hwdev->aclk); - clk_disable_unprepare(hwdev->pclk); drm_dev_unref(drm); alloc_fail: of_reserved_mem_device_release(dev); @@ -471,7 +684,6 @@ static void malidp_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct malidp_drm *malidp = drm->dev_private; - struct malidp_hw_device *hwdev = malidp->dev; drm_dev_unregister(drm); if (malidp->fbdev) { @@ -479,18 +691,22 @@ static void malidp_unbind(struct device *dev) malidp->fbdev = NULL; } drm_kms_helper_poll_fini(drm); + pm_runtime_get_sync(dev); + drm_vblank_cleanup(drm); malidp_se_irq_fini(drm); malidp_de_irq_fini(drm); - drm_vblank_cleanup(drm); component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; + malidp_fini_sysfs(dev); malidp_fini(drm); + pm_runtime_put(dev); + if (pm_runtime_enabled(dev)) + pm_runtime_disable(dev); + else + malidp_runtime_pm_suspend(dev); drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - clk_disable_unprepare(hwdev->mclk); - clk_disable_unprepare(hwdev->aclk); - clk_disable_unprepare(hwdev->pclk); drm_dev_unref(drm); of_reserved_mem_device_release(dev); } @@ -533,11 +749,52 @@ static int malidp_platform_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused malidp_pm_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + + drm_kms_helper_poll_disable(drm); + console_lock(); + drm_fbdev_cma_set_suspend(malidp->fbdev, 1); + console_unlock(); + malidp->pm_state = drm_atomic_helper_suspend(drm); + if (IS_ERR(malidp->pm_state)) { + console_lock(); + drm_fbdev_cma_set_suspend(malidp->fbdev, 0); + console_unlock(); + drm_kms_helper_poll_enable(drm); + return PTR_ERR(malidp->pm_state); + } + + return 0; +} + +static int __maybe_unused malidp_pm_resume(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct malidp_drm *malidp = drm->dev_private; + + drm_atomic_helper_resume(drm, malidp->pm_state); + console_lock(); + drm_fbdev_cma_set_suspend(malidp->fbdev, 0); + console_unlock(); + drm_kms_helper_poll_enable(drm); + + return 0; +} + +static const struct dev_pm_ops malidp_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \ + SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL) +}; + static struct platform_driver malidp_platform_driver = { .probe = malidp_platform_probe, .remove = malidp_platform_remove, .driver = { .name = "mali-dp", + .pm = &malidp_pm_ops, .of_match_table = malidp_drm_of_match, }, }; diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h index dbc617c6e4ef..040311ffcaec 100644 --- a/drivers/gpu/drm/arm/malidp_drv.h +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -24,6 +24,8 @@ struct malidp_drm { struct drm_crtc crtc; wait_queue_head_t wq; atomic_t config_valid; + struct drm_atomic_state *pm_state; + u32 core_id; }; #define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc) @@ -47,6 +49,17 @@ struct 
malidp_plane_state { #define to_malidp_plane(x) container_of(x, struct malidp_plane, base) #define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base) +struct malidp_crtc_state { + struct drm_crtc_state base; + u32 gamma_coeffs[MALIDP_COEFFTAB_NUM_COEFFS]; + u32 coloradj_coeffs[MALIDP_COLORADJ_NUM_COEFFS]; + struct malidp_se_config scaler_config; + /* Bitfield of all the planes that have requested a scaled output. */ + u8 scaled_planes_mask; +}; + +#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base) + int malidp_de_planes_init(struct drm_device *drm); void malidp_de_planes_destroy(struct drm_device *drm); int malidp_crtc_init(struct drm_device *drm); diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 9f5513006eee..28360b8542f7 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -12,6 +12,7 @@ * in an attempt to provide to the rest of the driver code a unified view */ +#include <linux/clk.h> #include <linux/types.h> #include <linux/io.h> #include <drm/drmP.h> @@ -86,6 +87,80 @@ static const struct malidp_layer malidp550_layers[] = { { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE }, }; +#define SE_N_SCALING_COEFFS 96 +static const u16 dp500_se_scaling_coeffs[][SE_N_SCALING_COEFFS] = { + [MALIDP_UPSCALING_COEFFS - 1] = { + 0x0000, 0x0001, 0x0007, 0x0011, 0x001e, 0x002e, 0x003f, 0x0052, + 0x0064, 0x0073, 0x007d, 0x0080, 0x007a, 0x006c, 0x0053, 0x002f, + 0x0000, 0x3fc6, 0x3f83, 0x3f39, 0x3eea, 0x3e9b, 0x3e4f, 0x3e0a, + 0x3dd4, 0x3db0, 0x3da2, 0x3db1, 0x3dde, 0x3e2f, 0x3ea5, 0x3f40, + 0x0000, 0x00e5, 0x01ee, 0x0315, 0x0456, 0x05aa, 0x0709, 0x086c, + 0x09c9, 0x0b15, 0x0c4a, 0x0d5d, 0x0e4a, 0x0f06, 0x0f91, 0x0fe5, + 0x1000, 0x0fe5, 0x0f91, 0x0f06, 0x0e4a, 0x0d5d, 0x0c4a, 0x0b15, + 0x09c9, 0x086c, 0x0709, 0x05aa, 0x0456, 0x0315, 0x01ee, 0x00e5, + 0x0000, 0x3f40, 0x3ea5, 0x3e2f, 0x3dde, 0x3db1, 0x3da2, 0x3db0, + 0x3dd4, 0x3e0a, 0x3e4f, 0x3e9b, 0x3eea, 0x3f39, 0x3f83, 0x3fc6, + 0x0000, 0x002f, 0x0053, 0x006c, 0x007a, 0x0080, 0x007d, 0x0073, + 0x0064, 0x0052, 0x003f, 0x002e, 0x001e, 0x0011, 0x0007, 0x0001 + }, + [MALIDP_DOWNSCALING_1_5_COEFFS - 1] = { + 0x0059, 0x004f, 0x0041, 0x002e, 0x0016, 0x3ffb, 0x3fd9, 0x3fb4, + 0x3f8c, 0x3f62, 0x3f36, 0x3f09, 0x3edd, 0x3eb3, 0x3e8d, 0x3e6c, + 0x3e52, 0x3e3f, 0x3e35, 0x3e37, 0x3e46, 0x3e61, 0x3e8c, 0x3ec5, + 0x3f0f, 0x3f68, 0x3fd1, 0x004a, 0x00d3, 0x0169, 0x020b, 0x02b8, + 0x036e, 0x042d, 0x04f2, 0x05b9, 0x0681, 0x0745, 0x0803, 0x08ba, + 0x0965, 0x0a03, 0x0a91, 0x0b0d, 0x0b75, 0x0bc6, 0x0c00, 0x0c20, + 0x0c28, 0x0c20, 0x0c00, 0x0bc6, 0x0b75, 0x0b0d, 0x0a91, 0x0a03, + 0x0965, 0x08ba, 0x0803, 0x0745, 0x0681, 0x05b9, 0x04f2, 0x042d, + 0x036e, 0x02b8, 0x020b, 0x0169, 0x00d3, 0x004a, 0x3fd1, 0x3f68, + 0x3f0f, 0x3ec5, 0x3e8c, 0x3e61, 0x3e46, 0x3e37, 0x3e35, 0x3e3f, + 0x3e52, 0x3e6c, 0x3e8d, 0x3eb3, 0x3edd, 0x3f09, 0x3f36, 0x3f62, + 0x3f8c, 0x3fb4, 0x3fd9, 0x3ffb, 0x0016, 0x002e, 0x0041, 0x004f + }, + [MALIDP_DOWNSCALING_2_COEFFS - 1] = { + 0x3f19, 0x3f03, 0x3ef0, 0x3edf, 0x3ed0, 0x3ec5, 0x3ebd, 0x3eb9, + 0x3eb9, 0x3ebf, 0x3eca, 0x3ed9, 0x3eef, 0x3f0a, 0x3f2c, 0x3f52, + 0x3f7f, 0x3fb0, 0x3fe8, 0x0026, 0x006a, 0x00b4, 0x0103, 0x0158, + 0x01b1, 0x020d, 0x026c, 0x02cd, 0x032f, 0x0392, 0x03f4, 0x0455, + 0x04b4, 0x051e, 0x0585, 0x05eb, 0x064c, 0x06a8, 0x06fe, 0x074e, + 0x0796, 0x07d5, 0x080c, 0x0839, 0x085c, 0x0875, 0x0882, 0x0887, + 0x0881, 0x0887, 0x0882, 0x0875, 0x085c, 0x0839, 0x080c, 0x07d5, + 0x0796, 0x074e, 
0x06fe, 0x06a8, 0x064c, 0x05eb, 0x0585, 0x051e, + 0x04b4, 0x0455, 0x03f4, 0x0392, 0x032f, 0x02cd, 0x026c, 0x020d, + 0x01b1, 0x0158, 0x0103, 0x00b4, 0x006a, 0x0026, 0x3fe8, 0x3fb0, + 0x3f7f, 0x3f52, 0x3f2c, 0x3f0a, 0x3eef, 0x3ed9, 0x3eca, 0x3ebf, + 0x3eb9, 0x3eb9, 0x3ebd, 0x3ec5, 0x3ed0, 0x3edf, 0x3ef0, 0x3f03 + }, + [MALIDP_DOWNSCALING_2_75_COEFFS - 1] = { + 0x3f51, 0x3f60, 0x3f71, 0x3f84, 0x3f98, 0x3faf, 0x3fc8, 0x3fe3, + 0x0000, 0x001f, 0x0040, 0x0064, 0x008a, 0x00b1, 0x00da, 0x0106, + 0x0133, 0x0160, 0x018e, 0x01bd, 0x01ec, 0x021d, 0x024e, 0x0280, + 0x02b2, 0x02e4, 0x0317, 0x0349, 0x037c, 0x03ad, 0x03df, 0x0410, + 0x0440, 0x0468, 0x048f, 0x04b3, 0x04d6, 0x04f8, 0x0516, 0x0533, + 0x054e, 0x0566, 0x057c, 0x0590, 0x05a0, 0x05ae, 0x05ba, 0x05c3, + 0x05c9, 0x05c3, 0x05ba, 0x05ae, 0x05a0, 0x0590, 0x057c, 0x0566, + 0x054e, 0x0533, 0x0516, 0x04f8, 0x04d6, 0x04b3, 0x048f, 0x0468, + 0x0440, 0x0410, 0x03df, 0x03ad, 0x037c, 0x0349, 0x0317, 0x02e4, + 0x02b2, 0x0280, 0x024e, 0x021d, 0x01ec, 0x01bd, 0x018e, 0x0160, + 0x0133, 0x0106, 0x00da, 0x00b1, 0x008a, 0x0064, 0x0040, 0x001f, + 0x0000, 0x3fe3, 0x3fc8, 0x3faf, 0x3f98, 0x3f84, 0x3f71, 0x3f60 + }, + [MALIDP_DOWNSCALING_4_COEFFS - 1] = { + 0x0094, 0x00a9, 0x00be, 0x00d4, 0x00ea, 0x0101, 0x0118, 0x012f, + 0x0148, 0x0160, 0x017a, 0x0193, 0x01ae, 0x01c8, 0x01e4, 0x01ff, + 0x021c, 0x0233, 0x024a, 0x0261, 0x0278, 0x028f, 0x02a6, 0x02bd, + 0x02d4, 0x02eb, 0x0302, 0x0319, 0x032f, 0x0346, 0x035d, 0x0374, + 0x038a, 0x0397, 0x03a3, 0x03af, 0x03bb, 0x03c6, 0x03d1, 0x03db, + 0x03e4, 0x03ed, 0x03f6, 0x03fe, 0x0406, 0x040d, 0x0414, 0x041a, + 0x0420, 0x041a, 0x0414, 0x040d, 0x0406, 0x03fe, 0x03f6, 0x03ed, + 0x03e4, 0x03db, 0x03d1, 0x03c6, 0x03bb, 0x03af, 0x03a3, 0x0397, + 0x038a, 0x0374, 0x035d, 0x0346, 0x032f, 0x0319, 0x0302, 0x02eb, + 0x02d4, 0x02bd, 0x02a6, 0x028f, 0x0278, 0x0261, 0x024a, 0x0233, + 0x021c, 0x01ff, 0x01e4, 0x01c8, 0x01ae, 0x0193, 0x017a, 0x0160, + 0x0148, 0x012f, 0x0118, 0x0101, 0x00ea, 0x00d4, 0x00be, 0x00a9 + }, +}; + #define MALIDP_DE_DEFAULT_PREFETCH_START 5 static int malidp500_query_hw(struct malidp_hw_device *hwdev) @@ -211,6 +286,88 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 return w * drm_format_plane_cpp(fmt, 0) * 8; } +static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev, + u32 direction, + u16 addr, + u8 coeffs_id) +{ + int i; + u16 scaling_control = MALIDP500_SE_CONTROL + MALIDP_SE_SCALING_CONTROL; + + malidp_hw_write(hwdev, + direction | (addr & MALIDP_SE_COEFFTAB_ADDR_MASK), + scaling_control + MALIDP_SE_COEFFTAB_ADDR); + for (i = 0; i < ARRAY_SIZE(dp500_se_scaling_coeffs); ++i) + malidp_hw_write(hwdev, MALIDP_SE_SET_COEFFTAB_DATA( + dp500_se_scaling_coeffs[coeffs_id][i]), + scaling_control + MALIDP_SE_COEFFTAB_DATA); +} + +static int malidp500_se_set_scaling_coeffs(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct malidp_se_config *old_config) +{ + /* Get array indices into dp500_se_scaling_coeffs. 
*/ + u8 h = (u8)se_config->hcoeff - 1; + u8 v = (u8)se_config->vcoeff - 1; + + if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) || + v >= ARRAY_SIZE(dp500_se_scaling_coeffs))) + return -EINVAL; + + if ((h == v) && (se_config->hcoeff != old_config->hcoeff || + se_config->vcoeff != old_config->vcoeff)) { + malidp500_se_write_pp_coefftab(hwdev, + (MALIDP_SE_V_COEFFTAB | + MALIDP_SE_H_COEFFTAB), + 0, v); + } else { + if (se_config->vcoeff != old_config->vcoeff) + malidp500_se_write_pp_coefftab(hwdev, + MALIDP_SE_V_COEFFTAB, + 0, v); + if (se_config->hcoeff != old_config->hcoeff) + malidp500_se_write_pp_coefftab(hwdev, + MALIDP_SE_H_COEFFTAB, + 0, h); + } + + return 0; +} + +static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct videomode *vm) +{ + unsigned long mclk; + unsigned long pxlclk = vm->pixelclock; /* Hz */ + unsigned long htotal = vm->hactive + vm->hfront_porch + + vm->hback_porch + vm->hsync_len; + unsigned long input_size = se_config->input_w * se_config->input_h; + unsigned long a = 10; + long ret; + + /* + * mclk = max(a, 1.5) * pxlclk + * + * To avoid float calculation, use 15 instead of 1.5 and divide by + * 10 to get mclk. + */ + if (se_config->scale_enable) { + a = 15 * input_size / (htotal * se_config->output_h); + if (a < 15) + a = 15; + } + mclk = a * pxlclk / 10; + ret = clk_get_rate(hwdev->mclk); + if (ret < mclk) { + DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n", + mclk / 1000); + return -EINVAL; + } + return ret; +} + static int malidp550_query_hw(struct malidp_hw_device *hwdev) { u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); @@ -384,6 +541,53 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 return w * bytes_per_col; } +static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct malidp_se_config *old_config) +{ + u32 mask = MALIDP550_SE_CTL_VCSEL(MALIDP550_SE_CTL_SEL_MASK) | + MALIDP550_SE_CTL_HCSEL(MALIDP550_SE_CTL_SEL_MASK); + u32 new_value = MALIDP550_SE_CTL_VCSEL(se_config->vcoeff) | + MALIDP550_SE_CTL_HCSEL(se_config->hcoeff); + + malidp_hw_clearbits(hwdev, mask, MALIDP550_SE_CONTROL); + malidp_hw_setbits(hwdev, new_value, MALIDP550_SE_CONTROL); + return 0; +} + +static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct videomode *vm) +{ + unsigned long mclk; + unsigned long pxlclk = vm->pixelclock; + unsigned long htotal = vm->hactive + vm->hfront_porch + + vm->hback_porch + vm->hsync_len; + unsigned long numerator = 1, denominator = 1; + long ret; + + if (se_config->scale_enable) { + numerator = max(se_config->input_w, se_config->output_w) * + se_config->input_h; + numerator += se_config->output_w * + (se_config->output_h - + min(se_config->input_h, se_config->output_h)); + denominator = (htotal - 2) * se_config->output_h; + } + + /* mclk can't be slower than pxlclk. 
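+	 * If the scaled workload is lighter than the unscaled one (numerator < denominator), the clamp below keeps mclk at pxlclk. E.g. upscaling 1280x720 to 1920x1080 with htotal 2200: numerator = 1920 * 720 + 1920 * 360 = 2073600, denominator = 2198 * 1080 = 2373840, so mclk = pxlclk.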
*/ + if (numerator < denominator) + numerator = denominator = 1; + mclk = (pxlclk * numerator) / denominator; + ret = clk_get_rate(hwdev->mclk); + if (ret < mclk) { + DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n", + mclk / 1000); + return -EINVAL; + } + return ret; +} + static int malidp650_query_hw(struct malidp_hw_device *hwdev) { u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID); @@ -415,6 +619,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev) const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { [MALIDP_500] = { .map = { + .coeffs_base = MALIDP500_COEFFS_BASE, .se_base = MALIDP500_SE_BASE, .dc_base = MALIDP500_DC_BASE, .out_depth_base = MALIDP500_OUTPUT_DEPTH, @@ -447,10 +652,13 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .set_config_valid = malidp500_set_config_valid, .modeset = malidp500_modeset, .rotmem_required = malidp500_rotmem_required, + .se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs, + .se_calc_mclk = malidp500_se_calc_mclk, .features = MALIDP_DEVICE_LV_HAS_3_STRIDES, }, [MALIDP_550] = { .map = { + .coeffs_base = MALIDP550_COEFFS_BASE, .se_base = MALIDP550_SE_BASE, .dc_base = MALIDP550_DC_BASE, .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, @@ -481,10 +689,13 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .set_config_valid = malidp550_set_config_valid, .modeset = malidp550_modeset, .rotmem_required = malidp550_rotmem_required, + .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs, + .se_calc_mclk = malidp550_se_calc_mclk, .features = 0, }, [MALIDP_650] = { .map = { + .coeffs_base = MALIDP550_COEFFS_BASE, .se_base = MALIDP550_SE_BASE, .dc_base = MALIDP550_DC_BASE, .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH, @@ -516,6 +727,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { .set_config_valid = malidp550_set_config_valid, .modeset = malidp550_modeset, .rotmem_required = malidp550_rotmem_required, + .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs, + .se_calc_mclk = malidp550_se_calc_mclk, .features = 0, }, }; diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 00974b59407d..849ad9a30c3a 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -61,12 +61,34 @@ struct malidp_layer { u16 stride_offset; /* Offset to the first stride register. 
*/ }; +enum malidp_scaling_coeff_set { + MALIDP_UPSCALING_COEFFS = 1, + MALIDP_DOWNSCALING_1_5_COEFFS = 2, + MALIDP_DOWNSCALING_2_COEFFS = 3, + MALIDP_DOWNSCALING_2_75_COEFFS = 4, + MALIDP_DOWNSCALING_4_COEFFS = 5, +}; + +struct malidp_se_config { + u8 scale_enable : 1; + u8 enhancer_enable : 1; + u8 hcoeff : 3; + u8 vcoeff : 3; + u8 plane_src_id; + u16 input_w, input_h; + u16 output_w, output_h; + u32 h_init_phase, h_delta_phase; + u32 v_init_phase, v_delta_phase; +}; + /* regmap features */ #define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0) struct malidp_hw_regmap { /* address offset of the DE register bank */ /* is always 0x0000 */ + /* address offset of the DE coefficients registers */ + const u16 coeffs_base; /* address offset of the SE registers bank */ const u16 se_base; /* address offset of the DC registers bank */ @@ -151,11 +173,22 @@ struct malidp_hw_device { */ int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt); + int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct malidp_se_config *old_config); + + long (*se_calc_mclk)(struct malidp_hw_device *hwdev, + struct malidp_se_config *se_config, + struct videomode *vm); + u8 features; u8 min_line_size; u16 max_line_size; + /* track the device PM state */ + bool pm_suspended; + /* size of memory used for rotating layers, up to two banks available */ u32 rotation_memory[2]; }; @@ -173,12 +206,14 @@ extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES]; static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) { + WARN_ON(hwdev->pm_suspended); return readl(hwdev->regs + reg); } static inline void malidp_hw_write(struct malidp_hw_device *hwdev, u32 value, u32 reg) { + WARN_ON(hwdev->pm_suspended); writel(value, hwdev->regs + reg); } @@ -243,6 +278,47 @@ static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev, return !(pitch & (hwdev->map.bus_align_bytes - 1)); } +/* U16.16 */ +#define FP_1_00000 0x00010000 /* 1.0 */ +#define FP_0_66667 0x0000AAAA /* 0.6667 = 1/1.5 */ +#define FP_0_50000 0x00008000 /* 0.5 = 1/2 */ +#define FP_0_36363 0x00005D17 /* 0.36363 = 1/2.75 */ +#define FP_0_25000 0x00004000 /* 0.25 = 1/4 */ + +static inline enum malidp_scaling_coeff_set +malidp_se_select_coeffs(u32 upscale_factor) +{ + return (upscale_factor >= FP_1_00000) ? MALIDP_UPSCALING_COEFFS : + (upscale_factor >= FP_0_66667) ? MALIDP_DOWNSCALING_1_5_COEFFS : + (upscale_factor >= FP_0_50000) ? MALIDP_DOWNSCALING_2_COEFFS : + (upscale_factor >= FP_0_36363) ? MALIDP_DOWNSCALING_2_75_COEFFS : + MALIDP_DOWNSCALING_4_COEFFS; +} + +#undef FP_0_25000 +#undef FP_0_36363 +#undef FP_0_50000 +#undef FP_0_66667 +#undef FP_1_00000 + +static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev) +{ + static const s32 enhancer_coeffs[] = { + -8, -8, -8, -8, 128, -8, -8, -8, -8 + }; + u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) | + MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL); + u32 image_enh = hwdev->map.se_base + + ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? 
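+	/* SE_CONTROL sits at se_base + 0x10 on DP550/650 and at se_base + 0xc on DP500 */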
+ 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH; + u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0; + int i; + + malidp_hw_write(hwdev, val, image_enh); + for (i = 0; i < ARRAY_SIZE(enhancer_coeffs); ++i) + malidp_hw_write(hwdev, enhancer_coeffs[i], enh_coeffs + i * 4); +} + /* * background color components are defined as 12bits values, * they will be shifted right when stored on hardware that @@ -252,4 +328,9 @@ static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev, #define MALIDP_BGND_COLOR_G 0x000 #define MALIDP_BGND_COLOR_B 0x000 +#define MALIDP_COLORADJ_NUM_COEFFS 12 +#define MALIDP_COEFFTAB_NUM_COEFFS 64 + +#define MALIDP_GAMMA_LUT_SIZE 4096 + #endif /* __MALIDP_HW_H__ */ diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index d5aec082294c..814fda23cead 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -16,6 +16,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_print.h> #include "malidp_hw.h" #include "malidp_drv.h" @@ -24,6 +25,9 @@ #define MALIDP_LAYER_FORMAT 0x000 #define MALIDP_LAYER_CONTROL 0x004 #define LAYER_ENABLE (1 << 0) +#define LAYER_FLOWCFG_MASK 7 +#define LAYER_FLOWCFG(x) (((x) & LAYER_FLOWCFG_MASK) << 1) +#define LAYER_FLOWCFG_SCALE_SE 3 #define LAYER_ROT_OFFSET 8 #define LAYER_H_FLIP (1 << 10) #define LAYER_V_FLIP (1 << 11) @@ -60,6 +64,27 @@ static void malidp_de_plane_destroy(struct drm_plane *plane) devm_kfree(plane->dev->dev, mp); } +/* + * Replicate what the default ->reset hook does: free the state pointer and + * allocate a new empty object. We just need enough space to store + * a malidp_plane_state instead of a drm_plane_state. + */ +static void malidp_plane_reset(struct drm_plane *plane) +{ + struct malidp_plane_state *state = to_malidp_plane_state(plane->state); + + if (state) + __drm_atomic_helper_plane_destroy_state(&state->base); + kfree(state); + plane->state = NULL; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state) { + state->base.plane = plane; + state->base.rotation = DRM_ROTATE_0; + plane->state = &state->base; + } +} + static struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane) { @@ -90,26 +115,71 @@ static void malidp_destroy_plane_state(struct drm_plane *plane, kfree(m_state); } +static void malidp_plane_atomic_print_state(struct drm_printer *p, + const struct drm_plane_state *state) +{ + struct malidp_plane_state *ms = to_malidp_plane_state(state); + + drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size); + drm_printf(p, "\tformat_id=%u\n", ms->format); + drm_printf(p, "\tn_planes=%u\n", ms->n_planes); +} + static const struct drm_plane_funcs malidp_de_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .set_property = drm_atomic_helper_plane_set_property, .destroy = malidp_de_plane_destroy, - .reset = drm_atomic_helper_plane_reset, + .reset = malidp_plane_reset, .atomic_duplicate_state = malidp_duplicate_plane_state, .atomic_destroy_state = malidp_destroy_plane_state, + .atomic_print_state = malidp_plane_atomic_print_state, }; +static int malidp_se_check_scaling(struct malidp_plane *mp, + struct drm_plane_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + struct malidp_crtc_state *mc; + struct drm_rect clip = { 0 }; + u32 src_w, src_h; + int ret; + + if (!crtc_state) + return -EINVAL; + + clip.x2 = 
crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + ret = drm_plane_helper_check_state(state, &clip, 0, INT_MAX, true, true); + if (ret) + return ret; + + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + mc = to_malidp_crtc_state(crtc_state); + + if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) { + /* Scaling not necessary for this plane. */ + mc->scaled_planes_mask &= ~(mp->layer->id); + return 0; + } + + if (mp->layer->id & (DE_SMART | DE_GRAPHICS2)) + return -EINVAL; + + mc->scaled_planes_mask |= mp->layer->id; + /* Defer scaling requirements calculation to the crtc check. */ + return 0; +} + static int malidp_de_plane_check(struct drm_plane *plane, struct drm_plane_state *state) { struct malidp_plane *mp = to_malidp_plane(plane); struct malidp_plane_state *ms = to_malidp_plane_state(state); - struct drm_crtc_state *crtc_state; struct drm_framebuffer *fb; - struct drm_rect clip = { 0 }; int i, ret; - u32 src_w, src_h; if (!state->crtc || !state->fb) return 0; @@ -130,9 +200,6 @@ static int malidp_de_plane_check(struct drm_plane *plane, } } - src_w = state->src_w >> 16; - src_h = state->src_h >> 16; - if ((state->crtc_w > mp->hwdev->max_line_size) || (state->crtc_h > mp->hwdev->max_line_size) || (state->crtc_w < mp->hwdev->min_line_size) || @@ -149,22 +216,16 @@ (state->fb->pitches[1] != state->fb->pitches[2])) return -EINVAL; + ret = malidp_se_check_scaling(mp, state); + if (ret) + return ret; + /* packed RGB888 / BGR888 can't be rotated or flipped */ if (state->rotation != DRM_ROTATE_0 && (fb->format->format == DRM_FORMAT_RGB888 || fb->format->format == DRM_FORMAT_BGR888)) return -EINVAL; - crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); - clip.x2 = crtc_state->adjusted_mode.hdisplay; - clip.y2 = crtc_state->adjusted_mode.vdisplay; - ret = drm_plane_helper_check_state(state, &clip, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true); - if (ret) - return ret; - ms->rotmem_size = 0; if (state->rotation & MALIDP_ROTATED_MASK) { int val; @@ -269,6 +330,16 @@ static void malidp_de_plane_update(struct drm_plane *plane, val &= ~LAYER_COMP_MASK; val |= LAYER_COMP_PIXEL; + val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK); + if (plane->state->crtc) { + struct malidp_crtc_state *m = + to_malidp_crtc_state(plane->state->crtc->state); + + if (m->scaler_config.scale_enable && + m->scaler_config.plane_src_id == mp->layer->id) + val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE); + } + /* set the 'enable layer' bit */ val |= LAYER_ENABLE; @@ -281,7 +352,8 @@ { struct malidp_plane *mp = to_malidp_plane(plane); - malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE, + malidp_hw_clearbits(mp->hwdev, + LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK), mp->layer->base + MALIDP_LAYER_CONTROL); } diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index b816067a65c5..2039f857f77d 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -63,6 +63,8 @@ /* bit masks that are common between products */ #define MALIDP_CFG_VALID (1 << 0) +#define MALIDP_DISP_FUNC_GAMMA (1 << 0) +#define MALIDP_DISP_FUNC_CADJ (1 << 4) #define MALIDP_DISP_FUNC_ILACED (1 << 8) /* register offsets for IRQ management */ @@ -99,6 +101,58 @@ #define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16) +/* register offsets relative to MALIDP5x0_COEFFS_BASE */ +#define 
MALIDP_COLOR_ADJ_COEF 0x00000 +#define MALIDP_COEF_TABLE_ADDR 0x00030 +#define MALIDP_COEF_TABLE_DATA 0x00034 + +/* Scaling engine registers and masks. */ +#define MALIDP_SE_SCALING_EN (1 << 0) +#define MALIDP_SE_ALPHA_EN (1 << 1) +#define MALIDP_SE_ENH_MASK 3 +#define MALIDP_SE_ENH(x) (((x) & MALIDP_SE_ENH_MASK) << 2) +#define MALIDP_SE_RGBO_IF_EN (1 << 4) +#define MALIDP550_SE_CTL_SEL_MASK 7 +#define MALIDP550_SE_CTL_VCSEL(x) \ + (((x) & MALIDP550_SE_CTL_SEL_MASK) << 20) +#define MALIDP550_SE_CTL_HCSEL(x) \ + (((x) & MALIDP550_SE_CTL_SEL_MASK) << 16) + +/* Blocks with offsets from SE_CONTROL register. */ +#define MALIDP_SE_LAYER_CONTROL 0x14 +#define MALIDP_SE_L0_IN_SIZE 0x00 +#define MALIDP_SE_L0_OUT_SIZE 0x04 +#define MALIDP_SE_SET_V_SIZE(x) (((x) & 0x1fff) << 16) +#define MALIDP_SE_SET_H_SIZE(x) (((x) & 0x1fff) << 0) +#define MALIDP_SE_SCALING_CONTROL 0x24 +#define MALIDP_SE_H_INIT_PH 0x00 +#define MALIDP_SE_H_DELTA_PH 0x04 +#define MALIDP_SE_V_INIT_PH 0x08 +#define MALIDP_SE_V_DELTA_PH 0x0c +#define MALIDP_SE_COEFFTAB_ADDR 0x10 +#define MALIDP_SE_COEFFTAB_ADDR_MASK 0x7f +#define MALIDP_SE_V_COEFFTAB (1 << 8) +#define MALIDP_SE_H_COEFFTAB (1 << 9) +#define MALIDP_SE_SET_V_COEFFTAB_ADDR(x) \ + (MALIDP_SE_V_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK)) +#define MALIDP_SE_SET_H_COEFFTAB_ADDR(x) \ + (MALIDP_SE_H_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK)) +#define MALIDP_SE_COEFFTAB_DATA 0x14 +#define MALIDP_SE_COEFFTAB_DATA_MASK 0x3fff +#define MALIDP_SE_SET_COEFFTAB_DATA(x) \ + ((x) & MALIDP_SE_COEFFTAB_DATA_MASK) +/* Enhancement coefficients register offset */ +#define MALIDP_SE_IMAGE_ENH 0x3C +/* ENH_LIMITS offset 0x0 */ +#define MALIDP_SE_ENH_LOW_LEVEL 24 +#define MALIDP_SE_ENH_HIGH_LEVEL 63 +#define MALIDP_SE_ENH_LIMIT_MASK 0xfff +#define MALIDP_SE_SET_ENH_LIMIT_LOW(x) \ + ((x) & MALIDP_SE_ENH_LIMIT_MASK) +#define MALIDP_SE_SET_ENH_LIMIT_HIGH(x) \ + (((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16) +#define MALIDP_SE_ENH_COEFF0 0x04 + /* register offsets and bits specific to DP500 */ #define MALIDP500_ADDR_SPACE_SIZE 0x01000 #define MALIDP500_DC_BASE 0x00000 @@ -120,6 +174,18 @@ #define MALIDP500_COLOR_ADJ_COEF 0x00078 #define MALIDP500_COEF_TABLE_ADDR 0x000a8 #define MALIDP500_COEF_TABLE_DATA 0x000ac + +/* + * The YUV2RGB coefficients on the DP500 are not in the video layer's register + * block. They belong in a separate block above the layer's registers, hence + * the negative offset. + */ +#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8)) +/* + * To match DP550/650, the start of the coeffs registers is + * at COLORADJ_COEFF0 instead of at YUV_RGB_COEF1. 
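+ * With that, the generic offsets above resolve to the same registers as the
+ * DP500-specific defines: 0x78 + 0x30 = 0xa8 (COEF_TABLE_ADDR) and
+ * 0x78 + 0x34 = 0xac (COEF_TABLE_DATA).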
+ */ +#define MALIDP500_COEFFS_BASE 0x00078 #define MALIDP500_DE_LV_BASE 0x00100 #define MALIDP500_DE_LV_PTR_BASE 0x00124 #define MALIDP500_DE_LG1_BASE 0x00200 @@ -127,6 +193,7 @@ #define MALIDP500_DE_LG2_BASE 0x00300 #define MALIDP500_DE_LG2_PTR_BASE 0x0031c #define MALIDP500_SE_BASE 0x00c00 +#define MALIDP500_SE_CONTROL 0x00c0c #define MALIDP500_SE_PTR_BASE 0x00e0c #define MALIDP500_DC_IRQ_BASE 0x00f00 #define MALIDP500_CONFIG_VALID 0x00f00 @@ -145,9 +212,7 @@ #define MALIDP550_DE_DISP_SIDEBAND 0x00040 #define MALIDP550_DE_BGND_COLOR 0x00044 #define MALIDP550_DE_OUTPUT_DEPTH 0x0004c -#define MALIDP550_DE_COLOR_COEF 0x00050 -#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080 -#define MALIDP550_DE_COEF_TABLE_DATA 0x00084 +#define MALIDP550_COEFFS_BASE 0x00050 #define MALIDP550_DE_LV1_BASE 0x00100 #define MALIDP550_DE_LV1_PTR_BASE 0x00124 #define MALIDP550_DE_LV2_BASE 0x00200 @@ -158,6 +223,7 @@ #define MALIDP550_DE_LS_PTR_BASE 0x0042c #define MALIDP550_DE_PERF_BASE 0x00500 #define MALIDP550_SE_BASE 0x08000 +#define MALIDP550_SE_CONTROL 0x08010 #define MALIDP550_DC_BASE 0x0c000 #define MALIDP550_DC_CONTROL 0x0c010 #define MALIDP550_DC_CONFIG_REQ (1 << 16) diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 1597458d884e..d6c2a5d190eb 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -529,10 +529,10 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = { .map_dma_buf = armada_gem_prime_map_dma_buf, .unmap_dma_buf = armada_gem_prime_unmap_dma_buf, .release = drm_gem_dmabuf_release, - .kmap_atomic = armada_gem_dmabuf_no_kmap, - .kunmap_atomic = armada_gem_dmabuf_no_kunmap, - .kmap = armada_gem_dmabuf_no_kmap, - .kunmap = armada_gem_dmabuf_no_kunmap, + .map_atomic = armada_gem_dmabuf_no_kmap, + .unmap_atomic = armada_gem_dmabuf_no_kunmap, + .map = armada_gem_dmabuf_no_kmap, + .unmap = armada_gem_dmabuf_no_kunmap, .mmap = armada_gem_dmabuf_mmap, }; diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 50c910efa13d..e879496b8a42 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -236,6 +236,7 @@ struct ttm_bo_driver ast_bo_driver = { .verify_access = ast_bo_verify_access, .io_mem_reserve = &ast_ttm_io_mem_reserve, .io_mem_free = &ast_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int ast_mm_init(struct ast_private *ast) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index f987b4572d4a..65a3bd7a0c00 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -221,7 +221,8 @@ err_encoder_cleanup: int atmel_hlcdc_create_outputs(struct drm_device *dev) { struct device_node *remote; - int ret, endpoint = 0; + int ret = -ENODEV; + int endpoint = 0; while (true) { /* Loop thru possible multiple connections to the output */ @@ -236,7 +237,5 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) return ret; } - if (!endpoint) - return -ENODEV; return ret; } diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 857755ac2d70..c4cadb638460 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -205,6 +205,7 @@ struct ttm_bo_driver bochs_bo_driver = { .verify_access = bochs_bo_verify_access, .io_mem_reserve = &bochs_ttm_io_mem_reserve, .io_mem_free = &bochs_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int bochs_mm_init(struct bochs_device 
*bochs) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 80a6d09cf73d..8737de8c1c52 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -1973,6 +1973,20 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) return 0; } +static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *orig_mode, + struct drm_display_mode *mode) +{ + struct dw_hdmi *hdmi = bridge->driver_private; + struct drm_connector *connector = &hdmi->connector; + enum drm_mode_status status; + + status = dw_hdmi_connector_mode_valid(connector, mode); + if (status != MODE_OK) + return false; + return true; +} + static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, struct drm_display_mode *orig_mode, struct drm_display_mode *mode) @@ -2014,6 +2028,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .enable = dw_hdmi_bridge_enable, .disable = dw_hdmi_bridge_disable, .mode_set = dw_hdmi_bridge_mode_set, + .mode_fixup = dw_hdmi_bridge_mode_fixup, }; static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index f53aa8f4a143..93dbcd38355d 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -236,6 +236,7 @@ struct ttm_bo_driver cirrus_bo_driver = { .verify_access = cirrus_bo_verify_access, .io_mem_reserve = &cirrus_ttm_io_mem_reserve, .io_mem_free = &cirrus_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int cirrus_mm_init(struct cirrus_device *cirrus) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 9fb65b736a90..954eb848b5e2 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -403,10 +403,10 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .map_dma_buf = drm_gem_map_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, - .kmap = drm_gem_dmabuf_kmap, - .kmap_atomic = drm_gem_dmabuf_kmap_atomic, - .kunmap = drm_gem_dmabuf_kunmap, - .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic, + .map = drm_gem_dmabuf_kmap, + .map_atomic = drm_gem_dmabuf_kmap_atomic, + .unmap = drm_gem_dmabuf_kunmap, + .unmap_atomic = drm_gem_dmabuf_kunmap_atomic, .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c index 3feef0659940..3e88fa24eab3 100644 --- a/drivers/gpu/drm/drm_property.c +++ b/drivers/gpu/drm/drm_property.c @@ -476,7 +476,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { list_for_each_entry(prop_enum, &property->enum_list, head) { enum_count++; - if (out_resp->count_enum_blobs <= enum_count) + if (out_resp->count_enum_blobs < enum_count) continue; if (copy_to_user(&enum_ptr[copied].value, diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index cc1731c5289c..71cee4e9fefb 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -5,6 +5,7 @@ config DRM_ETNAVIV depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) depends on MMU select SHMEM + select SYNC_FILE select TMPFS select IOMMU_API select IOMMU_SUPPORT diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 587e45043542..5255278dde56 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ 
b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file) return 0; } -static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file) +static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file) { struct etnaviv_drm_private *priv = dev->dev_private; struct etnaviv_file_private *ctx = file->driver_priv; @@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = { DRIVER_PRIME | DRIVER_RENDER, .open = etnaviv_open, - .preclose = etnaviv_preclose, + .postclose = etnaviv_postclose, .gem_free_object_unlocked = etnaviv_gem_free_object, .gem_vm_ops = &vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = { .desc = "etnaviv DRM", .date = "20151214", .major = 1, - .minor = 0, + .minor = 1, }; /* diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index e63ff116a3b3..c4a091e87426 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -20,6 +20,7 @@ #include <linux/reservation.h> #include "etnaviv_drv.h" +struct dma_fence; struct etnaviv_gem_ops; struct etnaviv_gem_object; @@ -104,9 +105,10 @@ struct etnaviv_gem_submit { struct drm_device *dev; struct etnaviv_gpu *gpu; struct ww_acquire_ctx ticket; - u32 fence; + struct dma_fence *fence; unsigned int nr_bos; struct etnaviv_gem_submit_bo bos[0]; + u32 flags; }; int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 726090d7a6ac..e1909429837e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -14,7 +14,9 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/dma-fence-array.h> #include <linux/reservation.h> +#include <linux/sync_file.h> #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" #include "etnaviv_gpu.h" @@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; + bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); - ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write); + ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, + explicit); if (ret) break; } @@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit) } ww_acquire_fini(&submit->ticket); + dma_fence_put(submit->fence); kfree(submit); } @@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, struct etnaviv_gem_submit *submit; struct etnaviv_cmdbuf *cmdbuf; struct etnaviv_gpu *gpu; + struct dma_fence *in_fence = NULL; + struct sync_file *sync_file = NULL; + int out_fence_fd = -1; void *stream; int ret; @@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; } + if (args->flags & ~ETNA_SUBMIT_FLAGS) { + DRM_ERROR("invalid flags: 0x%x\n", args->flags); + return -EINVAL; + } + /* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks. 
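 * (copying from user memory can fault and sleep, which must not happen once the submit locks are taken)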
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_cmds; } + if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) { + ret = out_fence_fd; + goto err_submit_cmds; + } + } + submit = submit_create(dev, gpu, args->nr_bos); if (!submit) { ret = -ENOMEM; goto err_submit_cmds; } + submit->flags = args->flags; + ret = submit_lookup_objects(submit, file, bos, args->nr_bos); if (ret) goto err_submit_objects; @@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_objects; } + if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) { + in_fence = sync_file_get_fence(args->fence_fd); + if (!in_fence) { + ret = -EINVAL; + goto err_submit_objects; + } + + /* + * Wait if the fence is from a foreign context, or if the fence + * array contains any fence from a foreign context. + */ + if (!dma_fence_match_context(in_fence, gpu->fence_context)) { + ret = dma_fence_wait(in_fence, true); + if (ret) + goto err_submit_objects; + } + } + ret = submit_fence_sync(submit); if (ret) goto err_submit_objects; @@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret == 0) cmdbuf = NULL; - args->fence = submit->fence; + if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { + /* + * This can be improved: ideally we want to allocate the sync + * file before kicking off the GPU job and just attach the + * fence to the sync file here, eliminating the ENOMEM + * possibility at this stage. + */ + sync_file = sync_file_create(submit->fence); + if (!sync_file) { + ret = -ENOMEM; + goto out; + } + fd_install(out_fence_fd, sync_file->file); + } + + args->fence_fd = out_fence_fd; + args->fence = submit->fence->seqno; out: submit_unpin_objects(submit); @@ -419,9 +476,13 @@ out: flush_workqueue(priv->wq); err_submit_objects: + if (in_fence) + dma_fence_put(in_fence); submit_cleanup(submit); err_submit_cmds: + if (ret && (out_fence_fd >= 0)) + put_unused_fd(out_fence_fd); /* if we still own the cmdbuf */ if (cmdbuf) etnaviv_cmdbuf_free(cmdbuf); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index da48819ff2e6..9a9c40717801 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -18,6 +18,7 @@ #include <linux/dma-fence.h> #include <linux/moduleparam.h> #include <linux/of_device.h> +#include <linux/thermal.h> #include "etnaviv_cmdbuf.h" #include "etnaviv_dump.h" @@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock) gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock); } +static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu) +{ + unsigned int fscale = 1 << (6 - gpu->freq_scale); + u32 clock; + + clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | + VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale); + + etnaviv_gpu_load_clock(gpu, clock); +} + static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) { u32 control, idle; @@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) timeout = jiffies + msecs_to_jiffies(1000); while (time_is_after_jiffies(timeout)) { - control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | - VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); - /* enable clock */ - etnaviv_gpu_load_clock(gpu, control); + etnaviv_gpu_update_clock(gpu); + + control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); /* Wait for stable clock. 
Vivante's code waited for 1ms */ usleep_range(1000, 10000); @@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) } /* We rely on the GPU running, so program the clock */ - control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | - VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); - - /* enable clock */ - etnaviv_gpu_load_clock(gpu, control); + etnaviv_gpu_update_clock(gpu); return 0; } @@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) { struct etnaviv_fence *f; + /* + * GPU lock must already be held, otherwise fence completion order might + * not match the seqno order assigned here. + */ + lockdep_assert_held(&gpu->lock); + f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) return NULL; @@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) } int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, - unsigned int context, bool exclusive) + unsigned int context, bool exclusive, bool explicit) { struct reservation_object *robj = etnaviv_obj->resv; struct reservation_object_list *fobj; @@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, return ret; } + if (explicit) + return 0; + /* * If we have any shared fences, then the exclusive fence * should be ignored as it will already have been signalled. @@ -1317,12 +1333,12 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, if (!fence) { event_free(gpu, event); ret = -ENOMEM; - goto out_pm_put; + goto out_unlock; } gpu->event[event].fence = fence; - submit->fence = fence->seqno; - gpu->active_fence = submit->fence; + submit->fence = dma_fence_get(fence); + gpu->active_fence = submit->fence->seqno; if (gpu->lastctx != cmdbuf->ctx) { gpu->mmu->need_flush = true; @@ -1357,6 +1373,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, hangcheck_timer_reset(gpu); ret = 0; +out_unlock: mutex_unlock(&gpu->lock); out_pm_put: @@ -1526,17 +1543,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) #ifdef CONFIG_PM static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) { - u32 clock; int ret; ret = mutex_lock_killable(&gpu->lock); if (ret) return ret; - clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS | - VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40); - - etnaviv_gpu_load_clock(gpu, clock); + etnaviv_gpu_update_clock(gpu); etnaviv_gpu_hw_init(gpu); gpu->switch_context = true; @@ -1548,6 +1561,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) } #endif +static int +etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = 6; + + return 0; +} + +static int +etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct etnaviv_gpu *gpu = cdev->devdata; + + *state = gpu->freq_scale; + + return 0; +} + +static int +etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct etnaviv_gpu *gpu = cdev->devdata; + + mutex_lock(&gpu->lock); + gpu->freq_scale = state; + if (!pm_runtime_suspended(gpu->dev)) + etnaviv_gpu_update_clock(gpu); + mutex_unlock(&gpu->lock); + + return 0; +} + +static struct thermal_cooling_device_ops cooling_ops = { + .get_max_state = etnaviv_gpu_cooling_get_max_state, + .get_cur_state = etnaviv_gpu_cooling_get_cur_state, + .set_cur_state = etnaviv_gpu_cooling_set_cur_state, +}; + static int etnaviv_gpu_bind(struct device *dev, struct device *master, void *data) { @@ -1556,13 +1610,20 @@ static int etnaviv_gpu_bind(struct device *dev, 
struct device *master, struct etnaviv_gpu *gpu = dev_get_drvdata(dev); int ret; + gpu->cooling = thermal_of_cooling_device_register(dev->of_node, + (char *)dev_name(dev), gpu, &cooling_ops); + if (IS_ERR(gpu->cooling)) + return PTR_ERR(gpu->cooling); + #ifdef CONFIG_PM ret = pm_runtime_get_sync(gpu->dev); #else ret = etnaviv_gpu_clk_enable(gpu); #endif - if (ret < 0) + if (ret < 0) { + thermal_cooling_device_unregister(gpu->cooling); return ret; + } gpu->drm = drm; gpu->fence_context = dma_fence_context_alloc(1); @@ -1616,6 +1677,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master, } gpu->drm = NULL; + + thermal_cooling_device_unregister(gpu->cooling); + gpu->cooling = NULL; } static const struct component_ops gpu_ops = { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 1c0606ea7d5e..9227a9740447 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -97,6 +97,7 @@ struct etnaviv_cmdbuf; struct etnaviv_gpu { struct drm_device *drm; + struct thermal_cooling_device *cooling; struct device *dev; struct mutex lock; struct etnaviv_chip_identity identity; @@ -150,6 +151,7 @@ struct etnaviv_gpu { u32 hangcheck_fence; u32 hangcheck_dma_addr; struct work_struct recover_work; + unsigned int freq_scale; }; static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data) @@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m); #endif int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj, - unsigned int context, bool exclusive); + unsigned int context, bool exclusive, bool implicit); void etnaviv_gpu_retire(struct etnaviv_gpu *gpu); int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index b7d7721e72fa..40af17ec6312 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, { int ret; - if (vgpu->failsafe) - return 0; - if (WARN_ON(bytes > 4)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 6f972afbdbc3..41b2c3aaa04a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id) { struct decode_info *d_info; - if (ring_id >= I915_NUM_ENGINES) - return INVALID_OP; - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; if (d_info == NULL) return INVALID_OP; @@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id) struct decode_info *d_info; int i; - if (ring_id >= I915_NUM_ENGINES) - return; - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; if (d_info == NULL) return; @@ -1215,7 +1209,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, if (!info->async_flip) return 0; - if (IS_SKYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1243,7 +1237,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip( set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); - if (IS_SKYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0), 
info->stride_val); set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10), @@ -1267,7 +1261,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv)) return gen8_decode_mi_display_flip(s, info); - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return skl_decode_mi_display_flip(s, info); return -ENODEV; @@ -1278,7 +1272,9 @@ static int check_mi_display_flip(struct parser_exec_state *s, { struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) + if (IS_BROADWELL(dev_priv) + || IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv)) return gen8_check_mi_display_flip(s, info); return -ENODEV; } @@ -1289,7 +1285,9 @@ static int update_plane_mmio_from_mi_display_flip( { struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) + if (IS_BROADWELL(dev_priv) + || IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv)) return gen8_update_plane_mmio_from_mi_display_flip(s, info); return -ENODEV; } @@ -1569,7 +1567,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) return 0; @@ -2478,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) t1 = get_cycles(); - memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state)); + s_before_advance_custom = *s; if (info->handler) { ret = info->handler(s); @@ -2604,6 +2603,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail; struct parser_exec_state s; int ret = 0; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); /* ring base is page aligned */ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE))) @@ -2618,14 +2620,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; - s.vgpu = wa_ctx->workload->vgpu; - s.ring_id = wa_ctx->workload->ring_id; + s.vgpu = workload->vgpu; + s.ring_id = workload->ring_id; s.ring_start = wa_ctx->indirect_ctx.guest_gma; s.ring_size = ring_size; s.ring_head = gma_head; s.ring_tail = gma_tail; s.rb_va = wa_ctx->indirect_ctx.shadow_va; - s.workload = wa_ctx->workload; + s.workload = workload; ret = ip_gma_set(&s, gma_head); if (ret) @@ -2708,12 +2710,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ctx_size = wa_ctx->indirect_ctx.size; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; - struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_gem_object *obj; int ret = 0; void *map; - obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv, + obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv, roundup(ctx_size + CACHELINE_BYTES, PAGE_SIZE)); if (IS_ERR(obj)) @@ -2733,8 +2738,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) goto unmap_src; } - ret = copy_gma_to_hva(wa_ctx->workload->vgpu, - wa_ctx->workload->vgpu->gtt.ggtt_mm, + ret = copy_gma_to_hva(workload->vgpu, + 
workload->vgpu->gtt.ggtt_mm, guest_gma, guest_gma + ctx_size, map); if (ret < 0) { @@ -2772,7 +2777,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ret; - struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + struct intel_vgpu *vgpu = workload->vgpu; if (wa_ctx->indirect_ctx.size == 0) return 0; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 5419ae6ec633..e0261fcc5b50 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -161,8 +161,9 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { #define DPCD_HEADER_SIZE 0xb +/* let the virtual display support DP 1.2 */ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { - 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void emulate_monitor_status_change(struct intel_vgpu *vgpu) @@ -172,26 +173,64 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | SDE_PORTE_HOTPLUG_SPT); + vgpu_vreg(vgpu, SKL_FUSE_STATUS) |= + SKL_FUSE_DOWNLOAD_STATUS | + SKL_FUSE_PG0_DIST_STATUS | + SKL_FUSE_PG1_DIST_STATUS | + SKL_FUSE_PG2_DIST_STATUS; + vgpu_vreg(vgpu, LCPLL1_CTL) |= + LCPLL_PLL_ENABLE | + LCPLL_PLL_LOCK; + vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; + + } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; + vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_C << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_D << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } - if (IS_SKYLAKE(dev_priv) && + 
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; } @@ -353,7 +392,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -375,7 +414,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, resolution); else diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f1f426a97aa9..dca989eb2d42 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -56,8 +56,8 @@ static int context_switch_events[] = { static int ring_id_to_context_switch_event(int ring_id) { - if (WARN_ON(ring_id < RCS && ring_id > - ARRAY_SIZE(context_switch_events))) + if (WARN_ON(ring_id < RCS || + ring_id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; return context_switch_events[ring_id]; @@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - int ring_id = wa_ctx->workload->ring_id; - struct i915_gem_context *shadow_ctx = - wa_ctx->workload->vgpu->shadow_ctx; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + int ring_id = workload->ring_id; + struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_gem_object *ctx_obj = shadow_ctx->engine[ring_id].state->obj; struct execlist_ring_context *shadow_ring_context; @@ -680,15 +682,12 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, CACHELINE_BYTES; workload->wa_ctx.per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK; - workload->wa_ctx.workload = workload; WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1)); } if (emulate_schedule_in) - memcpy(&workload->elsp_dwords, - &vgpu->execlist[ring_id].elsp_dwords, - sizeof(workload->elsp_dwords)); + workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords; gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", workload, ring_id, head, tail, start, ctl); @@ -775,7 +774,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) _EL_OFFSET_STATUS_PTR); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); - ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; + ctx_status_ptr.read_ptr = 0; + ctx_status_ptr.write_ptr = 0x7; vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 933a7c211a1c..dce8d15f706f 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) struct gvt_firmware_header *h; void *firmware; void *p; - unsigned long size; + unsigned long size, crc32_start; int i; int ret; - size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; + size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); if (!firmware) return -ENOMEM; @@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) memcpy(gvt->firmware.mmio, p, 
info->mmio_size); + crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; + h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); + firmware_attr.size = size; firmware_attr.private = firmware; @@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) firmware->mmio = mem; - sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", + sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index b832bea64e03..c6f0077f590d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2224,7 +2224,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_dbg_core("init gtt\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { gvt->gtt.pte_ops = &gen8_gtt_pte_ops; gvt->gtt.gma_ops = &gen8_gtt_gma_ops; gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table; @@ -2293,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; u32 index; u32 offset; u32 num_entries; struct intel_gvt_gtt_entry e; + intel_runtime_pm_get(dev_priv); + memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); e.type = GTT_TYPE_GGTT_PTE; ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); @@ -2313,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; for (offset = 0; offset < num_entries; offset++) ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); + + intel_runtime_pm_put(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 3b9d59e457ba..7dea5e5d5567 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = { .vgpu_create = intel_gvt_create_vgpu, .vgpu_destroy = intel_gvt_destroy_vgpu, .vgpu_reset = intel_gvt_reset_vgpu, + .vgpu_activate = intel_gvt_activate_vgpu, + .vgpu_deactivate = intel_gvt_deactivate_vgpu, }; /** @@ -106,7 +108,8 @@ static void init_device_info(struct intel_gvt *gvt) struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { info->max_support_vgpus = 8; info->cfg_space_size = 256; info->mmio_size = 2 * 1024 * 1024; @@ -143,6 +146,11 @@ static int gvt_service_thread(void *data) intel_gvt_emulate_vblank(gvt); mutex_unlock(&gvt->lock); } + + if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, + (void *)&gvt->service_request)) { + intel_gvt_schedule(gvt); + } } return 0; @@ -196,6 +204,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) idr_destroy(&gvt->vgpu_idr); + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -214,6 +224,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) int intel_gvt_init_device(struct drm_i915_private *dev_priv) { struct intel_gvt *gvt; + struct intel_vgpu *vgpu; int ret; /* @@ -286,6 +297,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) goto out_clean_types; 
} + vgpu = intel_gvt_create_idle_vgpu(gvt); + if (IS_ERR(vgpu)) { + ret = PTR_ERR(vgpu); + gvt_err("failed to create idle vgpu\n"); + goto out_clean_types; + } + gvt->idle_vgpu = vgpu; + gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; return 0; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 6dfc48b63b71..930732e5c780 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -138,6 +138,10 @@ struct intel_vgpu_display { struct intel_vgpu_sbi sbi; }; +struct vgpu_sched_ctl { + int weight; +}; + struct intel_vgpu { struct intel_gvt *gvt; int id; @@ -147,6 +151,7 @@ struct intel_vgpu { bool failsafe; bool resetting; void *sched_data; + struct vgpu_sched_ctl sched_ctl; struct intel_vgpu_fence fence; struct intel_vgpu_gm gm; @@ -160,6 +165,7 @@ struct intel_vgpu { struct list_head workload_q_head[I915_NUM_ENGINES]; struct kmem_cache *workloads; atomic_t running_workload_num; + ktime_t last_ctx_submit_time; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; @@ -215,6 +221,7 @@ struct intel_vgpu_type { unsigned int low_gm_size; unsigned int high_gm_size; unsigned int fence; + unsigned int weight; enum intel_vgpu_edid resolution; }; @@ -236,6 +243,7 @@ struct intel_gvt { DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); struct intel_vgpu_type *types; unsigned int num_types; + struct intel_vgpu *idle_vgpu; struct task_struct *service_thread; wait_queue_head_t service_thread_wq; @@ -249,6 +257,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) enum { INTEL_GVT_REQUEST_EMULATE_VBLANK = 0, + INTEL_GVT_REQUEST_SCHED = 1, }; static inline void intel_gvt_request_service(struct intel_gvt *gvt, @@ -322,6 +331,8 @@ struct intel_vgpu_creation_params { __u64 resolution; __s32 primary; __u64 vgpu_id; + + __u32 weight; }; int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, @@ -376,13 +387,16 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, int intel_gvt_init_vgpu_types(struct intel_gvt *gvt); void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); +struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt); +void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu); struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); - +void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ @@ -449,6 +463,8 @@ struct intel_gvt_ops { struct intel_vgpu_type *); void (*vgpu_destroy)(struct intel_vgpu *); void (*vgpu_reset)(struct intel_vgpu *); + void (*vgpu_activate)(struct intel_vgpu *); + void (*vgpu_deactivate)(struct intel_vgpu *); }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 6da9ae1618e3..0ad1a508e2af 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_BDW; else if (IS_SKYLAKE(gvt->dev_priv)) return D_SKL; + else if (IS_KABYLAKE(gvt->dev_priv)) + return D_KBL; return 0; } @@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, old = vgpu_vreg(vgpu, offset); new = 
CALC_MODE_MASK_REG(old, *(u32 *)p_data); - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; @@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (IS_SKYLAKE(vgpu->gvt->dev_priv) && - offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { + if ((IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) + && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ return 0; } else if (IS_BROADWELL(vgpu->gvt->dev_priv) && @@ -1311,7 +1315,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, switch (cmd) { case GEN9_PCODE_READ_MEM_LATENCY: - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) { /** * "Read memory latency" command on gen9. * Below memory latency values are read @@ -1324,7 +1329,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, } break; case SKL_PCODE_CDCLK_CONTROL: - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) *data0 = SKL_CDCLK_READY_FOR_CHANGE; break; case GEN6_PCODE_READ_RC6VIDS: @@ -1418,6 +1424,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; if (execlist->elsp_dwords.index == 3) { + vgpu->last_ctx_submit_time = ktime_get(); ret = intel_vgpu_submit_execlist(vgpu, ring_id); if(ret) gvt_vgpu_err("fail submit workload on ring %d\n", @@ -2592,219 +2599,232 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); - MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + dp_aux_ch_ctl_mmio_write); - MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); - MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); + MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS); + MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL, + skl_power_well_ctl_write); + MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write); MMIO_D(0xa210, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write); - MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write); - MMIO_D(0x45504, D_SKL); - MMIO_D(0x45520, D_SKL); - MMIO_D(0x46000, D_SKL); - MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write); - MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write); - MMIO_D(0x6C040, D_SKL); - MMIO_D(0x6C048, D_SKL); - MMIO_D(0x6C050, D_SKL); - MMIO_D(0x6C044, D_SKL); - MMIO_D(0x6C04C, D_SKL); - MMIO_D(0x6C054, D_SKL); - MMIO_D(0x6c058, D_SKL); - MMIO_D(0x6c05c, D_SKL); - MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL); - - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), 
D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write); - - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write); - - MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write); - - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL); - - MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL); - MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL); - MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL); - - MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, 
NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL); - - MMIO_D(0x70380, D_SKL); - MMIO_D(0x71380, D_SKL); - MMIO_D(0x72380, D_SKL); - MMIO_D(0x7039c, D_SKL); - - MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_D(0x8f074, D_SKL); - MMIO_D(0x8f004, D_SKL); - MMIO_D(0x8f034, D_SKL); - - MMIO_D(0xb11c, D_SKL); - - MMIO_D(0x51000, D_SKL); - MMIO_D(0x6c00c, D_SKL); - - MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); - MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); - - MMIO_D(0xd08, D_SKL); - MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write); + MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write); + MMIO_D(0x45504, D_SKL_PLUS); + MMIO_D(0x45520, D_SKL_PLUS); + MMIO_D(0x46000, D_SKL_PLUS); + MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write); + MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write); + MMIO_D(0x6C040, D_SKL | D_KBL); + MMIO_D(0x6C048, D_SKL | D_KBL); + MMIO_D(0x6C050, D_SKL | D_KBL); + MMIO_D(0x6C044, D_SKL | D_KBL); + MMIO_D(0x6C04C, D_SKL | D_KBL); + MMIO_D(0x6C054, D_SKL | D_KBL); + MMIO_D(0x6c058, D_SKL | D_KBL); + MMIO_D(0x6c05c, D_SKL | D_KBL); + MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL); + + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, 
pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); + + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); + + MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), 
D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_D(0x70380, D_SKL_PLUS); + MMIO_D(0x71380, D_SKL_PLUS); + MMIO_D(0x72380, D_SKL_PLUS); + MMIO_D(0x7039c, D_SKL_PLUS); + + MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_D(0x8f074, D_SKL | D_KBL); + MMIO_D(0x8f004, D_SKL | D_KBL); + MMIO_D(0x8f034, D_SKL | D_KBL); + + MMIO_D(0xb11c, D_SKL | D_KBL); + + MMIO_D(0x51000, D_SKL | D_KBL); + MMIO_D(0x6c00c, D_SKL_PLUS); + + MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); + MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); + + MMIO_D(0xd08, D_SKL_PLUS); + MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL); + MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* TRTT */ - MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write); - MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); + MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write); + MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write); - MMIO_D(0x45008, D_SKL); + MMIO_D(0x45008, D_SKL | D_KBL); - MMIO_D(0x46430, D_SKL); + MMIO_D(0x46430, D_SKL | D_KBL); - 
MMIO_D(0x46520, D_SKL); + MMIO_D(0x46520, D_SKL | D_KBL); - MMIO_D(0xc403c, D_SKL); - MMIO_D(0xb004, D_SKL); + MMIO_D(0xc403c, D_SKL | D_KBL); + MMIO_D(0xb004, D_SKL_PLUS); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); - MMIO_D(0x65900, D_SKL); - MMIO_D(0x1082c0, D_SKL); - MMIO_D(0x4068, D_SKL); - MMIO_D(0x67054, D_SKL); - MMIO_D(0x6e560, D_SKL); - MMIO_D(0x6e554, D_SKL); - MMIO_D(0x2b20, D_SKL); - MMIO_D(0x65f00, D_SKL); - MMIO_D(0x65f08, D_SKL); - MMIO_D(0x320f0, D_SKL); - - MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(0x70034, D_SKL); - MMIO_D(0x71034, D_SKL); - MMIO_D(0x72034, D_SKL); - - MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL); - MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL); - MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL); - MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL); - MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL); - MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); - - MMIO_D(0x44500, D_SKL); + MMIO_D(0x65900, D_SKL_PLUS); + MMIO_D(0x1082c0, D_SKL | D_KBL); + MMIO_D(0x4068, D_SKL | D_KBL); + MMIO_D(0x67054, D_SKL | D_KBL); + MMIO_D(0x6e560, D_SKL | D_KBL); + MMIO_D(0x6e554, D_SKL | D_KBL); + MMIO_D(0x2b20, D_SKL | D_KBL); + MMIO_D(0x65f00, D_SKL | D_KBL); + MMIO_D(0x65f08, D_SKL | D_KBL); + MMIO_D(0x320f0, D_SKL | D_KBL); + + MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_D(0x70034, D_SKL_PLUS); + MMIO_D(0x71034, D_SKL_PLUS); + MMIO_D(0x72034, D_SKL_PLUS); + + MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS); + MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS); + MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS); + MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS); + MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS); + MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS); + + MMIO_D(0x44500, D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS, + MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + + MMIO_D(0x4ab8, D_KBL); + MMIO_D(0x940c, D_SKL_PLUS); + MMIO_D(0x2248, D_SKL_PLUS | D_KBL); + MMIO_D(0x4ab0, D_SKL | D_KBL); + MMIO_D(0x20d4, D_SKL | D_KBL); + return 0; } @@ -2881,7 +2901,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ret = init_broadwell_mmio_info(gvt); if (ret) goto err; - } else if (IS_SKYLAKE(dev_priv)) { + } else if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv)) { ret = init_broadwell_mmio_info(gvt); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 92bb247e3478..9d6812f0957f 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -580,7 +580,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv)) { + } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); @@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) gvt_dbg_core("init irq framework\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { irq->ops = &gen8_irq_ops; irq->irq_map = gen8_irq_map; } else { diff --git 
a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d641214578a7..1ae0b4083ce1 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -295,10 +295,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, return 0; return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" - "fence: %d\nresolution: %s\n", + "fence: %d\nresolution: %s\n" + "weight: %d\n", BYTES_TO_MB(type->low_gm_size), BYTES_TO_MB(type->high_gm_size), - type->fence, vgpu_edid_str(type->resolution)); + type->fence, vgpu_edid_str(type->resolution), + type->weight); } static MDEV_TYPE_ATTR_RO(available_instances); @@ -544,6 +546,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) if (ret) goto undo_group; + intel_gvt_ops->vgpu_activate(vgpu); + atomic_set(&vgpu->vdev.released, 0); return ret; @@ -569,6 +573,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) return; + intel_gvt_ops->vgpu_deactivate(vgpu); + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, &vgpu->vdev.iommu_notifier); WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); @@ -1146,8 +1152,40 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return 0; } +static ssize_t +vgpu_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_device *mdev = mdev_from_dev(dev); + + if (mdev) { + struct intel_vgpu *vgpu = (struct intel_vgpu *) + mdev_get_drvdata(mdev); + return sprintf(buf, "%d\n", vgpu->id); + } + return sprintf(buf, "\n"); +} + +static DEVICE_ATTR_RO(vgpu_id); + +static struct attribute *intel_vgpu_attrs[] = { + &dev_attr_vgpu_id.attr, + NULL +}; + +static const struct attribute_group intel_vgpu_group = { + .name = "intel_vgpu", + .attrs = intel_vgpu_attrs, +}; + +static const struct attribute_group *intel_vgpu_groups[] = { + &intel_vgpu_group, + NULL, +}; + static const struct mdev_parent_ops intel_vgpu_ops = { .supported_type_groups = intel_vgpu_type_groups, + .mdev_attr_groups = intel_vgpu_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1340,13 +1378,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) { - struct intel_vgpu *vgpu = info->vgpu; - - if (!info) { - gvt_vgpu_err("kvmgt_guest_info invalid\n"); - return false; - } - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); kvm_put_kvm(info->kvm); kvmgt_protect_table_destroy(info); diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index a3a027025cd0..7edd66f38ef9 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -44,20 +44,21 @@ struct intel_vgpu; #define D_HSW (1 << 2) #define D_BDW (1 << 3) #define D_SKL (1 << 4) +#define D_KBL (1 << 5) -#define D_GEN9PLUS (D_SKL) -#define D_GEN8PLUS (D_BDW | D_SKL) -#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL) -#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL) +#define D_GEN9PLUS (D_SKL | D_KBL) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) +#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL) +#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) -#define D_SKL_PLUS (D_SKL) -#define D_BDW_PLUS (D_BDW | D_SKL) -#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL) -#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL) +#define D_SKL_PLUS (D_SKL | D_KBL) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) +#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL) +#define D_IVB_PLUS 
(D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) #define D_PRE_BDW (D_SNB | D_IVB | D_HSW) #define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW) -#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL) +#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) struct intel_gvt_mmio_info { u32 offset; diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 0beb83563b08..c6e7972ac21d 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -44,7 +44,7 @@ struct render_mmio { u32 value; }; -static struct render_mmio gen8_render_mmio_list[] = { +static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = { {RCS, _MMIO(0x229c), 0xffff, false}, {RCS, _MMIO(0x2248), 0x0, false}, {RCS, _MMIO(0x2098), 0x0, false}, @@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = { {BCS, _MMIO(0x22028), 0x0, false}, }; -static struct render_mmio gen9_render_mmio_list[] = { +static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = { {RCS, _MMIO(0x229c), 0xffff, false}, {RCS, _MMIO(0x2248), 0x0, false}, {RCS, _MMIO(0x2098), 0x0, false}, @@ -126,6 +126,18 @@ static struct render_mmio gen9_render_mmio_list[] = { {VCS2, _MMIO(0x1c028), 0xffff, false}, {VECS, _MMIO(0x1a028), 0xffff, false}, + + {RCS, _MMIO(0x7304), 0xffff, true}, + {RCS, _MMIO(0x2248), 0x0, false}, + {RCS, _MMIO(0x940c), 0x0, false}, + {RCS, _MMIO(0x4ab8), 0x0, false}, + + {RCS, _MMIO(0x4ab0), 0x0, false}, + {RCS, _MMIO(0x20d4), 0x0, false}, + + {RCS, _MMIO(0xb004), 0x0, false}, + {RCS, _MMIO(0x20a0), 0x0, false}, + {RCS, _MMIO(0x20e4), 0xffff, false}, }; static u32 gen9_render_mocs[I915_NUM_ENGINES][64]; @@ -159,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && IS_SKYLAKE(dev_priv)) + if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -192,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id) if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (!IS_SKYLAKE(dev_priv)) - return; - offset.reg = regs[ring_id]; for (i = 0; i < 64; i++) { gen9_render_mocs[ring_id][i] = I915_READ(offset); @@ -230,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (!IS_SKYLAKE(dev_priv)) - return; - offset.reg = regs[ring_id]; for (i = 0; i < 64; i++) { vgpu_vreg(vgpu, offset) = I915_READ(offset); @@ -265,7 +271,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) u32 inhibit_mask = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); load_mocs(vgpu, ring_id); @@ -312,7 +319,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id) u32 v; int i, array_size; - if (IS_SKYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); restore_mocs(vgpu, ring_id); diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 34b9acdf3479..79ba4b3440aa 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -47,19 +47,92 @@ static bool 
vgpu_has_pending_workload(struct intel_vgpu *vgpu) return false; } +struct vgpu_sched_data { + struct list_head lru_list; + struct intel_vgpu *vgpu; + + ktime_t sched_in_time; + ktime_t sched_out_time; + ktime_t sched_time; + ktime_t left_ts; + ktime_t allocated_ts; + + struct vgpu_sched_ctl sched_ctl; +}; + +struct gvt_sched_data { + struct intel_gvt *gvt; + struct hrtimer timer; + unsigned long period; + struct list_head lru_runq_head; +}; + +static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu) +{ + ktime_t delta_ts; + struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data; + + delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time; + + vgpu_data->sched_time += delta_ts; + vgpu_data->left_ts -= delta_ts; +} + +#define GVT_TS_BALANCE_PERIOD_MS 100 +#define GVT_TS_BALANCE_STAGE_NUM 10 + +static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) +{ + struct vgpu_sched_data *vgpu_data; + struct list_head *pos; + static uint64_t stage_check; + int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; + + /* At stage 0 the timeslice accumulation is reset and each vGPU's + * timeslice is allocated afresh, without carrying over any + * previous debt. + */ + if (stage == 0) { + int total_weight = 0; + ktime_t fair_timeslice; + + list_for_each(pos, &sched_data->lru_runq_head) { + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); + total_weight += vgpu_data->sched_ctl.weight; + } + + list_for_each(pos, &sched_data->lru_runq_head) { + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); + fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) * + vgpu_data->sched_ctl.weight / + total_weight; + + vgpu_data->allocated_ts = fair_timeslice; + vgpu_data->left_ts = vgpu_data->allocated_ts; + } + } else { + list_for_each(pos, &sched_data->lru_runq_head) { + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); + + /* The timeslice for the next 100ms should also include any + * leftover/debt slice carried over from the previous stages. + */ + vgpu_data->left_ts += vgpu_data->allocated_ts; + } + } +} + static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; enum intel_engine_id i; struct intel_engine_cs *engine; + struct vgpu_sched_data *vgpu_data; + ktime_t cur_time; /* no target to schedule */ if (!scheduler->next_vgpu) return; - gvt_dbg_sched("try to schedule next vgpu %d\n", - scheduler->next_vgpu->id); - /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu @@ -68,14 +141,18 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) /* still have uncompleted workload? 
*/ for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) { - gvt_dbg_sched("still have running workload\n"); + if (scheduler->current_workload[i]) return; - } } - gvt_dbg_sched("switch to next vgpu %d\n", - scheduler->next_vgpu->id); + cur_time = ktime_get(); + if (scheduler->current_vgpu) { + vgpu_data = scheduler->current_vgpu->sched_data; + vgpu_data->sched_out_time = cur_time; + vgpu_update_timeslice(scheduler->current_vgpu); + } + vgpu_data = scheduler->next_vgpu->sched_data; + vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ scheduler->current_vgpu = scheduler->next_vgpu; @@ -88,97 +165,106 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) wake_up(&scheduler->waitq[i]); } -struct tbs_vgpu_data { - struct list_head list; - struct intel_vgpu *vgpu; - /* put some per-vgpu sched stats here */ -}; - -struct tbs_sched_data { - struct intel_gvt *gvt; - struct delayed_work work; - unsigned long period; - struct list_head runq_head; -}; - -#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) - -static void tbs_sched_func(struct work_struct *work) +static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) { - struct tbs_sched_data *sched_data = container_of(work, - struct tbs_sched_data, work.work); - struct tbs_vgpu_data *vgpu_data; - - struct intel_gvt *gvt = sched_data->gvt; - struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - + struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; - struct list_head *pos, *head; - - mutex_lock(&gvt->lock); - - /* no vgpu or has already had a target */ - if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu) - goto out; - - if (scheduler->current_vgpu) { - vgpu_data = scheduler->current_vgpu->sched_data; - head = &vgpu_data->list; - } else { - head = &sched_data->runq_head; - } + struct list_head *head = &sched_data->lru_runq_head; + struct list_head *pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { - if (pos == &sched_data->runq_head) - continue; - vgpu_data = container_of(pos, struct tbs_vgpu_data, list); + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); if (!vgpu_has_pending_workload(vgpu_data->vgpu)) continue; - vgpu = vgpu_data->vgpu; - break; + /* Return the vGPU only if it has time slice left */ + if (vgpu_data->left_ts > 0) { + vgpu = vgpu_data->vgpu; + break; + } } + return vgpu; +} + +/* in nanosecond */ +#define GVT_DEFAULT_TIME_SLICE 1000000 + +static void tbs_sched_func(struct gvt_sched_data *sched_data) +{ + struct intel_gvt *gvt = sched_data->gvt; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + struct vgpu_sched_data *vgpu_data; + struct intel_vgpu *vgpu = NULL; + static uint64_t timer_check; + + if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS)) + gvt_balance_timeslice(sched_data); + + /* no active vgpu or has already had a target */ + if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) + goto out; + + vgpu = find_busy_vgpu(sched_data); if (vgpu) { scheduler->next_vgpu = vgpu; - gvt_dbg_sched("pick next vgpu %d\n", vgpu->id); + + /* Move the last used vGPU to the tail of lru_list */ + vgpu_data = vgpu->sched_data; + list_del_init(&vgpu_data->lru_list); + list_add_tail(&vgpu_data->lru_list, + &sched_data->lru_runq_head); + } else { + scheduler->next_vgpu = gvt->idle_vgpu; } out: - if (scheduler->next_vgpu) { - gvt_dbg_sched("try to schedule next vgpu %d\n", - scheduler->next_vgpu->id); + if (scheduler->next_vgpu) try_to_schedule_next_vgpu(gvt); - } +} - 
/* - * still have vgpu on runq - * or last schedule haven't finished due to running workload - */ - if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu) - schedule_delayed_work(&sched_data->work, sched_data->period); +void intel_gvt_schedule(struct intel_gvt *gvt) +{ + struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; + mutex_lock(&gvt->lock); + tbs_sched_func(sched_data); mutex_unlock(&gvt->lock); } +static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) +{ + struct gvt_sched_data *data; + + data = container_of(timer_data, struct gvt_sched_data, timer); + + intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED); + + hrtimer_add_expires_ns(&data->timer, data->period); + + return HRTIMER_RESTART; +} + static int tbs_sched_init(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - struct tbs_sched_data *data; + struct gvt_sched_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - INIT_LIST_HEAD(&data->runq_head); - INIT_DELAYED_WORK(&data->work, tbs_sched_func); + INIT_LIST_HEAD(&data->lru_runq_head); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; data->gvt = gvt; scheduler->sched_data = data; + return 0; } @@ -186,25 +272,28 @@ static void tbs_sched_clean(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - struct tbs_sched_data *data = scheduler->sched_data; + struct gvt_sched_data *data = scheduler->sched_data; + + hrtimer_cancel(&data->timer); - cancel_delayed_work(&data->work); kfree(data); scheduler->sched_data = NULL; } static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { - struct tbs_vgpu_data *data; + struct vgpu_sched_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + data->sched_ctl.weight = vgpu->sched_ctl.weight; data->vgpu = vgpu; - INIT_LIST_HEAD(&data->list); + INIT_LIST_HEAD(&data->lru_list); vgpu->sched_data = data; + return 0; } @@ -216,21 +305,24 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { - struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data; + struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; + struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - if (!list_empty(&vgpu_data->list)) + if (!list_empty(&vgpu_data->lru_list)) return; - list_add_tail(&vgpu_data->list, &sched_data->runq_head); - schedule_delayed_work(&sched_data->work, 0); + list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); + + if (!hrtimer_active(&sched_data->timer)) + hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), + sched_data->period), HRTIMER_MODE_ABS); } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - list_del_init(&vgpu_data->list); + list_del_init(&vgpu_data->lru_list); } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h index bb8b9097e41a..ba00a5f7455f 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.h +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h @@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops { void (*stop_schedule)(struct intel_vgpu *vgpu); }; +void intel_gvt_schedule(struct 
intel_gvt *gvt); + int intel_gvt_init_sched_policy(struct intel_gvt *gvt); void intel_gvt_clean_sched_policy(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d9a735310e75..bada32b33237 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -279,11 +279,8 @@ static struct intel_vgpu_workload *pick_next_workload( goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) { - gvt_dbg_sched("ring id %d stop - no available workload\n", - ring_id); + if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) goto out; - } /* * still have current workload, maybe the workload dispatcher @@ -453,7 +450,8 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; - bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); + bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv); DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 2833dfa8c9ae..2cd725c0573e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -67,7 +67,6 @@ struct shadow_per_ctx { }; struct intel_shadow_wa_ctx { - struct intel_vgpu_workload *workload; struct shadow_indirect_ctx indirect_ctx; struct shadow_per_ctx per_ctx; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 41cfa5ccae84..6e3cbd8caec2 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -64,18 +64,28 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); } +#define VGPU_MAX_WEIGHT 16 +#define VGPU_WEIGHT(vgpu_num) \ + (VGPU_MAX_WEIGHT / (vgpu_num)) + static struct { unsigned int low_mm; unsigned int high_mm; unsigned int fence; + + /* A vGPU with a weight of 8 will get twice as much GPU time as a + * vGPU with a weight of 4 on a contended host; different vGPU types + * are assigned different weights. Legal weights range from 1 to 16. + */ + unsigned int weight; enum intel_vgpu_edid edid; char *name; } vgpu_types[] = { /* Fixed vGPU type table */ - { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, - { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, - { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, - { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, + { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" }, + { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" }, + { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" }, + { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" }, };
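The VGPU_WEIGHT() macro and the weight column added to vgpu_types[] above feed gvt_balance_timeslice() from the sched_policy.c hunk earlier in this patch, which splits each 100ms balance period among the runnable vGPUs in proportion to weight (fair_timeslice = period * weight / total_weight). A standalone sketch of that arithmetic; the particular mix of one GVTg_V5_4 and two GVTg_V5_8 instances is an arbitrary example:

/* Userspace model of the proportional timeslice split; prints each
 * vGPU's share of one 100ms balance period. */
#include <stdio.h>

#define VGPU_MAX_WEIGHT 16
#define VGPU_WEIGHT(vgpu_num) (VGPU_MAX_WEIGHT / (vgpu_num))
#define BALANCE_PERIOD_MS 100	/* GVT_TS_BALANCE_PERIOD_MS in the patch */

int main(void)
{
	/* one GVTg_V5_4 instance (weight 4), two GVTg_V5_8 (weight 2) */
	int weight[] = { VGPU_WEIGHT(4), VGPU_WEIGHT(8), VGPU_WEIGHT(8) };
	int n = sizeof(weight) / sizeof(weight[0]), total = 0;

	for (int i = 0; i < n; i++)
		total += weight[i];

	/* fair_timeslice = period * weight / total_weight */
	for (int i = 0; i < n; i++)
		printf("vGPU %d (weight %2d): %d ms of every %d ms\n",
		       i, weight[i], BALANCE_PERIOD_MS * weight[i] / total,
		       BALANCE_PERIOD_MS);
	return 0;
}

With weights 4, 2 and 2 the period splits 50/25/25 ms; unused or overdrawn time is tracked per vGPU in left_ts and settled again at the next stage-0 rebalance.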
/** @@ -120,6 +130,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) gvt->types[i].low_gm_size = vgpu_types[i].low_mm; gvt->types[i].high_gm_size = vgpu_types[i].high_mm; gvt->types[i].fence = vgpu_types[i].fence; + + if (vgpu_types[i].weight < 1 || + vgpu_types[i].weight > VGPU_MAX_WEIGHT) + return -EINVAL; + + gvt->types[i].weight = vgpu_types[i].weight; gvt->types[i].resolution = vgpu_types[i].edid; gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, high_avail / vgpu_types[i].high_mm); @@ -131,11 +147,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) sprintf(gvt->types[i].name, "GVTg_V5_%s", vgpu_types[i].name); - gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n", + gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n", i, gvt->types[i].name, gvt->types[i].avail_instance, gvt->types[i].low_gm_size, gvt->types[i].high_gm_size, gvt->types[i].fence, + gvt->types[i].weight, vgpu_edid_str(gvt->types[i].resolution)); } @@ -179,20 +196,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) } /** - * intel_gvt_destroy_vgpu - destroy a virtual GPU + * intel_gvt_activate_vgpu - activate a virtual GPU * @vgpu: virtual GPU * - * This function is called when user wants to destroy a virtual GPU. + * This function is called when user wants to activate a virtual GPU. * */ -void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) +void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) +{ + mutex_lock(&vgpu->gvt->lock); + vgpu->active = true; + mutex_unlock(&vgpu->gvt->lock); +} + +/** + * intel_gvt_deactivate_vgpu - deactivate a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to deactivate a virtual GPU. + * All virtual GPU runtime information will be destroyed. + * + */ +void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; mutex_lock(&gvt->lock); vgpu->active = false; - idr_remove(&gvt->vgpu_idr, vgpu->id); if (atomic_read(&vgpu->running_workload_num)) { mutex_unlock(&gvt->lock); @@ -201,6 +232,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) } intel_vgpu_stop_schedule(vgpu); + + mutex_unlock(&gvt->lock); +}
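The idle-vGPU constructor added below reports failure through the returned pointer itself, the kernel's ERR_PTR idiom, which is why the gvt.c hunk earlier in this patch checks IS_ERR() rather than NULL. A self-contained userspace rendition of the pattern; the macro definitions mirror the semantics of linux/err.h, while struct vgpu and create_vgpu() are purely illustrative:

/* An error code rides inside the pointer: addresses in the top 4095
 * bytes of the address space are reserved to encode -errno values. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct vgpu { int id; };

static struct vgpu *create_vgpu(int simulate_failure)
{
	struct vgpu *v;

	if (simulate_failure)
		return ERR_PTR(-ENOMEM);	/* errno encoded in pointer */
	v = calloc(1, sizeof(*v));
	return v ? v : ERR_PTR(-ENOMEM);
}

int main(void)
{
	struct vgpu *v = create_vgpu(0);

	if (IS_ERR(v)) {			/* caller decodes it back out */
		fprintf(stderr, "create failed: %ld\n", PTR_ERR(v));
		return 1;
	}
	free(v);
	return 0;
}

One return value covers both the success and failure paths, so constructors like intel_gvt_create_idle_vgpu() need no out-parameter for the error code.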
+ +/** + * intel_gvt_destroy_vgpu - destroy a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to destroy a virtual GPU. + * + */ +void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + + mutex_lock(&gvt->lock); + + WARN(vgpu->active, "vGPU is still active!\n"); + + idr_remove(&gvt->vgpu_idr, vgpu->id); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_gvt_context(vgpu); intel_vgpu_clean_execlist(vgpu); @@ -216,6 +267,59 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) mutex_unlock(&gvt->lock); } +#define IDLE_VGPU_IDR 0 + +/** + * intel_gvt_create_idle_vgpu - create an idle virtual GPU + * @gvt: GVT device + * + * This function is called when user wants to create an idle virtual GPU. + * + * Returns: + * pointer to intel_vgpu, error pointer if failed. + */ +struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + enum intel_engine_id i; + int ret; + + vgpu = vzalloc(sizeof(*vgpu)); + if (!vgpu) + return ERR_PTR(-ENOMEM); + + vgpu->id = IDLE_VGPU_IDR; + vgpu->gvt = gvt; + + for (i = 0; i < I915_NUM_ENGINES; i++) + INIT_LIST_HEAD(&vgpu->workload_q_head[i]); + + ret = intel_vgpu_init_sched_policy(vgpu); + if (ret) + goto out_free_vgpu; + + vgpu->active = false; + + return vgpu; + +out_free_vgpu: + vfree(vgpu); + return ERR_PTR(ret); +} + +/** + * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to destroy an idle virtual GPU. + * + */ +void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) +{ + intel_vgpu_clean_sched_policy(vgpu); + vfree(vgpu); +} + static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params *param) { @@ -232,13 +336,15 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, mutex_lock(&gvt->lock); - ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL); + ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU, + GFP_KERNEL); if (ret < 0) goto out_free_vgpu; vgpu->id = ret; vgpu->handle = param->handle; vgpu->gvt = gvt; + vgpu->sched_ctl.weight = param->weight; bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); intel_vgpu_init_cfg_space(vgpu, param->primary); @@ -277,7 +383,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_shadow_ctx; - vgpu->active = true; mutex_unlock(&gvt->lock); return vgpu; @@ -325,6 +430,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, param.low_gm_sz = type->low_gm_size; param.high_gm_sz = type->high_gm_size; param.fence_sz = type->fence; + param.weight = type->weight; param.resolution = type->resolution; /* XXX current param based on MB */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 47e707d83c4d..d689e511744e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1012,9 +1012,12 @@ static int gpu_state_release(struct inode *inode, struct file *file) static int i915_gpu_info_open(struct inode *inode, struct file *file) { + struct drm_i915_private *i915 = inode->i_private; struct i915_gpu_state *gpu; - gpu = i915_capture_gpu_state(inode->i_private); + intel_runtime_pm_get(i915); + gpu = i915_capture_gpu_state(i915); + intel_runtime_pm_put(i915); if (!gpu) return -ENOMEM; @@ -1459,16 +1462,14 @@ static int ironlake_drpc_info(struct seq_file *m) static int i915_forcewake_domains(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); struct 
intel_uncore_forcewake_domain *fw_domain; + unsigned int tmp; - spin_lock_irq(&dev_priv->uncore.lock); - for_each_fw_domain(fw_domain, dev_priv) { + for_each_fw_domain(fw_domain, i915, tmp) seq_printf(m, "%s.wake_count = %u\n", intel_uncore_forcewake_domain_to_str(fw_domain->id), - fw_domain->wake_count); - } - spin_unlock_irq(&dev_priv->uncore.lock); + READ_ONCE(fw_domain->wake_count)); return 0; } @@ -1938,9 +1939,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) { - seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", - ring->space, ring->head, ring->tail, - ring->last_retired_head); + seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", + ring->space, ring->head, ring->tail); } static int i915_context_status(struct seq_file *m, void *unused) @@ -2474,9 +2474,9 @@ static void i915_guc_client_info(struct seq_file *m, enum intel_engine_id id; uint64_t tot = 0; - seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", - client->priority, client->ctx_index, client->proc_desc_offset); - seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n", + seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", + client->priority, client->stage_id, client->proc_desc_offset); + seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n", client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", client->wq_size, client->wq_offset, client->wq_tail); @@ -2511,7 +2511,7 @@ static int i915_guc_info(struct seq_file *m, void *data) } seq_printf(m, "Doorbell map:\n"); - seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap); + seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); seq_printf(m, "GuC total action count: %llu\n", guc->action_count); @@ -4129,7 +4129,9 @@ i915_wedged_get(void *data, u64 *val) static int i915_wedged_set(void *data, u64 val) { - struct drm_i915_private *dev_priv = data; + struct drm_i915_private *i915 = data; + struct intel_engine_cs *engine; + unsigned int tmp; /* * There is no safeguard against this debugfs entry colliding @@ -4139,13 +4141,17 @@ i915_wedged_set(void *data, u64 val) * while it is writing to 'i915_wedged' */ - if (i915_reset_backoff(&dev_priv->gpu_error)) + if (i915_reset_backoff(&i915->gpu_error)) return -EAGAIN; - i915_handle_error(dev_priv, val, - "Manually setting wedged to %llu", val); + for_each_engine_masked(engine, i915, val, tmp) { + engine->hangcheck.seqno = intel_engine_get_seqno(engine); + engine->hangcheck.stalled = true; + } + + i915_handle_error(i915, val, "Manually setting wedged to %llu", val); - wait_on_bit(&dev_priv->gpu_error.flags, + wait_on_bit(&i915->gpu_error.flags, I915_RESET_HANDOFF, TASK_UNINTERRUPTIBLE); @@ -4173,10 +4179,6 @@ fault_irq_set(struct drm_i915_private *i915, if (err) goto err_unlock; - /* Retire to kick idle work */ - i915_gem_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); - *irq = val; mutex_unlock(&i915->drm.struct_mutex); @@ -4280,7 +4282,7 @@ i915_drop_caches_set(void *data, u64 val) goto unlock; } - if (val & (DROP_RETIRE | DROP_ACTIVE)) + if (val & DROP_RETIRE) i915_gem_retire_requests(dev_priv); lockdep_set_current_reclaim_state(GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 98b17070a123..3036d4835b0f 100644 --- 
a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -549,6 +549,7 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { static void i915_gem_fini(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->drm.struct_mutex); + intel_uc_fini_hw(dev_priv); i915_gem_cleanup_engines(dev_priv); i915_gem_context_fini(dev_priv); mutex_unlock(&dev_priv->drm.struct_mutex); @@ -609,7 +610,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = i915_gem_init(dev_priv); if (ret) - goto cleanup_irq; + goto cleanup_uc; intel_modeset_gem_init(dev); @@ -631,9 +632,9 @@ cleanup_gem: if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); i915_gem_fini(dev_priv); +cleanup_uc: + intel_uc_fini_fw(dev_priv); cleanup_irq: - intel_guc_fini(dev_priv); - intel_huc_fini(dev_priv); drm_irq_uninstall(dev); intel_teardown_gmbus(dev_priv); cleanup_csr: @@ -1351,9 +1352,8 @@ void i915_driver_unload(struct drm_device *dev) /* Flush any outstanding unpin_work. */ drain_workqueue(dev_priv->wq); - intel_guc_fini(dev_priv); - intel_huc_fini(dev_priv); i915_gem_fini(dev_priv); + intel_uc_fini_fw(dev_priv); intel_fbc_cleanup_cfb(dev_priv); intel_power_domains_fini(dev_priv); @@ -1469,8 +1469,6 @@ static int i915_drm_suspend(struct drm_device *dev) goto out; } - intel_guc_suspend(dev_priv); - intel_display_suspend(dev); intel_dp_mst_suspend(dev); @@ -2177,6 +2175,20 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); } +static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv, + u32 mask, u32 val) +{ + /* The HW does not like us polling for PW_STATUS frequently, so + * use the sleeping loop rather than risk the busy spin within + * intel_wait_for_register(). + * + * Transitioning between RC6 states should be at most 2ms (see + * valleyview_enable_rps) so use a 3ms timeout. + */ + return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val, + 3); +} + int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) { u32 val; @@ -2205,8 +2217,9 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) { + u32 mask; u32 val; - int err = 0; + int err; val = I915_READ(VLV_GTLC_WAKE_CTRL); val &= ~VLV_GTLC_ALLOWWAKEREQ; @@ -2215,45 +2228,32 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) I915_WRITE(VLV_GTLC_WAKE_CTRL, val); POSTING_READ(VLV_GTLC_WAKE_CTRL); - err = intel_wait_for_register(dev_priv, - VLV_GTLC_PW_STATUS, - VLV_GTLC_ALLOWWAKEACK, - allow, - 1); + mask = VLV_GTLC_ALLOWWAKEACK; + val = allow ? mask : 0; + + err = vlv_wait_for_pw_status(dev_priv, mask, val); if (err) DRM_ERROR("timeout disabling GT waking\n"); return err; } -static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, - bool wait_for_on) +static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, + bool wait_for_on) { u32 mask; u32 val; - int err; mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; val = wait_for_on ? mask : 0; - if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) - return 0; - - DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", - onoff(wait_for_on), - I915_READ(VLV_GTLC_PW_STATUS)); /* * RC6 transitioning can be delayed up to 2 msec (see * valleyview_enable_rps), use 3 msec for safety. 
*/ - err = intel_wait_for_register(dev_priv, - VLV_GTLC_PW_STATUS, mask, val, - 3); - if (err) + if (vlv_wait_for_pw_status(dev_priv, mask, val)) DRM_ERROR("timeout waiting for GT wells to go %s\n", onoff(wait_for_on)); - - return err; } static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) @@ -2274,7 +2274,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv) * Bspec defines the following GT well on flags as debug only, so * don't treat them as hard failures. */ - (void)vlv_wait_for_gt_wells(dev_priv, false); + vlv_wait_for_gt_wells(dev_priv, false); mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a5947a496d0a..c9b0949f6c1a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -79,26 +79,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20170320" -#define DRIVER_TIMESTAMP 1489994464 - -#undef WARN_ON -/* Many gcc seem to no see through this and fall over :( */ -#if 0 -#define WARN_ON(x) ({ \ - bool __i915_warn_cond = (x); \ - if (__builtin_constant_p(__i915_warn_cond)) \ - BUILD_BUG_ON(__i915_warn_cond); \ - WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) -#else -#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") -#endif - -#undef WARN_ON_ONCE -#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") - -#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ - (long) (x), __func__); +#define DRIVER_DATE "20170403" +#define DRIVER_TIMESTAMP 1491198738 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -703,9 +685,9 @@ enum forcewake_domain_id { }; enum forcewake_domains { - FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER), - FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER), - FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA), + FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER), + FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER), + FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA), FORCEWAKE_ALL = (FORCEWAKE_RENDER | FORCEWAKE_BLITTER | FORCEWAKE_MEDIA) @@ -732,21 +714,25 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv, - enum forcewake_domains domains); + enum forcewake_domains domains); void (*force_wake_put)(struct drm_i915_private *dev_priv, - enum forcewake_domains domains); - - uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); - uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); - uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); - uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); - - void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r, - uint8_t val, bool trace); - void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r, - uint16_t val, bool trace); - void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r, - uint32_t val, bool trace); + enum forcewake_domains domains); + + uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); + uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); + uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, + i915_reg_t r, 
bool trace); + uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); + + void (*mmio_writeb)(struct drm_i915_private *dev_priv, + i915_reg_t r, uint8_t val, bool trace); + void (*mmio_writew)(struct drm_i915_private *dev_priv, + i915_reg_t r, uint16_t val, bool trace); + void (*mmio_writel)(struct drm_i915_private *dev_priv, + i915_reg_t r, uint32_t val, bool trace); }; struct intel_forcewake_range { @@ -770,32 +756,35 @@ struct intel_uncore { enum forcewake_domains fw_domains; enum forcewake_domains fw_domains_active; + u32 fw_set; + u32 fw_clear; + u32 fw_reset; + struct intel_uncore_forcewake_domain { - struct drm_i915_private *i915; enum forcewake_domain_id id; enum forcewake_domains mask; unsigned wake_count; struct hrtimer timer; i915_reg_t reg_set; - u32 val_set; - u32 val_clear; i915_reg_t reg_ack; - i915_reg_t reg_post; - u32 val_reset; } fw_domain[FW_DOMAIN_ID_COUNT]; int unclaimed_mmio_check; }; +#define __mask_next_bit(mask) ({ \ + int __idx = ffs(mask) - 1; \ + mask &= ~BIT(__idx); \ + __idx; \ +}) + /* Iterate over initialised fw domains */ -#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \ - for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ - (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \ - (domain__)++) \ - for_each_if ((mask__) & (domain__)->mask) +#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \ + for (tmp__ = (mask__); \ + tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) -#define for_each_fw_domain(domain__, dev_priv__) \ - for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__) +#define for_each_fw_domain(domain__, dev_priv__, tmp__) \ + for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__) #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) #define CSR_VERSION_MAJOR(version) ((version) >> 16) @@ -846,6 +835,7 @@ struct intel_csr { func(has_resource_streamer); \ func(has_runtime_pm); \ func(has_snoop); \ + func(unfenced_needs_alignment); \ func(cursor_needs_physical); \ func(hws_needs_physical); \ func(overlay_needs_physical); \ @@ -2578,12 +2568,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) (id__)++) \ for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) -#define __mask_next_bit(mask) ({ \ - int __idx = ffs(mask) - 1; \ - mask &= ~BIT(__idx); \ - __idx; \ -}) - /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ @@ -3956,14 +3940,14 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) #define __raw_read(x, s) \ -static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \ +static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ i915_reg_t reg) \ { \ return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ } #define __raw_write(x, s) \ -static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \ +static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \ i915_reg_t reg, uint##x##_t val) \ { \ write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 58e1db77d70e..532a577ff7a1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2321,7 
+2321,7 @@ rebuild_st: st->nents = 0; for (i = 0; i < page_count; i++) { page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (IS_ERR(page)) { + if (unlikely(IS_ERR(page))) { i915_gem_shrink(dev_priv, page_count, I915_SHRINK_BOUND | @@ -2329,12 +2329,21 @@ rebuild_st: I915_SHRINK_PURGEABLE); page = shmem_read_mapping_page_gfp(mapping, i, gfp); } - if (IS_ERR(page)) { + if (unlikely(IS_ERR(page))) { + gfp_t reclaim; + /* We've tried hard to allocate the memory by reaping * our own buffer, now let the real VM do its job and * go down in flames if truly OOM. + * + * However, since graphics tend to be disposable, + * defer the oom here by reporting the ENOMEM back + * to userspace. */ - page = shmem_read_mapping_page(mapping, i); + reclaim = mapping_gfp_mask(mapping); + reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ + + page = shmem_read_mapping_page_gfp(mapping, i, reclaim); if (IS_ERR(page)) { ret = PTR_ERR(page); goto err_sg; @@ -2989,10 +2998,15 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv) lockdep_assert_held(&dev_priv->drm.struct_mutex); set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); + /* Retire completed requests first so the list of inflight/incomplete + * requests is accurate and we don't try and mark successful requests + * as in error during __i915_gem_set_wedged_BKL(). + */ + i915_gem_retire_requests(dev_priv); + stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); i915_gem_context_lost(dev_priv); - i915_gem_retire_requests(dev_priv); mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); } @@ -3098,9 +3112,7 @@ i915_gem_idle_work_handler(struct work_struct *work) * Wait for last execlists context complete, but bail out in case a * new request is submitted. */ - wait_for(READ_ONCE(dev_priv->gt.active_requests) || - intel_engines_are_idle(dev_priv), - 10); + wait_for(intel_engines_are_idle(dev_priv), 10); if (READ_ONCE(dev_priv->gt.active_requests)) return; @@ -3259,6 +3271,29 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) return 0; } +static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) +{ + return wait_for(intel_engine_is_idle(engine), timeout_ms); +} + +static int wait_for_engines(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) { + if (GEM_WARN_ON(wait_for_engine(engine, 50))) { + i915_gem_set_wedged(i915); + return -EIO; + } + + GEM_BUG_ON(intel_engine_get_seqno(engine) != + intel_engine_last_submit(engine)); + } + + return 0; +} + int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) { int ret; @@ -3273,13 +3308,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) if (ret) return ret; } + + i915_gem_retire_requests(i915); + GEM_BUG_ON(i915->gt.active_requests); + + ret = wait_for_engines(i915); } else { ret = wait_for_timeline(&i915->gt.global_timeline, flags); - if (ret) - return ret; } - return 0; + return ret; } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -3307,8 +3345,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) * system agents we cannot reproduce this behaviour). 
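The reworked shmem path above retries after reaping the driver's own buffers and, on the final attempt, allows reclaim but forbids the OOM killer so userspace sees ENOMEM instead. A rough userspace model of that fallback ladder (reclaim_caches() stands in for i915_gem_shrink() and malloc() for shmem_read_mapping_page_gfp(); the kernel's last attempt additionally sets __GFP_NORETRY):

#include <errno.h>
#include <stdlib.h>

static void reclaim_caches(void)
{
	/* stand-in for i915_gem_shrink(): drop private caches here */
}

static void *alloc_with_fallback(size_t size)
{
	void *p = malloc(size);		/* first try: default behaviour */

	if (!p) {
		reclaim_caches();	/* reap our own buffers first */
		p = malloc(size);	/* second try */
	}
	if (!p)
		errno = ENOMEM;		/* report failure to the caller
					 * rather than escalate to OOM */
	return p;
}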
*/ wmb(); - if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) - POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); + if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { + if (intel_runtime_pm_get_if_in_use(dev_priv)) { + spin_lock_irq(&dev_priv->uncore.lock); + POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); + spin_unlock_irq(&dev_priv->uncore.lock); + intel_runtime_pm_put(dev_priv); + } + } intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT)); @@ -4408,13 +4452,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) if (ret) goto err_unlock; - i915_gem_retire_requests(dev_priv); - GEM_BUG_ON(dev_priv->gt.active_requests); - assert_kernel_context_is_current(dev_priv); i915_gem_context_lost(dev_priv); mutex_unlock(&dev->struct_mutex); + intel_guc_suspend(dev_priv); + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); cancel_delayed_work_sync(&dev_priv->gt.retire_work); diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c index d925fb582ba7..ffd01e02fe94 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/i915_gem_clflush.c @@ -168,7 +168,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, i915_sw_fence_await_reservation(&clflush->wait, obj->resv, NULL, - false, I915_FENCE_TIMEOUT, + true, I915_FENCE_TIMEOUT, GFP_KERNEL); reservation_object_lock(obj->resv, NULL); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 486051ed681d..8bd0c4966913 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -576,25 +576,25 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) } static inline int -mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) +mi_set_context(struct drm_i915_gem_request *req, u32 flags) { struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *engine = req->engine; enum intel_engine_id id; - u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = - /* Use an extended w/a on ivb+ if signalling from other rings */ - i915.semaphores ? + /* Use an extended w/a on gen7 if signalling from other rings */ + (i915.semaphores && INTEL_GEN(dev_priv) == 7) ? 
INTEL_INFO(dev_priv)->num_rings - 1 : 0; int len; + u32 *cs; - /* These flags are for resource streamer on HSW+ */ + flags |= MI_MM_SPACE_GTT; if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) - flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); - else if (INTEL_GEN(dev_priv) < 8) - flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); - + /* These flags are for resource streamer on HSW+ */ + flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; + else + flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; len = 4; if (INTEL_GEN(dev_priv) >= 7) diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 11898cd97596..f225bf680b6d 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -200,10 +200,10 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, - .kmap = i915_gem_dmabuf_kmap, - .kmap_atomic = i915_gem_dmabuf_kmap_atomic, - .kunmap = i915_gem_dmabuf_kunmap, - .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic, + .map = i915_gem_dmabuf_kmap, + .map_atomic = i915_gem_dmabuf_kmap_atomic, + .unmap = i915_gem_dmabuf_kunmap, + .unmap_atomic = i915_gem_dmabuf_kunmap_atomic, .mmap = i915_gem_dmabuf_mmap, .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 2da3a94fc9f3..51e365f70464 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -196,7 +196,6 @@ search_again: if (ret) return ret; - i915_gem_retire_requests(dev_priv); goto search_again; found: @@ -383,7 +382,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) if (ret) return ret; - i915_gem_retire_requests(dev_priv); WARN_ON(!list_empty(&vm->active_list)); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index dd7181ed5eca..a3e59c8ef27b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -890,6 +890,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct list_head ordered_vmas; struct list_head pinned_vmas; bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; + bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment; int retry; vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; @@ -910,7 +911,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, if (!has_fenced_gpu_access) entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; need_fence = - entry->flags & EXEC_OBJECT_NEEDS_FENCE && + (entry->flags & EXEC_OBJECT_NEEDS_FENCE || + needs_unfenced_map) && i915_gem_object_is_tiled(obj); need_mappable = need_fence || need_reloc_mappable(vma); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index cee9c4fec52a..8bab4aea63e6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2364,7 +2364,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &dev_priv->ggtt; if (unlikely(ggtt->do_idle_maps)) { - if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) { + if (i915_gem_wait_for_idle(dev_priv, 0)) { DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c 
b/drivers/gpu/drm/i915/i915_gem_request.c index 0e8d1010cecb..5ddbc9499775 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence) static const char *i915_fence_get_timeline_name(struct dma_fence *fence) { + /* The timeline struct (as part of the ppgtt underneath a context) + * may be freed when the request is no longer in use by the GPU. + * We could extend the life of a context to beyond that of all + * fences, possibly keeping the hw resource around indefinitely, + * or we just give them a false name. Since + * dma_fence_ops.get_timeline_name is a debug feature, the occasional + * lie seems justifiable. + */ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return "signaled"; + return to_request(fence)->timeline->common->name; } @@ -180,7 +191,6 @@ i915_priotree_init(struct i915_priotree *pt) static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) { - struct i915_gem_timeline *timeline = &i915->gt.global_timeline; struct intel_engine_cs *engine; enum intel_engine_id id; int ret; @@ -192,15 +202,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) if (ret) return ret; - i915_gem_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests > 1); - /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ for_each_engine(engine, i915, id) { - struct intel_timeline *tl = &timeline->engine[id]; - - if (wait_for(intel_engine_is_idle(engine), 50)) - return -EBUSY; + struct i915_gem_timeline *timeline; + struct intel_timeline *tl = engine->timeline; if (!i915_seqno_passed(seqno, tl->seqno)) { /* spin until threads are complete */ @@ -211,14 +216,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) /* Finally reset hw state */ tl->seqno = seqno; intel_engine_init_global_seqno(engine, seqno); - } - - list_for_each_entry(timeline, &i915->gt.timelines, link) { - for_each_engine(engine, i915, id) { - struct intel_timeline *tl = &timeline->engine[id]; - memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno)); - } + list_for_each_entry(timeline, &i915->gt.timelines, link) + memset(timeline->engine[id].sync_seqno, 0, + sizeof(timeline->engine[id].sync_seqno)); } return 0; @@ -295,7 +296,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) * completion order. 
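The "occasional lie" above is a small but important lifetime trick: once a fence has signaled, the timeline backing it may already be freed, so the debug name must not be looked up through it. Sketched here with a hypothetical fence type (struct names and fields are illustrative, not the dma_fence API):

struct timeline {
	const char *name;
};

struct fence {
	int signaled;			/* models DMA_FENCE_FLAG_SIGNALED_BIT */
	struct timeline *timeline;	/* may be freed once signaled */
};

static const char *fence_timeline_name(const struct fence *f)
{
	/* Once signaled, the timeline may already be gone: return a
	 * static placeholder instead of chasing a dangling pointer. */
	if (f->signaled)
		return "signaled";
	return f->timeline->name;
}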
*/ list_del(&request->ring_link); - request->ring->last_retired_head = request->postfix; + request->ring->head = request->postfix; if (!--request->i915->gt.active_requests) { GEM_BUG_ON(!request->i915->gt.awake); mod_delayed_work(request->i915->wq, @@ -651,6 +652,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to, GEM_BUG_ON(to == from); + if (i915_gem_request_completed(from)) + return 0; + if (to->engine->schedule) { ret = i915_priotree_add_dependency(to->i915, &to->priotree, diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 2978acdd995e..129ed303a6c4 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) BUG(); } +static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock) +{ + if (!unlock) + return; + + mutex_unlock(&dev->struct_mutex); + + /* expedite the RCU grace period to free some request slabs */ + synchronize_rcu_expedited(); +} + static bool any_vma_pinned(struct drm_i915_gem_object *obj) { struct i915_vma *vma; @@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, intel_runtime_pm_put(dev_priv); i915_gem_retire_requests(dev_priv); - if (unlock) - mutex_unlock(&dev_priv->drm.struct_mutex); - /* expedite the RCU grace period to free some request slabs */ - synchronize_rcu_expedited(); + i915_gem_shrinker_unlock(&dev_priv->drm, unlock); return count; } @@ -296,8 +304,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count += obj->base.size >> PAGE_SHIFT; } - if (unlock) - mutex_unlock(&dev->struct_mutex); + i915_gem_shrinker_unlock(dev, unlock); return count; } @@ -324,8 +331,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) sc->nr_to_scan - freed, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - if (unlock) - mutex_unlock(&dev->struct_mutex); + + i915_gem_shrinker_unlock(dev, unlock); return freed; } @@ -367,8 +374,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv, struct shrinker_lock_uninterruptible *slu) { dev_priv->mm.interruptible = slu->was_interruptible; - if (slu->unlock) - mutex_unlock(&dev_priv->drm.struct_mutex); + i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock); } static int diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 832ac9e45801..1642fff9cf13 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -30,16 +30,25 @@ /** * DOC: GuC-based command submission * - * i915_guc_client: - * We use the term client to avoid confusion with contexts. A i915_guc_client is - * equivalent to GuC object guc_context_desc. This context descriptor is - * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell - * and workqueue for it. Also the process descriptor (guc_process_desc), which - * is mapped to client space. So the client can write Work Item then ring the - * doorbell. + * GuC client: + * A i915_guc_client refers to a submission path through GuC. Currently, there + * is only one of these (the execbuf_client) and this one is charged with all + * submissions to the GuC. This struct is the owner of a doorbell, a process + * descriptor and a workqueue (all of them inside a single gem object that + * contains all required pages for these elements). 
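To make the single-object client layout described above concrete, here is a sketch of the offsets it implies, assuming GUC_DB_SIZE is one 4 KiB page and GUC_WQ_SIZE two pages (consistent with the wq_offset/wq_size and proc_desc_offset assignments later in this patch; the exact sizes are an assumption for illustration):

#include <stdio.h>

#define PAGE_SZ     4096u
#define GUC_DB_SIZE PAGE_SZ		/* doorbell + process descriptor page */
#define GUC_WQ_SIZE (2u * PAGE_SZ)	/* work queue */

int main(void)
{
	printf("doorbell cachelines : [0x%05x, 0x%05x)\n", 0u, GUC_DB_SIZE / 2);
	printf("process descriptor  : [0x%05x, 0x%05x)\n",
	       GUC_DB_SIZE / 2, GUC_DB_SIZE);
	printf("work queue          : [0x%05x, 0x%05x)\n",
	       GUC_DB_SIZE, GUC_DB_SIZE + GUC_WQ_SIZE);
	return 0;
}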
* - * To simplify the implementation, we allocate one gem object that contains all - * pages for doorbell, process descriptor and workqueue. + * GuC stage descriptor: + * During initialization, the driver allocates a static pool of 1024 such + * descriptors, and shares them with the GuC. + * Currently, there exists a 1:1 mapping between a i915_guc_client and a + * guc_stage_desc (via the client's stage_id), so effectively only one + * gets used. This stage descriptor lets the GuC know about the doorbell, + * workqueue and process descriptor. Theoretically, it also lets the GuC + * know about our HW contexts (context ID, etc...), but we actually + * employ a kind of submission where the GuC uses the LRCA sent via the work + * item instead (the single guc_stage_desc associated to execbuf client + * contains information about the default kernel context only, but this is + * essentially unused). This is called a "proxy" submission. * * The Scratch registers: * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes @@ -62,34 +71,91 @@ * ELSP context descriptor dword into Work Item. * See guc_wq_item_append() * + * ADS: + * The Additional Data Struct (ADS) has pointers for different buffers used by + * the GuC. One single gem object contains the ADS struct itself (guc_ads), the + * scheduling policies (guc_policies), a structure describing a collection of + * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save + * its internal state for sleep. + * */ +static inline bool is_high_priority(struct i915_guc_client* client) +{ + return client->priority <= GUC_CLIENT_PRIORITY_HIGH; +} + +static int __reserve_doorbell(struct i915_guc_client *client) +{ + unsigned long offset; + unsigned long end; + u16 id; + + GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID); + + /* + * The bitmap tracks which doorbell registers are currently in use. + * It is split into two halves; the first half is used for normal + * priority contexts, the second half for high-priority ones. 
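The halved-bitmap policy in that comment can be modelled without the kernel bitmap helpers; in this sketch NUM_DOORBELLS stands in for GUC_NUM_DOORBELLS and a plain byte array for the bitmap:

#include <errno.h>
#include <stdbool.h>

#define NUM_DOORBELLS 256		/* stand-in for GUC_NUM_DOORBELLS */

static unsigned char db_used[NUM_DOORBELLS];

/* Return the first free id in the caller's half of the range, or -ENOSPC. */
static int reserve_doorbell(bool high_priority)
{
	unsigned int id = high_priority ? NUM_DOORBELLS / 2 : 0;
	unsigned int end = id + NUM_DOORBELLS / 2;

	for (; id < end; id++) {
		if (!db_used[id]) {
			db_used[id] = 1;
			return (int)id;
		}
	}
	return -ENOSPC;
}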
+ */ + offset = 0; + end = GUC_NUM_DOORBELLS/2; + if (is_high_priority(client)) { + offset = end; + end += offset; + } + + id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset); + if (id == end) + return -ENOSPC; + + __set_bit(id, client->guc->doorbell_bitmap); + client->doorbell_id = id; + DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n", + client->stage_id, yesno(is_high_priority(client)), + id); + return 0; +} + +static void __unreserve_doorbell(struct i915_guc_client *client) +{ + GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID); + + __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap); + client->doorbell_id = GUC_DOORBELL_INVALID; +} + /* * Tell the GuC to allocate or deallocate a specific doorbell */ -static int guc_allocate_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id) { u32 action[] = { INTEL_GUC_ACTION_ALLOCATE_DOORBELL, - client->ctx_index + stage_id }; return intel_guc_send(guc, action, ARRAY_SIZE(action)); } -static int guc_release_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id) { u32 action[] = { INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, - client->ctx_index + stage_id }; return intel_guc_send(guc, action, ARRAY_SIZE(action)); } +static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client) +{ + struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr; + + return &base[client->stage_id]; +} + /* * Initialise, update, or clear doorbell data shared with the GuC * @@ -97,107 +163,129 @@ static int guc_release_doorbell(struct intel_guc *guc, * client object which contains the page being used for the doorbell */ -static int guc_update_doorbell_id(struct intel_guc *guc, - struct i915_guc_client *client, - u16 new_id) +static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id) { - struct sg_table *sg = guc->ctx_pool_vma->pages; - void *doorbell_bitmap = guc->doorbell_bitmap; - struct guc_doorbell_info *doorbell; - struct guc_context_desc desc; - size_t len; + struct guc_stage_desc *desc; - doorbell = client->vaddr + client->doorbell_offset; + /* Update the GuC's idea of the doorbell ID */ + desc = __get_stage_desc(client); + desc->db_id = new_id; +} - if (client->doorbell_id != GUC_INVALID_DOORBELL_ID && - test_bit(client->doorbell_id, doorbell_bitmap)) { - /* Deactivate the old doorbell */ - doorbell->db_status = GUC_DOORBELL_DISABLED; - (void)guc_release_doorbell(guc, client); - __clear_bit(client->doorbell_id, doorbell_bitmap); - } +static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client) +{ + return client->vaddr + client->doorbell_offset; +} - /* Update the GuC's idea of the doorbell ID */ - len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), - sizeof(desc) * client->ctx_index); - if (len != sizeof(desc)) - return -EFAULT; - desc.db_id = new_id; - len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), - sizeof(desc) * client->ctx_index); - if (len != sizeof(desc)) - return -EFAULT; - - client->doorbell_id = new_id; - if (new_id == GUC_INVALID_DOORBELL_ID) - return 0; + +static bool has_doorbell(struct i915_guc_client *client) +{ + if (client->doorbell_id == GUC_DOORBELL_INVALID) + return false; - /* Activate the new doorbell */ - __set_bit(new_id, doorbell_bitmap); + return test_bit(client->doorbell_id, client->guc->doorbell_bitmap); +} + +static int 
__create_doorbell(struct i915_guc_client *client) +{ + struct guc_doorbell_info *doorbell; + int err; + + doorbell = __get_doorbell(client); doorbell->db_status = GUC_DOORBELL_ENABLED; doorbell->cookie = client->doorbell_cookie; - return guc_allocate_doorbell(guc, client); + + err = __guc_allocate_doorbell(client->guc, client->stage_id); + if (err) { + doorbell->db_status = GUC_DOORBELL_DISABLED; + doorbell->cookie = 0; + } + return err; } -static void guc_disable_doorbell(struct intel_guc *guc, - struct i915_guc_client *client) +static int __destroy_doorbell(struct i915_guc_client *client) { - (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID); + struct drm_i915_private *dev_priv = guc_to_i915(client->guc); + struct guc_doorbell_info *doorbell; + u16 db_id = client->doorbell_id; - /* XXX: wait for any interrupts */ - /* XXX: wait for workqueue to drain */ + GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID); + + doorbell = __get_doorbell(client); + doorbell->db_status = GUC_DOORBELL_DISABLED; + doorbell->cookie = 0; + + /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit + * to go to zero after updating db_status before we call the GuC to + * release the doorbell */ + if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10)) + WARN_ONCE(true, "Doorbell never became invalid after disable\n"); + + return __guc_deallocate_doorbell(client->guc, client->stage_id); } -static uint16_t -select_doorbell_register(struct intel_guc *guc, uint32_t priority) +static int create_doorbell(struct i915_guc_client *client) { - /* - * The bitmap tracks which doorbell registers are currently in use. - * It is split into two halves; the first half is used for normal - * priority contexts, the second half for high-priority ones. - * Note that logically higher priorities are numerically less than - * normal ones, so the test below means "is it high-priority?" - */ - const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH); - const uint16_t half = GUC_MAX_DOORBELLS / 2; - const uint16_t start = hi_pri ? half : 0; - const uint16_t end = start + half; - uint16_t id; + int ret; - id = find_next_zero_bit(guc->doorbell_bitmap, end, start); - if (id == end) - id = GUC_INVALID_DOORBELL_ID; + ret = __reserve_doorbell(client); + if (ret) + return ret; - DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n", - hi_pri ? "high" : "normal", id); + __update_doorbell_desc(client, client->doorbell_id); + + ret = __create_doorbell(client); + if (ret) + goto err; + + return 0; - return id; +err: + __update_doorbell_desc(client, GUC_DOORBELL_INVALID); + __unreserve_doorbell(client); + return ret; } -/* - * Select, assign and relase doorbell cachelines - * - * These functions track which doorbell cachelines are in use. - * The data they manipulate is protected by the intel_guc_send lock. 
- */ +static int destroy_doorbell(struct i915_guc_client *client) +{ + int err; + + GEM_BUG_ON(!has_doorbell(client)); + + /* XXX: wait for any interrupts */ + /* XXX: wait for workqueue to drain */ + + err = __destroy_doorbell(client); + if (err) + return err; + + __update_doorbell_desc(client, GUC_DOORBELL_INVALID); + + __unreserve_doorbell(client); -static uint32_t select_doorbell_cacheline(struct intel_guc *guc) + return 0; +} + +static unsigned long __select_cacheline(struct intel_guc* guc) { - const uint32_t cacheline_size = cache_line_size(); - uint32_t offset; + unsigned long offset; /* Doorbell uses a single cache line within a page */ offset = offset_in_page(guc->db_cacheline); /* Moving to next cache line to reduce contention */ - guc->db_cacheline += cacheline_size; - - DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n", - offset, guc->db_cacheline, cacheline_size); + guc->db_cacheline += cache_line_size(); + DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n", + offset, guc->db_cacheline, cache_line_size()); return offset; } +static inline struct guc_process_desc * +__get_process_desc(struct i915_guc_client *client) +{ + return client->vaddr + client->proc_desc_offset; +} + /* * Initialise the process descriptor shared with the GuC firmware. */ @@ -206,9 +294,7 @@ static void guc_proc_desc_init(struct intel_guc *guc, { struct guc_process_desc *desc; - desc = client->vaddr + client->proc_desc_offset; - - memset(desc, 0, sizeof(*desc)); + desc = memset(__get_process_desc(client), 0, sizeof(*desc)); /* * XXX: pDoorbell and WQVBaseAddress are pointers in process address @@ -219,42 +305,41 @@ static void guc_proc_desc_init(struct intel_guc *guc, desc->wq_base_addr = 0; desc->db_base_addr = 0; - desc->context_id = client->ctx_index; + desc->stage_id = client->stage_id; desc->wq_size_bytes = client->wq_size; desc->wq_status = WQ_STATUS_ACTIVE; desc->priority = client->priority; } /* - * Initialise/clear the context descriptor shared with the GuC firmware. + * Initialise/clear the stage descriptor shared with the GuC firmware. * * This descriptor tells the GuC where (in GGTT space) to find the important * data structures relating to this client (doorbell, process descriptor, * write queue, etc). */ - -static void guc_ctx_desc_init(struct intel_guc *guc, - struct i915_guc_client *client) +static void guc_stage_desc_init(struct intel_guc *guc, + struct i915_guc_client *client) { struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; struct i915_gem_context *ctx = client->owner; - struct guc_context_desc desc; - struct sg_table *sg; + struct guc_stage_desc *desc; unsigned int tmp; u32 gfx_addr; - memset(&desc, 0, sizeof(desc)); + desc = __get_stage_desc(client); + memset(desc, 0, sizeof(*desc)); - desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL; - desc.context_id = client->ctx_index; - desc.priority = client->priority; - desc.db_id = client->doorbell_id; + desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL; + desc->stage_id = client->stage_id; + desc->priority = client->priority; + desc->db_id = client->doorbell_id; for_each_engine_masked(engine, dev_priv, client->engines, tmp) { struct intel_context *ce = &ctx->engine[engine->id]; uint32_t guc_engine_id = engine->guc_id; - struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id]; + struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; /* TODO: We have a design issue to be solved here. 
Only when we * receive the first batch, we know which engine is used by the @@ -266,12 +351,22 @@ static void guc_ctx_desc_init(struct intel_guc *guc, if (!ce->state) break; /* XXX: continue? */ + /* + * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy + * submission or, in other words, not using a direct submission + * model) the KMD's LRCA is not used for any work submission. + * Instead, the GuC uses the LRCA of the user mode context (see + * guc_wq_item_append below). + */ lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - lrc->ring_lcra = + lrc->ring_lrca = guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; - lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | + + /* XXX: In direct submission, the GuC wants the HW context id + * here. In proxy submission, it wants the stage id */ + lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) | (guc_engine_id << GUC_ELC_ENGINE_OFFSET); lrc->ring_begin = guc_ggtt_offset(ce->ring->vma); @@ -279,50 +374,36 @@ static void guc_ctx_desc_init(struct intel_guc *guc, lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; - desc.engines_used |= (1 << guc_engine_id); + desc->engines_used |= (1 << guc_engine_id); } DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n", - client->engines, desc.engines_used); - WARN_ON(desc.engines_used == 0); + client->engines, desc->engines_used); + WARN_ON(desc->engines_used == 0); /* * The doorbell, process descriptor, and workqueue are all parts * of the client object, which the GuC will reference via the GGTT */ gfx_addr = guc_ggtt_offset(client->vma); - desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + + desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + client->doorbell_offset; - desc.db_trigger_cpu = - (uintptr_t)client->vaddr + client->doorbell_offset; - desc.db_trigger_uk = gfx_addr + client->doorbell_offset; - desc.process_desc = gfx_addr + client->proc_desc_offset; - desc.wq_addr = gfx_addr + client->wq_offset; - desc.wq_size = client->wq_size; - - /* - * XXX: Take LRCs from an existing context if this is not an - * IsKMDCreatedContext client - */ - desc.desc_private = (uintptr_t)client; + desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client); + desc->db_trigger_uk = gfx_addr + client->doorbell_offset; + desc->process_desc = gfx_addr + client->proc_desc_offset; + desc->wq_addr = gfx_addr + client->wq_offset; + desc->wq_size = client->wq_size; - /* Pool context is pinned already */ - sg = guc->ctx_pool_vma->pages; - sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), - sizeof(desc) * client->ctx_index); + desc->desc_private = (uintptr_t)client; } -static void guc_ctx_desc_fini(struct intel_guc *guc, - struct i915_guc_client *client) +static void guc_stage_desc_fini(struct intel_guc *guc, + struct i915_guc_client *client) { - struct guc_context_desc desc; - struct sg_table *sg; - - memset(&desc, 0, sizeof(desc)); + struct guc_stage_desc *desc; - sg = guc->ctx_pool_vma->pages; - sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc), - sizeof(desc) * client->ctx_index); + desc = __get_stage_desc(client); + memset(desc, 0, sizeof(*desc)); } /** @@ -345,8 +426,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request) { const size_t wqi_size = sizeof(struct guc_wq_item); struct i915_guc_client *client = request->i915->guc.execbuf_client; - struct guc_process_desc *desc = client->vaddr + - client->proc_desc_offset; + struct guc_process_desc *desc 
= __get_process_desc(client); u32 freespace; int ret; @@ -391,19 +471,17 @@ static void guc_wq_item_append(struct i915_guc_client *client, const size_t wqi_size = sizeof(struct guc_wq_item); const u32 wqi_len = wqi_size/sizeof(u32) - 1; struct intel_engine_cs *engine = rq->engine; - struct guc_process_desc *desc; + struct guc_process_desc *desc = __get_process_desc(client); struct guc_wq_item *wqi; u32 freespace, tail, wq_off; - desc = client->vaddr + client->proc_desc_offset; - /* Free space is guaranteed, see i915_guc_wq_reserve() above */ freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); GEM_BUG_ON(freespace < wqi_size); /* The GuC firmware wants the tail index in QWords, not bytes */ tail = rq->tail; - GEM_BUG_ON(tail & 7); + assert_ring_tail_valid(rq->ring, rq->tail); tail >>= 3; GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); @@ -436,19 +514,27 @@ static void guc_wq_item_append(struct i915_guc_client *client, /* The GuC wants only the low-order word of the context descriptor */ wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); - wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; + wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT; wqi->fence_id = rq->global_seqno; } +static void guc_reset_wq(struct i915_guc_client *client) +{ + struct guc_process_desc *desc = __get_process_desc(client); + + desc->head = 0; + desc->tail = 0; + + client->wq_tail = 0; +} + static int guc_ring_doorbell(struct i915_guc_client *client) { - struct guc_process_desc *desc; + struct guc_process_desc *desc = __get_process_desc(client); union guc_doorbell_qw db_cmp, db_exc, db_ret; union guc_doorbell_qw *db; int attempt = 2, ret = -EAGAIN; - desc = client->vaddr + client->proc_desc_offset; - /* Update the tail so it is visible to GuC */ desc->tail = client->wq_tail; @@ -463,7 +549,7 @@ static int guc_ring_doorbell(struct i915_guc_client *client) db_exc.cookie = 1; /* pointer of current doorbell cacheline */ - db = client->vaddr + client->doorbell_offset; + db = (union guc_doorbell_qw *)__get_doorbell(client); while (attempt--) { /* lets ring the doorbell */ @@ -573,23 +659,10 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine) { struct execlist_port *port = engine->execlist_port; struct drm_i915_gem_request *last = port[0].request; - unsigned long flags; struct rb_node *rb; bool submit = false; - /* After execlist_first is updated, the tasklet will be rescheduled. - * - * If we are currently running (inside the tasklet) and a third - * party queues a request and so updates engine->execlist_first under - * the spinlock (which we have elided), it will atomically set the - * TASKLET_SCHED flag causing the us to be re-executed and pick up - * the change in state (the update to TASKLET_SCHED incurs a memory - * barrier making this cross-cpu checking safe). 
- */ - if (!READ_ONCE(engine->execlist_first)) - return false; - - spin_lock_irqsave(&engine->timeline->lock, flags); + spin_lock_irq(&engine->timeline->lock); rb = engine->execlist_first; while (rb) { struct drm_i915_gem_request *rq = @@ -609,8 +682,8 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine) RB_CLEAR_NODE(&rq->priotree.node); rq->priotree.priority = INT_MAX; - trace_i915_gem_request_in(rq, port - engine->execlist_port); i915_guc_submit(rq); + trace_i915_gem_request_in(rq, port - engine->execlist_port); last = rq; submit = true; } @@ -619,7 +692,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine) nested_enable_signaling(last); engine->execlist_first = rb; } - spin_unlock_irqrestore(&engine->timeline->lock, flags); + spin_unlock_irq(&engine->timeline->lock); return submit; } @@ -695,93 +768,100 @@ err: return vma; } -static void -guc_client_free(struct drm_i915_private *dev_priv, - struct i915_guc_client *client) +/* Check that a doorbell register is in the expected state */ +static bool doorbell_ok(struct intel_guc *guc, u16 db_id) { - struct intel_guc *guc = &dev_priv->guc; - - if (!client) - return; - - /* - * XXX: wait for any outstanding submissions before freeing memory. - * Be sure to drop any locks - */ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 drbregl; + bool valid; - if (client->vaddr) { - /* - * If we got as far as setting up a doorbell, make sure we - * shut it down before unmapping & deallocating the memory. - */ - guc_disable_doorbell(guc, client); + GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID); - i915_gem_object_unpin_map(client->vma->obj); - } + drbregl = I915_READ(GEN8_DRBREGL(db_id)); + valid = drbregl & GEN8_DRB_VALID; - i915_vma_unpin_and_release(&client->vma); + if (test_bit(db_id, guc->doorbell_bitmap) == valid) + return true; - if (client->ctx_index != GUC_INVALID_CTX_ID) { - guc_ctx_desc_fini(guc, client); - ida_simple_remove(&guc->ctx_ids, client->ctx_index); - } + DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n", + db_id, drbregl, yesno(valid)); - kfree(client); + return false; } -/* Check that a doorbell register is in the expected state */ -static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id) +/* + * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and + * reloaded the GuC FW) we can use this function to tell the GuC to reassign the + * doorbell to the rightful owner. + */ +static int __reset_doorbell(struct i915_guc_client* client, u16 db_id) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - i915_reg_t drbreg = GEN8_DRBREGL(db_id); - uint32_t value = I915_READ(drbreg); - bool enabled = (value & GUC_DOORBELL_ENABLED) != 0; - bool expected = test_bit(db_id, guc->doorbell_bitmap); - - if (enabled == expected) - return true; + int err; - DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n", - db_id, drbreg.reg, value, - expected ? "active" : "inactive"); + __update_doorbell_desc(client, db_id); + err = __create_doorbell(client); + if (!err) + err = __destroy_doorbell(client); - return false; + return err; } /* - * Borrow the first client to set up & tear down each unused doorbell - * in turn, to ensure that all doorbell h/w is (re)initialised. + * Set up & tear down each unused doorbell in turn, to ensure that all doorbell + * HW is (re)initialised. For that end, we might have to borrow the first + * client. Also, tell GuC about all the doorbells in use by all clients. 
+ * We do this because the KMD, the GuC and the doorbell HW can easily go out of + * sync (e.g. we can reset the GuC, but not the doorbel HW). */ -static void guc_init_doorbell_hw(struct intel_guc *guc) +static int guc_init_doorbell_hw(struct intel_guc *guc) { struct i915_guc_client *client = guc->execbuf_client; - uint16_t db_id; - int i, err; - - guc_disable_doorbell(guc, client); + bool recreate_first_client = false; + u16 db_id; + int ret; - for (i = 0; i < GUC_MAX_DOORBELLS; ++i) { - /* Skip if doorbell is OK */ - if (guc_doorbell_check(guc, i)) + /* For unused doorbells, make sure they are disabled */ + for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) { + if (doorbell_ok(guc, db_id)) continue; - err = guc_update_doorbell_id(guc, client, i); - if (err) - DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n", - i, err); + if (has_doorbell(client)) { + /* Borrow execbuf_client (we will recreate it later) */ + destroy_doorbell(client); + recreate_first_client = true; + } + + ret = __reset_doorbell(client, db_id); + WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret); } - db_id = select_doorbell_register(guc, client->priority); - WARN_ON(db_id == GUC_INVALID_DOORBELL_ID); + if (recreate_first_client) { + ret = __reserve_doorbell(client); + if (unlikely(ret)) { + DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret); + return ret; + } + + __update_doorbell_desc(client, client->doorbell_id); + } - err = guc_update_doorbell_id(guc, client, db_id); - if (err) - DRM_WARN("Failed to restore doorbell to %d, err %d\n", - db_id, err); + /* Now for every client (and not only execbuf_client) make sure their + * doorbells are known by the GuC */ + //for (client = client_list; client != NULL; client = client->next) + { + ret = __create_doorbell(client); + if (ret) { + DRM_ERROR("Couldn't recreate client %u doorbell: %d\n", + client->stage_id, ret); + return ret; + } + } - /* Read back & verify all doorbell registers */ - for (i = 0; i < GUC_MAX_DOORBELLS; ++i) - (void)guc_doorbell_check(guc, i); + /* Read back & verify all (used & unused) doorbell registers */ + for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) + WARN_ON(!doorbell_ok(guc, db_id)); + + return 0; } /** @@ -807,49 +887,46 @@ guc_client_alloc(struct drm_i915_private *dev_priv, struct intel_guc *guc = &dev_priv->guc; struct i915_vma *vma; void *vaddr; - uint16_t db_id; + int ret; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) - return NULL; + return ERR_PTR(-ENOMEM); - client->owner = ctx; client->guc = guc; + client->owner = ctx; client->engines = engines; client->priority = priority; - client->doorbell_id = GUC_INVALID_DOORBELL_ID; + client->doorbell_id = GUC_DOORBELL_INVALID; + client->wq_offset = GUC_DB_SIZE; + client->wq_size = GUC_WQ_SIZE; + spin_lock_init(&client->wq_lock); - client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0, - GUC_MAX_GPU_CONTEXTS, GFP_KERNEL); - if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) { - client->ctx_index = GUC_INVALID_CTX_ID; - goto err; - } + ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, + GFP_KERNEL); + if (ret < 0) + goto err_client; + + client->stage_id = ret; /* The first page is doorbell/proc_desc. Two followed pages are wq. */ vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); - if (IS_ERR(vma)) - goto err; + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_id; + } /* We'll keep just the first (doorbell/proc) page permanently kmap'd. 
*/ client->vma = vma; vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) - goto err; - + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_vma; + } client->vaddr = vaddr; - spin_lock_init(&client->wq_lock); - client->wq_offset = GUC_DB_SIZE; - client->wq_size = GUC_WQ_SIZE; - - db_id = select_doorbell_register(guc, client->priority); - if (db_id == GUC_INVALID_DOORBELL_ID) - /* XXX: evict a doorbell instead? */ - goto err; - - client->doorbell_offset = select_doorbell_cacheline(guc); + client->doorbell_offset = __select_cacheline(guc); /* * Since the doorbell only requires a single cacheline, we can save @@ -862,28 +939,47 @@ guc_client_alloc(struct drm_i915_private *dev_priv, client->proc_desc_offset = (GUC_DB_SIZE / 2); guc_proc_desc_init(guc, client); - guc_ctx_desc_init(guc, client); + guc_stage_desc_init(guc, client); - /* For runtime client allocation we need to enable the doorbell. Not - * required yet for the static execbuf_client as this special kernel - * client is enabled from i915_guc_submission_enable(). - * - * guc_update_doorbell_id(guc, client, db_id); - */ + ret = create_doorbell(client); + if (ret) + goto err_vaddr; - DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n", - priority, client, client->engines, client->ctx_index); - DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n", - client->doorbell_id, client->doorbell_offset); + DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", + priority, client, client->engines, client->stage_id); + DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", + client->doorbell_id, client->doorbell_offset); return client; -err: - guc_client_free(dev_priv, client); - return NULL; +err_vaddr: + i915_gem_object_unpin_map(client->vma->obj); +err_vma: + i915_vma_unpin_and_release(&client->vma); +err_id: + ida_simple_remove(&guc->stage_ids, client->stage_id); +err_client: + kfree(client); + return ERR_PTR(ret); } +static void guc_client_free(struct i915_guc_client *client) +{ + /* + * XXX: wait for any outstanding submissions before freeing memory. + * Be sure to drop any locks + */ + /* FIXME: in many cases, by the time we get here the GuC has been + * reset, so we cannot destroy the doorbell properly. 
Ignore the + * error message for now */ + destroy_doorbell(client); + guc_stage_desc_fini(client->guc, client); + i915_gem_object_unpin_map(client->vma->obj); + i915_vma_unpin_and_release(&client->vma); + ida_simple_remove(&client->guc->stage_ids, client->stage_id); + kfree(client); +} static void guc_policies_init(struct guc_policies *policies) { @@ -893,7 +989,7 @@ static void guc_policies_init(struct guc_policies *policies) policies->dpc_promote_time = 500000; policies->max_num_work_items = POLICY_MAX_NUM_WI; - for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) { + for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { policy = &policies->policy[p][i]; @@ -907,7 +1003,7 @@ static void guc_policies_init(struct guc_policies *policies) policies->is_valid = 1; } -static void guc_addon_create(struct intel_guc *guc) +static int guc_ads_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); struct i915_vma *vma; @@ -923,14 +1019,13 @@ static void guc_addon_create(struct intel_guc *guc) enum intel_engine_id id; u32 base; - vma = guc->ads_vma; - if (!vma) { - vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob))); - if (IS_ERR(vma)) - return; + GEM_BUG_ON(guc->ads_vma); - guc->ads_vma = vma; - } + vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob))); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + guc->ads_vma = vma; page = i915_vma_first_page(vma); blob = kmap(page); @@ -940,11 +1035,11 @@ static void guc_addon_create(struct intel_guc *guc) /* MMIO reg state */ for_each_engine(engine, dev_priv, id) { - blob->reg_state.mmio_white_list[engine->guc_id].mmio_start = + blob->reg_state.white_list[engine->guc_id].mmio_start = engine->mmio_base + GUC_MMIO_WHITE_LIST_START; /* Nothing to be saved or restored for now. */ - blob->reg_state.mmio_white_list[engine->guc_id].count = 0; + blob->reg_state.white_list[engine->guc_id].count = 0; } /* @@ -967,67 +1062,75 @@ static void guc_addon_create(struct intel_guc *guc) blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); kunmap(page); + + return 0; +} + +static void guc_ads_destroy(struct intel_guc *guc) +{ + i915_vma_unpin_and_release(&guc->ads_vma); } /* - * Set up the memory resources to be shared with the GuC. At this point, - * we require just one object that can be mapped through the GGTT. + * Set up the memory resources to be shared with the GuC (via the GGTT) + * at firmware loading time. 
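i915_guc_submission_init() below follows the kernel's usual goto-unwind ladder: each failure path releases exactly what the earlier steps acquired, in reverse order. The shape, reduced to a stub example (alloc_a()/alloc_b()/free_a() are placeholders, not driver functions):

static int alloc_a(void) { return 0; }	/* e.g. allocate the desc pool */
static int alloc_b(void) { return 0; }	/* e.g. create the log buffer */
static void free_a(void) { }

static int setup(void)
{
	int ret;

	ret = alloc_a();
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = alloc_b();
	if (ret)
		goto err_a;	/* undo step A only */

	return 0;

err_a:
	free_a();
	return ret;
}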
*/ int i915_guc_submission_init(struct drm_i915_private *dev_priv) { - const size_t ctxsize = sizeof(struct guc_context_desc); - const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; - const size_t gemsize = round_up(poolsize, PAGE_SIZE); struct intel_guc *guc = &dev_priv->guc; struct i915_vma *vma; + void *vaddr; + int ret; - if (!HAS_GUC_SCHED(dev_priv)) + if (guc->stage_desc_pool) return 0; - /* Wipe bitmap & delete client in case of reinitialisation */ - bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS); - i915_guc_submission_disable(dev_priv); - - if (!i915.enable_guc_submission) - return 0; /* not enabled */ - - if (guc->ctx_pool_vma) - return 0; /* already allocated */ - - vma = intel_guc_allocate_vma(guc, gemsize); + vma = intel_guc_allocate_vma(guc, + PAGE_ALIGN(sizeof(struct guc_stage_desc) * + GUC_MAX_STAGE_DESCRIPTORS)); if (IS_ERR(vma)) return PTR_ERR(vma); - guc->ctx_pool_vma = vma; - ida_init(&guc->ctx_ids); - intel_guc_log_create(guc); - guc_addon_create(guc); - - guc->execbuf_client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, - GUC_CTX_PRIORITY_KMD_NORMAL, - dev_priv->kernel_context); - if (!guc->execbuf_client) { - DRM_ERROR("Failed to create GuC client for execbuf!\n"); - goto err; + guc->stage_desc_pool = vma; + + vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_vma; } + guc->stage_desc_pool_vaddr = vaddr; + + ret = intel_guc_log_create(guc); + if (ret < 0) + goto err_vaddr; + + ret = guc_ads_create(guc); + if (ret < 0) + goto err_log; + + ida_init(&guc->stage_ids); + return 0; -err: - i915_guc_submission_fini(dev_priv); - return -ENOMEM; +err_log: + intel_guc_log_destroy(guc); +err_vaddr: + i915_gem_object_unpin_map(guc->stage_desc_pool->obj); +err_vma: + i915_vma_unpin_and_release(&guc->stage_desc_pool); + return ret; } -static void guc_reset_wq(struct i915_guc_client *client) +void i915_guc_submission_fini(struct drm_i915_private *dev_priv) { - struct guc_process_desc *desc = client->vaddr + - client->proc_desc_offset; - - desc->head = 0; - desc->tail = 0; + struct intel_guc *guc = &dev_priv->guc; - client->wq_tail = 0; + ida_destroy(&guc->stage_ids); + guc_ads_destroy(guc); + intel_guc_log_destroy(guc); + i915_gem_object_unpin_map(guc->stage_desc_pool->obj); + i915_vma_unpin_and_release(&guc->stage_desc_pool); } static void guc_interrupts_capture(struct drm_i915_private *dev_priv) @@ -1072,20 +1175,60 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; } +static void guc_interrupts_release(struct drm_i915_private *dev_priv) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int irqs; + + /* + * tell all command streamers NOT to forward interrupts or vblank + * to GuC. 
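The _MASKED_FIELD()/_MASKED_BIT_DISABLE() writes below rely on i915's "masked" register convention: the top 16 bits of the written value select which of the bottom 16 bits actually change. A self-contained model (apply_masked_write() illustrates what the hardware does internally; it is not a driver function):

#include <stdint.h>

#define MASKED_BIT_ENABLE(b)	(((uint32_t)(b) << 16) | (uint32_t)(b))
#define MASKED_BIT_DISABLE(b)	((uint32_t)(b) << 16)

/* What the hardware effectively computes on a masked-register write. */
static uint32_t apply_masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;	/* write-enable bits */

	return (old & ~mask) | (val & mask);
}

/* e.g. apply_masked_write(reg, MASKED_BIT_DISABLE(1u << 3)) clears bit 3
 * and leaves every other bit untouched, with no read-modify-write race. */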
+ */ + irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); + irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); + for_each_engine(engine, dev_priv, id) + I915_WRITE(RING_MODE_GEN7(engine), irqs); + + /* route all GT interrupts to the host */ + I915_WRITE(GUC_BCS_RCS_IER, 0); + I915_WRITE(GUC_VCS2_VCS1_IER, 0); + I915_WRITE(GUC_WD_VECS_IER, 0); + + dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; + dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; +} + int i915_guc_submission_enable(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; struct i915_guc_client *client = guc->execbuf_client; struct intel_engine_cs *engine; enum intel_engine_id id; + int err; + + if (!client) { + client = guc_client_alloc(dev_priv, + INTEL_INFO(dev_priv)->ring_mask, + GUC_CLIENT_PRIORITY_KMD_NORMAL, + dev_priv->kernel_context); + if (IS_ERR(client)) { + DRM_ERROR("Failed to create GuC client for execbuf!\n"); + return PTR_ERR(client); + } - if (!client) - return -ENODEV; + guc->execbuf_client = client; + } - intel_guc_sample_forcewake(guc); + err = intel_guc_sample_forcewake(guc); + if (err) + goto err_execbuf_client; guc_reset_wq(client); - guc_init_doorbell_hw(guc); + + err = guc_init_doorbell_hw(guc); + if (err) + goto err_execbuf_client; /* Take over from manual control of ELSP (execlists) */ guc_interrupts_capture(dev_priv); @@ -1112,30 +1255,11 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) } return 0; -} -static void guc_interrupts_release(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int irqs; - - /* - * tell all command streamers NOT to forward interrupts or vblank - * to GuC. - */ - irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); - irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MODE_GEN7(engine), irqs); - - /* route all GT interrupts to the host */ - I915_WRITE(GUC_BCS_RCS_IER, 0); - I915_WRITE(GUC_VCS2_VCS1_IER, 0); - I915_WRITE(GUC_WD_VECS_IER, 0); - - dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; +err_execbuf_client: + guc_client_free(guc->execbuf_client); + guc->execbuf_client = NULL; + return err; } void i915_guc_submission_disable(struct drm_i915_private *dev_priv) @@ -1144,30 +1268,11 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv) guc_interrupts_release(dev_priv); - if (!guc->execbuf_client) - return; - /* Revert back to manual ELSP submission */ intel_engines_reset_default_submission(dev_priv); -} - -void i915_guc_submission_fini(struct drm_i915_private *dev_priv) -{ - struct intel_guc *guc = &dev_priv->guc; - struct i915_guc_client *client; - client = fetch_and_zero(&guc->execbuf_client); - if (!client) - return; - - guc_client_free(dev_priv, client); - - i915_vma_unpin_and_release(&guc->ads_vma); - i915_vma_unpin_and_release(&guc->log.vma); - - if (guc->ctx_pool_vma) - ida_destroy(&guc->ctx_ids); - i915_vma_unpin_and_release(&guc->ctx_pool_vma); + guc_client_free(guc->execbuf_client); + guc->execbuf_client = NULL; } /** @@ -1196,7 +1301,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv) return intel_guc_send(guc, data, ARRAY_SIZE(data)); } - /** * intel_guc_resume() - notify GuC resuming from suspend state * @dev_priv: i915 device private @@ -1222,5 +1326,3 @@ int intel_guc_resume(struct drm_i915_private *dev_priv) return intel_guc_send(guc, 
data, ARRAY_SIZE(data)); } - - diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8163d5024ff8..fd97fe00cd0d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1742,8 +1742,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) I915_WRITE(SOFT_SCRATCH(15), msg & ~flush); /* Handle flush interrupt in bottom half */ - queue_work(dev_priv->guc.log.flush_wq, - &dev_priv->guc.log.flush_work); + queue_work(dev_priv->guc.log.runtime.flush_wq, + &dev_priv->guc.log.runtime.flush_work); dev_priv->guc.log.flush_interrupt_count++; } else { @@ -4252,12 +4252,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev_priv->rps.pm_intrmsk_mbz = 0; /* - * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer + * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer * if GEN6_PM_UP_EI_EXPIRED is masked. * * TODO: verify if this can be reproduced on VLV,CHV. */ - if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + if (INTEL_INFO(dev_priv)->gen <= 7) dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; if (INTEL_INFO(dev_priv)->gen >= 8) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 732101ed57fb..f87b0c4e564d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -61,6 +61,7 @@ .has_overlay = 1, .overlay_needs_physical = 1, \ .has_gmch_display = 1, \ .hws_needs_physical = 1, \ + .unfenced_needs_alignment = 1, \ .ring_mask = RENDER_RING, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS @@ -102,6 +103,7 @@ static const struct intel_device_info intel_i915g_info = { .platform = INTEL_I915G, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_i915gm_info = { @@ -113,6 +115,7 @@ static const struct intel_device_info intel_i915gm_info = { .supports_tv = 1, .has_fbc = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_i945g_info = { @@ -121,6 +124,7 @@ static const struct intel_device_info intel_i945g_info = { .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_i945gm_info = { @@ -131,6 +135,7 @@ static const struct intel_device_info intel_i945gm_info = { .supports_tv = 1, .has_fbc = 1, .hws_needs_physical = 1, + .unfenced_needs_alignment = 1, }; static const struct intel_device_info intel_g33_info = { diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 8c121187ff39..060b171480d5 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, */ if (WARN_ON(stream->sample_flags != props->sample_flags)) { ret = -ENODEV; - goto err_alloc; + goto err_flags; } list_add(&stream->link, &dev_priv->perf.streams); @@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, err_open: list_del(&stream->link); +err_flags: if (stream->ops->destroy) stream->ops->destroy(stream); err_alloc: @@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, if (ret) return ret; + if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) { + DRM_DEBUG("Unknown i915 perf property ID\n"); + return -EINVAL; + } + switch ((enum 
drm_i915_perf_property_id)id) { case DRM_I915_PERF_PROP_CTX_HANDLE: props->single_context = 1; @@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, props->oa_periodic = true; props->oa_period_exponent = value; break; - default: + case DRM_I915_PERF_PROP_MAX: MISSING_CASE(id); - DRM_DEBUG("Unknown i915 perf property ID\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 04c8f69fcc62..11b12f412492 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7829,7 +7829,14 @@ enum { #define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) #define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) +#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7) +#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6) #define TRANS_DDI_BFI_ENABLE (1<<4) +#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4) +#define TRANS_DDI_HDMI_SCRAMBLING (1<<0) +#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \ + | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ + | TRANS_DDI_HDMI_SCRAMBLING) /* DisplayPort Transport Control */ #define _DP_TP_CTL_A 0x64040 diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 94a3a3299910..c5455d36b617 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -25,6 +25,24 @@ #ifndef __I915_UTILS_H #define __I915_UTILS_H +#undef WARN_ON +/* Many gcc seem to no see through this and fall over :( */ +#if 0 +#define WARN_ON(x) ({ \ + bool __i915_warn_cond = (x); \ + if (__builtin_constant_p(__i915_warn_cond)) \ + BUILD_BUG_ON(__i915_warn_cond); \ + WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) +#else +#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") +#endif + +#undef WARN_ON_ONCE +#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") + +#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ + (long)(x), __func__) + #if GCC_VERSION >= 70000 #define add_overflows(A, B) \ __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0) diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index ba986edee312..9ccbf26124c6 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -47,11 +47,12 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b) unsigned int intel_engine_wakeup(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; + unsigned long flags; unsigned int result; - spin_lock_irq(&b->irq_lock); + spin_lock_irqsave(&b->irq_lock, flags); result = __intel_breadcrumbs_wakeup(b); - spin_unlock_irq(&b->irq_lock); + spin_unlock_irqrestore(&b->irq_lock, flags); return result; } @@ -579,6 +580,8 @@ static int intel_breadcrumbs_signaler(void *arg) signaler_set_rtpriority(); do { + bool do_schedule = true; + set_current_state(TASK_INTERRUPTIBLE); /* We are either woken up by the interrupt bottom-half, @@ -625,9 +628,23 @@ static int intel_breadcrumbs_signaler(void *arg) spin_unlock_irq(&b->rb_lock); i915_gem_request_put(request); - } else { + + /* If the engine is saturated we may be continually + * processing completed requests. This angers the + * NMI watchdog if we never let anything else + * have access to the CPU. Let's pretend to be nice + * and relinquish the CPU if we burn through the + * entire RT timeslice! 
+ */ + do_schedule = need_resched(); + } + + if (unlikely(do_schedule)) { DEFINE_WAIT(exec); + if (kthread_should_park()) + kthread_parkme(); + if (kthread_should_stop()) { GEM_BUG_ON(request); break; @@ -640,9 +657,6 @@ static int intel_breadcrumbs_signaler(void *arg) if (request) remove_wait_queue(&request->execute, &exec); - - if (kthread_should_park()) - kthread_parkme(); } i915_gem_request_put(request); } while (1); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index c2cc33f3d888..dd3ad52b7dfe 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -1442,16 +1442,33 @@ static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); - /* BSpec says "Do not use DisplayPort with CDCLK less than - * 432 MHz, audio enabled, port width x4, and link rate - * HBR2 (5.4 GHz), or else there may be audio corruption or - * screen corruption." + /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz, + * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else + * there may be audio corruption or screen corruption." This cdclk + * restriction for GLK is 316.8 MHz and since GLK can output two + * pixels per clock, the pixel rate becomes 2 * 316.8 MHz. */ if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio && crtc_state->port_clock >= 540000 && - crtc_state->lane_count == 4) - pixel_rate = max(432000, pixel_rate); + crtc_state->lane_count == 4) { + if (IS_GEMINILAKE(dev_priv)) + pixel_rate = max(2 * 316800, pixel_rate); + else + pixel_rate = max(432000, pixel_rate); + } + + /* According to BSpec, "The CD clock frequency must be at least twice + * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. + * The check for GLK has to be adjusted as the platform can output + * two pixels per clock. + */ + if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) { + if (IS_GEMINILAKE(dev_priv)) + pixel_rate = max(2 * 2 * 96000, pixel_rate); + else + pixel_rate = max(2 * 96000, pixel_rate); + } return pixel_rate; } diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 36832257cc9b..1575bde0cf90 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -49,7 +49,7 @@ MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_BXT); #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) -#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" +#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware" diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index d8214ba8da14..0914ad96a71b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -539,7 +539,7 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, * values in advance. This function programs the correct values for * DP/eDP/FDI use cases. 
*/ -void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) +static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 iboost_bit = 0; @@ -806,7 +806,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc, DP_TP_CTL_ENABLE); } -void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) +static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *intel_dig_port = @@ -837,7 +837,8 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) return ret; } -static struct intel_encoder * +/* Finds the only possible encoder associated with the given CRTC. */ +struct intel_encoder * intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); @@ -1127,72 +1128,6 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, bxt_ddi_clock_get(encoder, pipe_config); } -static bool -hsw_ddi_pll_select(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) -{ - struct intel_shared_dpll *pll; - - pll = intel_get_shared_dpll(intel_crtc, crtc_state, - encoder); - if (!pll) - DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", - pipe_name(intel_crtc->pipe)); - - return pll; -} - -static bool -skl_ddi_pll_select(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) -{ - struct intel_shared_dpll *pll; - - pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder); - if (pll == NULL) { - DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", - pipe_name(intel_crtc->pipe)); - return false; - } - - return true; -} - -static bool -bxt_ddi_pll_select(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) -{ - return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder); -} - -/* - * Tries to find a *shared* PLL for the CRTC and store it in - * intel_crtc->ddi_pll_sel. - * - * For private DPLLs, compute_config() should do the selection for us. This - * function should be folded into compute_config() eventually. 
- */ -bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct intel_encoder *encoder = - intel_ddi_get_crtc_new_encoder(crtc_state); - - if (IS_GEN9_BC(dev_priv)) - return skl_ddi_pll_select(intel_crtc, crtc_state, - encoder); - else if (IS_GEN9_LP(dev_priv)) - return bxt_ddi_pll_select(intel_crtc, crtc_state, - encoder); - else - return hsw_ddi_pll_select(intel_crtc, crtc_state, - encoder); -} - void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); @@ -1309,6 +1244,11 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) temp |= TRANS_DDI_MODE_SELECT_HDMI; else temp |= TRANS_DDI_MODE_SELECT_DVI; + + if (crtc_state->hdmi_scrambling) + temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK; + if (crtc_state->hdmi_high_tmds_clock_ratio) + temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE; } else if (type == INTEL_OUTPUT_ANALOG) { temp |= TRANS_DDI_MODE_SELECT_FDI; temp |= (crtc_state->fdi_lanes - 1) << 1; @@ -1676,8 +1616,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) return DDI_BUF_TRANS_SELECT(level); } -void intel_ddi_clk_select(struct intel_encoder *encoder, - struct intel_shared_dpll *pll) +static void intel_ddi_clk_select(struct intel_encoder *encoder, + struct intel_shared_dpll *pll) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_ddi_get_encoder_port(encoder); @@ -1881,6 +1821,12 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, if (type == INTEL_OUTPUT_HDMI) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio; + bool scrambling = pipe_config->hdmi_scrambling; + + intel_hdmi_handle_sink_scrambling(intel_encoder, + conn_state->connector, + clock_ratio, scrambling); /* In HDMI/DVI mode, the port width, and swing/emphasis values * are ignored so nothing special needs to be done besides @@ -1914,6 +1860,12 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder, if (old_crtc_state->has_audio) intel_audio_codec_disable(intel_encoder); + if (type == INTEL_OUTPUT_HDMI) { + intel_hdmi_handle_sink_scrambling(intel_encoder, + old_conn_state->connector, + false, false); + } + if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2040,6 +1992,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder, if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) pipe_config->has_infoframe = true; + + if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == + TRANS_DDI_HDMI_SCRAMBLING_MASK) + pipe_config->hdmi_scrambling = true; + if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE) + pipe_config->hdmi_high_tmds_clock_ratio = true; /* fall through */ case TRANS_DDI_MODE_SELECT_DVI: pipe_config->lane_count = 4; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e217d04133e6..3617927af269 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1997,7 +1997,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane) unsigned int cpp = fb->format->cpp[plane]; switch (fb->modifier) { - case DRM_FORMAT_MOD_NONE: + case DRM_FORMAT_MOD_LINEAR: return cpp; case I915_FORMAT_MOD_X_TILED: if (IS_GEN2(dev_priv)) @@ -2033,7 +2033,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane) static unsigned int 
intel_tile_height(const struct drm_framebuffer *fb, int plane) { - if (fb->modifier == DRM_FORMAT_MOD_NONE) + if (fb->modifier == DRM_FORMAT_MOD_LINEAR) return 1; else return intel_tile_size(to_i915(fb->dev)) / @@ -2107,7 +2107,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, return 4096; switch (fb->modifier) { - case DRM_FORMAT_MOD_NONE: + case DRM_FORMAT_MOD_LINEAR: return intel_linear_alignment(dev_priv); case I915_FORMAT_MOD_X_TILED: if (INTEL_GEN(dev_priv) >= 9) @@ -2290,7 +2290,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y, WARN_ON(new_offset > old_offset); - if (fb->modifier != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { unsigned int tile_size, tile_width, tile_height; unsigned int pitch_tiles; @@ -2345,7 +2345,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, if (alignment) alignment--; - if (fb_modifier != DRM_FORMAT_MOD_NONE) { + if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { unsigned int tile_size, tile_width, tile_height; unsigned int tile_rows, tiles, pitch_tiles; @@ -2471,7 +2471,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, DRM_ROTATE_0, tile_size); offset /= tile_size; - if (fb->modifier != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { unsigned int tile_width, tile_height; unsigned int pitch_tiles; struct drm_rect r; @@ -2803,7 +2803,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, int cpp = fb->format->cpp[plane]; switch (fb->modifier) { - case DRM_FORMAT_MOD_NONE: + case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: switch (cpp) { case 8: @@ -2962,28 +2962,27 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; } -static void i9xx_update_primary_plane(struct drm_plane *primary, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(primary->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_framebuffer *fb = plane_state->base.fb; - int plane = intel_crtc->plane; - u32 linear_offset; - u32 dspcntr; - i915_reg_t reg = DSPCNTR(plane); + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_framebuffer *fb = plane_state->base.fb; unsigned int rotation = plane_state->base.rotation; - int x = plane_state->base.src.x1 >> 16; - int y = plane_state->base.src.y1 >> 16; - unsigned long irqflags; + u32 dspcntr; - dspcntr = DISPPLANE_GAMMA_ENABLE; + dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; - dspcntr |= DISPLAY_PLANE_ENABLE; + if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || + IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; if (INTEL_GEN(dev_priv) < 4) { - if (intel_crtc->pipe == PIPE_B) + if (crtc->pipe == PIPE_B) dspcntr |= DISPPLANE_SEL_PIPE_B; } @@ -3010,7 +3009,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, dspcntr |= DISPPLANE_RGBX101010; break; default: - BUG(); + MISSING_CASE(fb->format->format); + return 0; } if (INTEL_GEN(dev_priv) >= 4 && @@ -3023,25 +3023,66 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, if (rotation & DRM_REFLECT_X) dspcntr 
|= DISPPLANE_MIRROR; - if (IS_G4X(dev_priv)) - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + return dspcntr; +} - intel_add_fb_offsets(&x, &y, plane_state, 0); +int i9xx_check_plane_surface(struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + int src_x = plane_state->base.src.x1 >> 16; + int src_y = plane_state->base.src.y1 >> 16; + u32 offset; + + intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); if (INTEL_GEN(dev_priv) >= 4) - intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, plane_state, 0); + offset = intel_compute_tile_offset(&src_x, &src_y, + plane_state, 0); + else + offset = 0; - if (rotation & DRM_ROTATE_180) { - x += crtc_state->pipe_src_w - 1; - y += crtc_state->pipe_src_h - 1; - } else if (rotation & DRM_REFLECT_X) { - x += crtc_state->pipe_src_w - 1; + /* HSW/BDW do this automagically in hardware */ + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { + unsigned int rotation = plane_state->base.rotation; + int src_w = drm_rect_width(&plane_state->base.src) >> 16; + int src_h = drm_rect_height(&plane_state->base.src) >> 16; + + if (rotation & DRM_ROTATE_180) { + src_x += src_w - 1; + src_y += src_h - 1; + } else if (rotation & DRM_REFLECT_X) { + src_x += src_w - 1; + } } + plane_state->main.offset = offset; + plane_state->main.x = src_x; + plane_state->main.y = src_y; + + return 0; +} + +static void i9xx_update_primary_plane(struct drm_plane *primary, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(primary->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_framebuffer *fb = plane_state->base.fb; + int plane = intel_crtc->plane; + u32 linear_offset; + u32 dspcntr = plane_state->ctl; + i915_reg_t reg = DSPCNTR(plane); + int x = plane_state->main.x; + int y = plane_state->main.y; + unsigned long irqflags; + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - if (INTEL_GEN(dev_priv) < 4) + if (INTEL_GEN(dev_priv) >= 4) + intel_crtc->dspaddr_offset = plane_state->main.offset; + else intel_crtc->dspaddr_offset = linear_offset; intel_crtc->adjusted_x = x; @@ -3068,7 +3109,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE_FW(reg, dspcntr); I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); - if (INTEL_GEN(dev_priv) >= 4) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + I915_WRITE_FW(DSPSURF(plane), + intel_plane_ggtt_offset(plane_state) + + intel_crtc->dspaddr_offset); + I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); + } else if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE_FW(DSPSURF(plane), intel_plane_ggtt_offset(plane_state) + intel_crtc->dspaddr_offset); @@ -3105,101 +3151,10 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void ironlake_update_primary_plane(struct drm_plane *primary, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_framebuffer *fb = plane_state->base.fb; - int plane = intel_crtc->plane; - u32 linear_offset; - u32 dspcntr; - i915_reg_t reg = DSPCNTR(plane); - unsigned int rotation = plane_state->base.rotation; - int x = plane_state->base.src.x1 >> 16; - int y = plane_state->base.src.y1 >> 
16; - unsigned long irqflags; - - dspcntr = DISPPLANE_GAMMA_ENABLE; - dspcntr |= DISPLAY_PLANE_ENABLE; - - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; - - switch (fb->format->format) { - case DRM_FORMAT_C8: - dspcntr |= DISPPLANE_8BPP; - break; - case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; - break; - case DRM_FORMAT_XRGB8888: - dspcntr |= DISPPLANE_BGRX888; - break; - case DRM_FORMAT_XBGR8888: - dspcntr |= DISPPLANE_RGBX888; - break; - case DRM_FORMAT_XRGB2101010: - dspcntr |= DISPPLANE_BGRX101010; - break; - case DRM_FORMAT_XBGR2101010: - dspcntr |= DISPPLANE_RGBX101010; - break; - default: - BUG(); - } - - if (fb->modifier == I915_FORMAT_MOD_X_TILED) - dspcntr |= DISPPLANE_TILED; - - if (rotation & DRM_ROTATE_180) - dspcntr |= DISPPLANE_ROTATE_180; - - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - - intel_add_fb_offsets(&x, &y, plane_state, 0); - - intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, plane_state, 0); - - /* HSW+ does this automagically in hardware */ - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) && - rotation & DRM_ROTATE_180) { - x += crtc_state->pipe_src_w - 1; - y += crtc_state->pipe_src_h - 1; - } - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - - intel_crtc->adjusted_x = x; - intel_crtc->adjusted_y = y; - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - I915_WRITE_FW(reg, dspcntr); - - I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); - I915_WRITE_FW(DSPSURF(plane), - intel_plane_ggtt_offset(plane_state) + - intel_crtc->dspaddr_offset); - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); - } else { - I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x); - I915_WRITE_FW(DSPLINOFF(plane), linear_offset); - } - POSTING_READ_FW(reg); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - static u32 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) { - if (fb->modifier == DRM_FORMAT_MOD_NONE) + if (fb->modifier == DRM_FORMAT_MOD_LINEAR) return 64; else return intel_tile_width_bytes(fb, plane); @@ -3254,7 +3209,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, return stride; } -u32 skl_plane_ctl_format(uint32_t pixel_format) +static u32 skl_plane_ctl_format(uint32_t pixel_format) { switch (pixel_format) { case DRM_FORMAT_C8: @@ -3295,10 +3250,10 @@ u32 skl_plane_ctl_format(uint32_t pixel_format) return 0; } -u32 skl_plane_ctl_tiling(uint64_t fb_modifier) +static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) { switch (fb_modifier) { - case DRM_FORMAT_MOD_NONE: + case DRM_FORMAT_MOD_LINEAR: break; case I915_FORMAT_MOD_X_TILED: return PLANE_CTL_TILED_X; @@ -3313,7 +3268,7 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier) return 0; } -u32 skl_plane_ctl_rotation(unsigned int rotation) +static u32 skl_plane_ctl_rotation(unsigned int rotation) { switch (rotation) { case DRM_ROTATE_0: @@ -3335,6 +3290,37 @@ u32 skl_plane_ctl_rotation(unsigned int rotation) return 0; } +u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 plane_ctl; + + plane_ctl = PLANE_CTL_ENABLE; + + if (!IS_GEMINILAKE(dev_priv)) { 
+ plane_ctl |= + PLANE_CTL_PIPE_GAMMA_ENABLE | + PLANE_CTL_PIPE_CSC_ENABLE | + PLANE_CTL_PLANE_GAMMA_DISABLE; + } + + plane_ctl |= skl_plane_ctl_format(fb->format->format); + plane_ctl |= skl_plane_ctl_tiling(fb->modifier); + plane_ctl |= skl_plane_ctl_rotation(rotation); + + if (key->flags & I915_SET_COLORKEY_DESTINATION) + plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; + + return plane_ctl; +} + static void skylake_update_primary_plane(struct drm_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) @@ -3345,7 +3331,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, struct drm_framebuffer *fb = plane_state->base.fb; enum plane_id plane_id = to_intel_plane(plane)->id; enum pipe pipe = to_intel_plane(plane)->pipe; - u32 plane_ctl; + u32 plane_ctl = plane_state->ctl; unsigned int rotation = plane_state->base.rotation; u32 stride = skl_plane_stride(fb, 0, rotation); u32 surf_addr = plane_state->main.offset; @@ -3360,19 +3346,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane, int dst_h = drm_rect_height(&plane_state->base.dst); unsigned long irqflags; - plane_ctl = PLANE_CTL_ENABLE; - - if (!IS_GEMINILAKE(dev_priv)) { - plane_ctl |= - PLANE_CTL_PIPE_GAMMA_ENABLE | - PLANE_CTL_PIPE_CSC_ENABLE | - PLANE_CTL_PLANE_GAMMA_DISABLE; - } - - plane_ctl |= skl_plane_ctl_format(fb->format->format); - plane_ctl |= skl_plane_ctl_tiling(fb->modifier); - plane_ctl |= skl_plane_ctl_rotation(rotation); - /* Sizes are 0 based */ src_w--; src_h--; @@ -6306,6 +6279,17 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) static void compute_m_n(unsigned int m, unsigned int n, uint32_t *ret_m, uint32_t *ret_n) { + /* + * Reduce M/N as much as possible without loss in precision. Several DP + * dongles in particular seem to be fussy about too large *link* M/N + * values. The passed in values are more likely to have the least + * significant bits zero than M after rounding below, so do this first. 
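The reduction described above (and implemented in the loop that follows) is easy to try in isolation; a minimal user-space sketch, with hypothetical m/n values rather than anything taken from the patch:

    #include <stdio.h>
    #include <stdint.h>

    /* Strip common factors of two from m and n before n is rounded up
     * to a power of two, so the later division for M loses less
     * precision. Mirrors the loop added to compute_m_n() above. */
    static void prereduce_m_n(uint32_t *m, uint32_t *n)
    {
            while ((*m & 1) == 0 && (*n & 1) == 0) {
                    *m >>= 1;
                    *n >>= 1;
            }
    }

    int main(void)
    {
            uint32_t m = 107520, n = 540000;  /* hypothetical values */

            prereduce_m_n(&m, &n);
            printf("m=%u n=%u\n", m, n);      /* prints m=3360 n=16875 */
            return 0;
    }

Here five common factors of two are stripped before n is rounded up, so the low bits of m survive into the final link M/N ratio.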
+ */ + while ((m & 1) == 0 && (n & 1) == 0) { + m >>= 1; + n >>= 1; + } + *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); *ret_m = div_u64((uint64_t) m * *ret_n, n); intel_reduce_m_n_ratio(ret_m, ret_n); @@ -8395,7 +8379,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, tiling = val & PLANE_CTL_TILED_MASK; switch (tiling) { case PLANE_CTL_TILED_LINEAR: - fb->modifier = DRM_FORMAT_MOD_NONE; + fb->modifier = DRM_FORMAT_MOD_LINEAR; break; case PLANE_CTL_TILED_X: plane_config->tiling = I915_TILING_X; @@ -8851,8 +8835,14 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { - if (!intel_ddi_pll_select(crtc, crtc_state)) + struct intel_encoder *encoder = + intel_ddi_get_crtc_new_encoder(crtc_state); + + if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", + pipe_name(crtc->pipe)); return -EINVAL; + } } crtc->lowfreq_avail = false; @@ -9148,6 +9138,31 @@ out: return active; } +static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int width = plane_state->base.crtc_w; + unsigned int stride = roundup_pow_of_two(width) * 4; + + switch (stride) { + default: + WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", + width, stride); + stride = 256; + /* fallthrough */ + case 256: + case 512: + case 1024: + case 2048: + break; + } + + return CURSOR_ENABLE | + CURSOR_GAMMA_ENABLE | + CURSOR_FORMAT_ARGB | + CURSOR_STRIDE(stride); +} + static void i845_update_cursor(struct drm_crtc *crtc, u32 base, const struct intel_plane_state *plane_state) { @@ -9159,26 +9174,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, if (plane_state && plane_state->base.visible) { unsigned int width = plane_state->base.crtc_w; unsigned int height = plane_state->base.crtc_h; - unsigned int stride = roundup_pow_of_two(width) * 4; - - switch (stride) { - default: - WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", - width, stride); - stride = 256; - /* fallthrough */ - case 256: - case 512: - case 1024: - case 2048: - break; - } - - cntl |= CURSOR_ENABLE | - CURSOR_GAMMA_ENABLE | - CURSOR_FORMAT_ARGB | - CURSOR_STRIDE(stride); + cntl = plane_state->ctl; size = (height << 12) | width; } @@ -9211,6 +9208,43 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, } } +static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + enum pipe pipe = crtc->pipe; + u32 cntl; + + cntl = MCURSOR_GAMMA_ENABLE; + + if (HAS_DDI(dev_priv)) + cntl |= CURSOR_PIPE_CSC_ENABLE; + + cntl |= pipe << 28; /* Connect to correct pipe */ + + switch (plane_state->base.crtc_w) { + case 64: + cntl |= CURSOR_MODE_64_ARGB_AX; + break; + case 128: + cntl |= CURSOR_MODE_128_ARGB_AX; + break; + case 256: + cntl |= CURSOR_MODE_256_ARGB_AX; + break; + default: + MISSING_CASE(plane_state->base.crtc_w); + return 0; + } + + if (plane_state->base.rotation & DRM_ROTATE_180) + cntl |= CURSOR_ROTATE_180; + + return cntl; +} + static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, const struct intel_plane_state *plane_state) { @@ -9220,30 +9254,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, int pipe = 
intel_crtc->pipe; uint32_t cntl = 0; - if (plane_state && plane_state->base.visible) { - cntl = MCURSOR_GAMMA_ENABLE; - switch (plane_state->base.crtc_w) { - case 64: - cntl |= CURSOR_MODE_64_ARGB_AX; - break; - case 128: - cntl |= CURSOR_MODE_128_ARGB_AX; - break; - case 256: - cntl |= CURSOR_MODE_256_ARGB_AX; - break; - default: - MISSING_CASE(plane_state->base.crtc_w); - return; - } - cntl |= pipe << 28; /* Connect to correct pipe */ - - if (HAS_DDI(dev_priv)) - cntl |= CURSOR_PIPE_CSC_ENABLE; - - if (plane_state->base.rotation & DRM_ROTATE_180) - cntl |= CURSOR_ROTATE_180; - } + if (plane_state && plane_state->base.visible) + cntl = plane_state->ctl; if (intel_crtc->cursor_cntl != cntl) { I915_WRITE_FW(CURCNTR(pipe), cntl); @@ -10338,7 +10350,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, ctl = I915_READ(PLANE_CTL(pipe, 0)); ctl &= ~PLANE_CTL_TILED_MASK; switch (fb->modifier) { - case DRM_FORMAT_MOD_NONE: + case DRM_FORMAT_MOD_LINEAR: break; case I915_FORMAT_MOD_X_TILED: ctl |= PLANE_CTL_TILED_X; @@ -11692,6 +11704,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) PIPE_CONF_CHECK_I(limited_color_range); + + PIPE_CONF_CHECK_I(hdmi_scrambling); + PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_I(has_infoframe); PIPE_CONF_CHECK_I(has_audio); @@ -12992,17 +13007,6 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - /* - * The intel_legacy_cursor_update() fast path takes care - * of avoiding the vblank waits for simple cursor - * movement and flips. For cursor on/off and size changes, - * we want to perform the vblank waits so that watermark - * updates happen during the correct frames. Gen9+ have - * double buffered watermarks and so shouldn't need this. - */ - if (INTEL_GEN(dev_priv) < 9) - state->legacy_cursor_update = false; - ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; @@ -13018,6 +13022,26 @@ static int intel_atomic_commit(struct drm_device *dev, return ret; } + /* + * The intel_legacy_cursor_update() fast path takes care + * of avoiding the vblank waits for simple cursor + * movement and flips. For cursor on/off and size changes, + * we want to perform the vblank waits so that watermark + * updates happen during the correct frames. Gen9+ have + * double buffered watermarks and so shouldn't need this. + * + * Do this after drm_atomic_helper_setup_commit() and + * intel_atomic_prepare_commit() because we still want + * to skip the flip and fb cleanup waits. Although that + * does risk yanking the mapping from under the display + * engine. + * + * FIXME doing watermarks and fb cleanup from a vblank worker + * (assuming we had any) would solve these problems. 
+ */ + if (INTEL_GEN(dev_priv) < 9) + state->legacy_cursor_update = false; + drm_atomic_helper_swap_state(state, true); dev_priv->wm.distrust_bios_wm = false; intel_shared_dpll_swap_state(state); @@ -13285,6 +13309,14 @@ intel_check_primary_plane(struct drm_plane *plane, ret = skl_check_plane_surface(state); if (ret) return ret; + + state->ctl = skl_plane_ctl(crtc_state, state); + } else { + ret = i9xx_check_plane_surface(state); + if (ret) + return ret; + + state->ctl = i9xx_plane_ctl(crtc_state, state); } return 0; @@ -13544,12 +13576,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; - } else if (HAS_PCH_SPLIT(dev_priv)) { - intel_primary_formats = i965_primary_formats; - num_formats = ARRAY_SIZE(i965_primary_formats); - - primary->update_plane = ironlake_update_primary_plane; - primary->disable_plane = i9xx_disable_primary_plane; } else if (INTEL_GEN(dev_priv) >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); @@ -13621,6 +13647,7 @@ intel_check_cursor_plane(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_framebuffer *fb = state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = to_intel_plane(plane)->pipe; @@ -13640,7 +13667,7 @@ intel_check_cursor_plane(struct drm_plane *plane, return 0; /* Check for which cursor types we support */ - if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w, + if (!cursor_size_ok(dev_priv, state->base.crtc_w, state->base.crtc_h)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", state->base.crtc_w, state->base.crtc_h); @@ -13653,7 +13680,7 @@ intel_check_cursor_plane(struct drm_plane *plane, return -ENOMEM; } - if (fb->modifier != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { DRM_DEBUG_KMS("cursor cannot be tiled\n"); return -EINVAL; } @@ -13668,12 +13695,17 @@ intel_check_cursor_plane(struct drm_plane *plane, * display power well must be turned off and on again. * Refuse the put the cursor into that compromised position. */ - if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C && + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && state->base.visible && state->base.crtc_x < 0) { DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) + state->ctl = i845_cursor_ctl(crtc_state, state); + else + state->ctl = i9xx_cursor_ctl(crtc_state, state); + return 0; } @@ -14309,7 +14341,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, mode_cmd->modifier[0]); goto err; } - case DRM_FORMAT_MOD_NONE: + case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: break; default: @@ -14332,7 +14364,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, mode_cmd->pixel_format); if (mode_cmd->pitches[0] > pitch_limit) { DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", - mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ? + mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 
"tiled" : "linear", mode_cmd->pitches[0], pitch_limit); goto err; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6e04cb54e3ff..ee77b519835c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -4636,9 +4636,20 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) */ status = connector_status_disconnected; goto out; - } else if (connector->status == connector_status_connected) { + } else { + /* + * If display is now connected check links status, + * there has been known issues of link loss triggerring + * long pulse. + * + * Some sinks (eg. ASUS PB287Q) seem to perform some + * weird HPD ping pong during modesets. So we can apparently + * end up with HPD going low during a modeset, and then + * going back up soon after. And once that happens we must + * retrain the link to get a picture. That's in case no + * userspace component reacted to intermittent HPD dip. + */ intel_dp_check_link_status(intel_dp); - goto out; } /* diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fb7afcf9e8be..aaee3949a422 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -398,6 +398,9 @@ struct intel_plane_state { int x, y; } aux; + /* plane control register */ + u32 ctl; + /* * scaler_id * = -1 : not using a scaler @@ -729,6 +732,12 @@ struct intel_crtc_state { /* bitmask of visible planes (enum plane_id) */ u8 active_planes; + + /* HDMI scrambling status */ + bool hdmi_scrambling; + + /* HDMI High TMDS char rate ratio */ + bool hdmi_high_tmds_clock_ratio; }; struct intel_crtc { @@ -1220,12 +1229,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv); void intel_crt_reset(struct drm_encoder *encoder); /* intel_ddi.c */ -void intel_ddi_clk_select(struct intel_encoder *encoder, - struct intel_shared_dpll *pll); void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, struct intel_crtc_state *old_crtc_state, struct drm_connector_state *old_conn_state); -void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder); void hsw_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state); void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); @@ -1236,8 +1242,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder); void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); -bool intel_ddi_pll_select(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state); +struct intel_encoder * +intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state); void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); @@ -1246,7 +1252,6 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); -void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder); void intel_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, @@ -1445,12 +1450,12 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) return i915_ggtt_offset(state->vma); } -u32 skl_plane_ctl_format(uint32_t 
pixel_format); -u32 skl_plane_ctl_tiling(uint64_t fb_modifier); -u32 skl_plane_ctl_rotation(unsigned int rotation); +u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, unsigned int rotation); int skl_check_plane_surface(struct intel_plane_state *plane_state); +int i9xx_check_plane_surface(struct intel_plane_state *plane_state); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); @@ -1620,6 +1625,10 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); +void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder, + struct drm_connector *connector, + bool high_tmds_clock_ratio, + bool scrambling); void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 4200faa520c7..854e8e0c836b 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -36,45 +36,45 @@ static const struct engine_info { int (*init_execlists)(struct intel_engine_cs *engine); } intel_engines[] = { [RCS] = { - .name = "render ring", - .exec_id = I915_EXEC_RENDER, + .name = "rcs", .hw_id = RCS_HW, + .exec_id = I915_EXEC_RENDER, .mmio_base = RENDER_RING_BASE, .irq_shift = GEN8_RCS_IRQ_SHIFT, .init_execlists = logical_render_ring_init, .init_legacy = intel_init_render_ring_buffer, }, [BCS] = { - .name = "blitter ring", - .exec_id = I915_EXEC_BLT, + .name = "bcs", .hw_id = BCS_HW, + .exec_id = I915_EXEC_BLT, .mmio_base = BLT_RING_BASE, .irq_shift = GEN8_BCS_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, .init_legacy = intel_init_blt_ring_buffer, }, [VCS] = { - .name = "bsd ring", - .exec_id = I915_EXEC_BSD, + .name = "vcs", .hw_id = VCS_HW, + .exec_id = I915_EXEC_BSD, .mmio_base = GEN6_BSD_RING_BASE, .irq_shift = GEN8_VCS1_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, .init_legacy = intel_init_bsd_ring_buffer, }, [VCS2] = { - .name = "bsd2 ring", - .exec_id = I915_EXEC_BSD, + .name = "vcs2", .hw_id = VCS2_HW, + .exec_id = I915_EXEC_BSD, .mmio_base = GEN8_BSD2_RING_BASE, .irq_shift = GEN8_VCS2_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, .init_legacy = intel_init_bsd2_ring_buffer, }, [VECS] = { - .name = "video enhancement ring", - .exec_id = I915_EXEC_VEBOX, + .name = "vecs", .hw_id = VECS_HW, + .exec_id = I915_EXEC_VEBOX, .mmio_base = VEBOX_RING_BASE, .irq_shift = GEN8_VECS_IRQ_SHIFT, .init_execlists = logical_xcs_ring_init, @@ -242,12 +242,12 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno) void *semaphores; /* Semaphores are in noncoherent memory, flush to be safe */ - semaphores = kmap(page); + semaphores = kmap_atomic(page); memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), I915_NUM_ENGINES * gen8_semaphore_seqno_size); - kunmap(page); + kunmap_atomic(semaphores); } intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); @@ -1111,6 +1111,15 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; enum intel_engine_id id; + if (READ_ONCE(dev_priv->gt.active_requests)) + return false; + + /* If the driver is wedged, HW state may be 
very inconsistent and + * report that it is still busy, even though we have stopped using it. + */ + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return true; + for_each_engine(engine, dev_priv, id) { if (!intel_engine_is_idle(engine)) return false; diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 25691f0e4c50..cb36cbf3818f 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -26,14 +26,14 @@ #define GFXCORE_FAMILY_GEN9 12 #define GFXCORE_FAMILY_UNKNOWN 0x7fffffff -#define GUC_CTX_PRIORITY_KMD_HIGH 0 -#define GUC_CTX_PRIORITY_HIGH 1 -#define GUC_CTX_PRIORITY_KMD_NORMAL 2 -#define GUC_CTX_PRIORITY_NORMAL 3 -#define GUC_CTX_PRIORITY_NUM 4 +#define GUC_CLIENT_PRIORITY_KMD_HIGH 0 +#define GUC_CLIENT_PRIORITY_HIGH 1 +#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2 +#define GUC_CLIENT_PRIORITY_NORMAL 3 +#define GUC_CLIENT_PRIORITY_NUM 4 -#define GUC_MAX_GPU_CONTEXTS 1024 -#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS +#define GUC_MAX_STAGE_DESCRIPTORS 1024 +#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS #define GUC_RENDER_ENGINE 0 #define GUC_VIDEO_ENGINE 1 @@ -68,14 +68,14 @@ #define GUC_DOORBELL_ENABLED 1 #define GUC_DOORBELL_DISABLED 0 -#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0) -#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1) -#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2) -#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3) -#define GUC_CTX_DESC_ATTR_RESET (1 << 4) -#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5) -#define GUC_CTX_DESC_ATTR_PCH (1 << 6) -#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7) +#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) +#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) +#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) +#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3) +#define GUC_STAGE_DESC_ATTR_RESET BIT(4) +#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5) +#define GUC_STAGE_DESC_ATTR_PCH BIT(6) +#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) /* The guc control data is 10 DWORDs */ #define GUC_CTL_CTXINFO 0 @@ -241,8 +241,8 @@ union guc_doorbell_qw { u64 value_qw; } __packed; -#define GUC_MAX_DOORBELLS 256 -#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS) +#define GUC_NUM_DOORBELLS 256 +#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS) #define GUC_DB_SIZE (PAGE_SIZE) #define GUC_WQ_SIZE (PAGE_SIZE * 2) @@ -251,12 +251,12 @@ union guc_doorbell_qw { struct guc_wq_item { u32 header; u32 context_desc; - u32 ring_tail; + u32 submit_element_info; u32 fence_id; } __packed; struct guc_process_desc { - u32 context_id; + u32 stage_id; u64 db_base_addr; u32 head; u32 tail; @@ -278,7 +278,7 @@ struct guc_execlist_context { u32 context_desc; u32 context_id; u32 ring_status; - u32 ring_lcra; + u32 ring_lrca; u32 ring_begin; u32 ring_end; u32 ring_next_free_location; @@ -289,10 +289,18 @@ struct guc_execlist_context { u16 engine_submit_queue_count; } __packed; -/*Context descriptor for communicating between uKernel and Driver*/ -struct guc_context_desc { +/* + * This structure describes a stage set arranged for a particular communication + * between uKernel (GuC) and Driver (KMD). Technically, this is known as a + * "GuC Context descriptor" in the specs, but we use the term "stage descriptor" + * to avoid confusion with all the other things already named "context" in the + * driver. A static pool of these descriptors are stored inside a GEM object + * (stage_desc_pool) which is held for the entire lifetime of our interaction + * with the GuC, being allocated before the GuC is loaded with its firmware. 
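For orientation, the pool arrangement described above reduces to indexing a pinned, CPU-mapped array of fixed-size descriptors; a hypothetical lookup helper (the function name and the pool_vaddr parameter are assumptions for illustration, not part of the patch):

    /* Hypothetical sketch: with the stage descriptor pool CPU-mapped
     * at pool_vaddr, a stage_id below GUC_MAX_STAGE_DESCRIPTORS simply
     * selects one slot of the static array. */
    static struct guc_stage_desc *
    lookup_stage_desc(struct guc_stage_desc *pool_vaddr, u32 stage_id)
    {
            GEM_BUG_ON(stage_id >= GUC_MAX_STAGE_DESCRIPTORS);
            return &pool_vaddr[stage_id];
    }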
+ */ +struct guc_stage_desc { u32 sched_common_area; - u32 context_id; + u32 stage_id; u32 pas_id; u8 engines_used; u64 db_trigger_cpu; @@ -359,7 +367,7 @@ struct guc_policy { } __packed; struct guc_policies { - struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; + struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; /* In micro seconds. How much time to allow before DPC processing is * called back via interrupt (to prevent DPC queue drain starving). @@ -401,16 +409,17 @@ struct guc_mmio_regset { u32 number_of_registers; } __packed; +/* MMIO registers that are set as non privileged */ +struct mmio_white_list { + u32 mmio_start; + u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; + u32 count; +} __packed; + struct guc_mmio_reg_state { struct guc_mmio_regset global_reg; struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM]; - - /* MMIO registers that are set as non privileged */ - struct __packed { - u32 mmio_start; - u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; - u32 count; - } mmio_white_list[GUC_MAX_ENGINES_NUM]; + struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM]; } __packed; /* GuC Additional Data Struct */ diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 2f270d02894c..8a1a023e48b2 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -73,22 +73,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE); #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) MODULE_FIRMWARE(I915_KBL_GUC_UCODE); -/* User-friendly representation of an enum */ -const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) -{ - switch (status) { - case INTEL_UC_FIRMWARE_FAIL: - return "FAIL"; - case INTEL_UC_FIRMWARE_NONE: - return "NONE"; - case INTEL_UC_FIRMWARE_PENDING: - return "PENDING"; - case INTEL_UC_FIRMWARE_SUCCESS: - return "SUCCESS"; - default: - return "UNKNOWN!"; - } -}; static u32 get_gttype(struct drm_i915_private *dev_priv) { @@ -148,16 +132,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv) } else params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; - if (guc->ads_vma) { + /* If GuC submission is enabled, set up additional parameters here */ + if (i915.enable_guc_submission) { u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; + u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool); + u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; + params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; - } - - /* If GuC submission is enabled, set up additional parameters here */ - if (i915.enable_guc_submission) { - u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma); - u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16; pgs >>= PAGE_SHIFT; params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) | @@ -430,24 +412,3 @@ int intel_guc_select_fw(struct intel_guc *guc) return 0; } - -/** - * intel_guc_fini() - clean up all allocated resources - * @dev_priv: i915 device private - */ -void intel_guc_fini(struct drm_i915_private *dev_priv) -{ - struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; - struct drm_i915_gem_object *obj; - - mutex_lock(&dev_priv->drm.struct_mutex); - i915_guc_submission_disable(dev_priv); - i915_guc_submission_fini(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - - obj = fetch_and_zero(&guc_fw->obj); - if (obj) - i915_gem_object_put(obj); - - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; -} diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 
5c0f9a49da0e..6fb63a3c65b0 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -66,7 +66,6 @@ static int guc_log_control(struct intel_guc *guc, u32 control_val) return intel_guc_send(guc, action, ARRAY_SIZE(action)); } - /* * Sub buffer switch callback. Called whenever relay has to switch to a new * sub buffer, relay stays on the same sub buffer if 0 is returned. @@ -139,45 +138,15 @@ static struct rchan_callbacks relay_callbacks = { .remove_buf_file = remove_buf_file_callback, }; -static void guc_log_remove_relay_file(struct intel_guc *guc) -{ - relay_close(guc->log.relay_chan); -} - -static int guc_log_create_relay_channel(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct rchan *guc_log_relay_chan; - size_t n_subbufs, subbuf_size; - - /* Keep the size of sub buffers same as shared log buffer */ - subbuf_size = guc->log.vma->obj->base.size; - - /* Store up to 8 snapshots, which is large enough to buffer sufficient - * boot time logs and provides enough leeway to User, in terms of - * latency, for consuming the logs from relay. Also doesn't take - * up too much memory. - */ - n_subbufs = 8; - - guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size, - n_subbufs, &relay_callbacks, dev_priv); - if (!guc_log_relay_chan) { - DRM_ERROR("Couldn't create relay chan for GuC logging\n"); - return -ENOMEM; - } - - GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); - guc->log.relay_chan = guc_log_relay_chan; - return 0; -} - -static int guc_log_create_relay_file(struct intel_guc *guc) +static int guc_log_relay_file_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); struct dentry *log_dir; int ret; + if (i915.guc_log_level < 0) + return 0; + /* For now create the log file in /sys/kernel/debug/dri/0 dir */ log_dir = dev_priv->drm.primary->debugfs_root; @@ -197,8 +166,8 @@ static int guc_log_create_relay_file(struct intel_guc *guc) return -ENODEV; } - ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir); - if (ret) { + ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir); + if (ret < 0 && ret != -EEXIST) { DRM_ERROR("Couldn't associate relay chan with file %d\n", ret); return ret; } @@ -214,15 +183,15 @@ static void guc_move_to_next_buf(struct intel_guc *guc) smp_wmb(); /* All data has been written, so now move the offset of sub buffer. */ - relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size); + relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size); /* Switch to the next sub buffer */ - relay_flush(guc->log.relay_chan); + relay_flush(guc->log.runtime.relay_chan); } static void *guc_get_write_buffer(struct intel_guc *guc) { - if (!guc->log.relay_chan) + if (!guc->log.runtime.relay_chan) return NULL; /* Just get the base address of a new sub buffer and copy data into it @@ -233,7 +202,7 @@ static void *guc_get_write_buffer(struct intel_guc *guc) * done without using relay_reserve() along with relay_write(). So its * better to use relay_reserve() alone. 
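Taken together, the relay usage in this capture path follows one idiom; a condensed kernel-style sketch, assuming a channel opened with relay_open() and sub-buffers sized to the shared log buffer as in the patch:

    #include <linux/relay.h>
    #include <linux/string.h>

    /* Sketch of the capture idiom: fetch the base of the current
     * sub-buffer without consuming space (length 0), copy the snapshot
     * in, then account for the whole sub-buffer and switch to the next
     * one so readers can consume it. */
    static void capture_into_relay(struct rchan *chan, const void *snapshot,
                                   size_t subbuf_size)
    {
            void *dst = relay_reserve(chan, 0);

            if (!dst)       /* all sub-buffers full; snapshot is dropped */
                    return;

            memcpy(dst, snapshot, subbuf_size);
            relay_reserve(chan, subbuf_size);       /* advance write offset */
            relay_flush(chan);                      /* switch sub-buffer */
    }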
*/ - return relay_reserve(guc->log.relay_chan, 0); + return relay_reserve(guc->log.runtime.relay_chan, 0); } static bool guc_check_log_buf_overflow(struct intel_guc *guc, @@ -284,11 +253,11 @@ static void guc_read_update_log_buffer(struct intel_guc *guc) void *src_data, *dst_data; bool new_overflow; - if (WARN_ON(!guc->log.buf_addr)) + if (WARN_ON(!guc->log.runtime.buf_addr)) return; /* Get the pointer to shared GuC log buffer */ - log_buf_state = src_data = guc->log.buf_addr; + log_buf_state = src_data = guc->log.runtime.buf_addr; /* Get the pointer to local buffer to store the logs */ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc); @@ -371,153 +340,113 @@ static void guc_read_update_log_buffer(struct intel_guc *guc) } } -static void guc_log_cleanup(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - /* First disable the flush interrupt */ - gen9_disable_guc_interrupts(dev_priv); - - if (guc->log.flush_wq) - destroy_workqueue(guc->log.flush_wq); - - guc->log.flush_wq = NULL; - - if (guc->log.relay_chan) - guc_log_remove_relay_file(guc); - - guc->log.relay_chan = NULL; - - if (guc->log.buf_addr) - i915_gem_object_unpin_map(guc->log.vma->obj); - - guc->log.buf_addr = NULL; -} - static void capture_logs_work(struct work_struct *work) { struct intel_guc *guc = - container_of(work, struct intel_guc, log.flush_work); + container_of(work, struct intel_guc, log.runtime.flush_work); guc_log_capture_logs(guc); } -static int guc_log_create_extras(struct intel_guc *guc) +static bool guc_log_has_runtime(struct intel_guc *guc) +{ + return guc->log.runtime.buf_addr != NULL; +} + +static int guc_log_runtime_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); void *vaddr; - int ret; + struct rchan *guc_log_relay_chan; + size_t n_subbufs, subbuf_size; + int ret = 0; lockdep_assert_held(&dev_priv->drm.struct_mutex); - /* Nothing to do */ - if (i915.guc_log_level < 0) - return 0; - - if (!guc->log.buf_addr) { - /* Create a WC (Uncached for read) vmalloc mapping of log - * buffer pages, so that we can directly get the data - * (up-to-date) from memory. - */ - vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - DRM_ERROR("Couldn't map log buffer pages %d\n", ret); - return ret; - } + GEM_BUG_ON(guc_log_has_runtime(guc)); - guc->log.buf_addr = vaddr; + /* Create a WC (Uncached for read) vmalloc mapping of log + * buffer pages, so that we can directly get the data + * (up-to-date) from memory. + */ + vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + DRM_ERROR("Couldn't map log buffer pages %d\n", ret); + return PTR_ERR(vaddr); } - if (!guc->log.relay_chan) { - /* Create a relay channel, so that we have buffers for storing - * the GuC firmware logs, the channel will be linked with a file - * later on when debugfs is registered. - */ - ret = guc_log_create_relay_channel(guc); - if (ret) - return ret; - } + guc->log.runtime.buf_addr = vaddr; - if (!guc->log.flush_wq) { - INIT_WORK(&guc->log.flush_work, capture_logs_work); - - /* - * GuC log buffer flush work item has to do register access to - * send the ack to GuC and this work item, if not synced before - * suspend, can potentially get executed after the GFX device is - * suspended. 
- * By marking the WQ as freezable, we don't have to bother about - * flushing of this work item from the suspend hooks, the pending - * work item if any will be either executed before the suspend - * or scheduled later on resume. This way the handling of work - * item can be kept same between system suspend & rpm suspend. - */ - guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log", - WQ_HIGHPRI | WQ_FREEZABLE); - if (guc->log.flush_wq == NULL) { - DRM_ERROR("Couldn't allocate the wq for GuC logging\n"); - return -ENOMEM; - } - } - - return 0; -} - -void intel_guc_log_create(struct intel_guc *guc) -{ - struct i915_vma *vma; - unsigned long offset; - uint32_t size, flags; + /* Keep the size of sub buffers same as shared log buffer */ + subbuf_size = guc->log.vma->obj->base.size; - if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) - i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; + /* Store up to 8 snapshots, which is large enough to buffer sufficient + * boot time logs and provides enough leeway to User, in terms of + * latency, for consuming the logs from relay. Also doesn't take + * up too much memory. + */ + n_subbufs = 8; - /* The first page is to save log buffer state. Allocate one - * extra page for others in case for overlap */ - size = (1 + GUC_LOG_DPC_PAGES + 1 + - GUC_LOG_ISR_PAGES + 1 + - GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT; + /* Create a relay channel, so that we have buffers for storing + * the GuC firmware logs, the channel will be linked with a file + * later on when debugfs is registered. + */ + guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size, + n_subbufs, &relay_callbacks, dev_priv); + if (!guc_log_relay_chan) { + DRM_ERROR("Couldn't create relay chan for GuC logging\n"); - vma = guc->log.vma; - if (!vma) { - /* We require SSE 4.1 for fast reads from the GuC log buffer and - * it should be present on the chipsets supporting GuC based - * submisssions. - */ - if (WARN_ON(!i915_has_memcpy_from_wc())) { - /* logging will not be enabled */ - i915.guc_log_level = -1; - return; - } + ret = -ENOMEM; + goto err_vaddr; + } - vma = intel_guc_allocate_vma(guc, size); - if (IS_ERR(vma)) { - /* logging will be off */ - i915.guc_log_level = -1; - return; - } + GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); + guc->log.runtime.relay_chan = guc_log_relay_chan; + + INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work); + + /* + * GuC log buffer flush work item has to do register access to + * send the ack to GuC and this work item, if not synced before + * suspend, can potentially get executed after the GFX device is + * suspended. + * By marking the WQ as freezable, we don't have to bother about + * flushing of this work item from the suspend hooks, the pending + * work item if any will be either executed before the suspend + * or scheduled later on resume. This way the handling of work + * item can be kept same between system suspend & rpm suspend. 
+ */ + guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log", + WQ_HIGHPRI | WQ_FREEZABLE); + if (!guc->log.runtime.flush_wq) { + DRM_ERROR("Couldn't allocate the wq for GuC logging\n"); + ret = -ENOMEM; + goto err_relaychan; + } - guc->log.vma = vma; + return 0; - if (guc_log_create_extras(guc)) { - guc_log_cleanup(guc); - i915_vma_unpin_and_release(&guc->log.vma); - i915.guc_log_level = -1; - return; - } - } +err_relaychan: + relay_close(guc->log.runtime.relay_chan); +err_vaddr: + i915_gem_object_unpin_map(guc->log.vma->obj); + guc->log.runtime.buf_addr = NULL; + return ret; +} - /* each allocated unit is a page */ - flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | - (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) | - (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | - (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); +static void guc_log_runtime_destroy(struct intel_guc *guc) +{ + /* + * It's possible that the runtime stuff was never allocated because + * guc_log_level was < 0 at the time + */ + if (!guc_log_has_runtime(guc)) + return; - offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ - guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; + destroy_workqueue(guc->log.runtime.flush_wq); + relay_close(guc->log.runtime.relay_chan); + i915_gem_object_unpin_map(guc->log.vma->obj); + guc->log.runtime.buf_addr = NULL; } static int guc_log_late_setup(struct intel_guc *guc) @@ -527,24 +456,25 @@ static int guc_log_late_setup(struct intel_guc *guc) lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (i915.guc_log_level < 0) - return -EINVAL; - - /* If log_level was set as -1 at boot time, then setup needed to - * handle log buffer flush interrupts would not have been done yet, - * so do that now. - */ - ret = guc_log_create_extras(guc); - if (ret) - goto err; + if (!guc_log_has_runtime(guc)) { + /* If log_level was set as -1 at boot time, then setup needed to + * handle log buffer flush interrupts would not have been done yet, + * so do that now. + */ + ret = guc_log_runtime_create(guc); + if (ret) + goto err; + } - ret = guc_log_create_relay_file(guc); + ret = guc_log_relay_file_create(guc); if (ret) - goto err; + goto err_runtime; return 0; + +err_runtime: + guc_log_runtime_destroy(guc); err: - guc_log_cleanup(guc); /* logging will remain off */ i915.guc_log_level = -1; return ret; @@ -577,7 +507,7 @@ static void guc_flush_logs(struct intel_guc *guc) /* Before initiating the forceful flush, wait for any pending/ongoing * flush to complete otherwise forceful flush may not actually happen. */ - flush_work(&guc->log.flush_work); + flush_work(&guc->log.runtime.flush_work); /* Ask GuC to update the log buffer state */ guc_log_flush(guc); @@ -586,6 +516,72 @@ static void guc_flush_logs(struct intel_guc *guc) guc_log_capture_logs(guc); } +int intel_guc_log_create(struct intel_guc *guc) +{ + struct i915_vma *vma; + unsigned long offset; + uint32_t size, flags; + int ret; + + GEM_BUG_ON(guc->log.vma); + + if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) + i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; + + /* The first page is to save log buffer state. Allocate one + * extra page for others in case of overlap */ + size = (1 + GUC_LOG_DPC_PAGES + 1 + + GUC_LOG_ISR_PAGES + 1 + + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT; + + /* We require SSE 4.1 for fast reads from the GuC log buffer and + * it should be present on the chipsets supporting GuC based + * submissions.
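A worked example of the size computation above, assuming for illustration 7 pages per log section and 4 KiB pages (the real GUC_LOG_*_PAGES constants live in the i915 headers); one extra page per section absorbs overlap, plus one page up front for the log buffer state:

#include <stdio.h>

#define PAGE_SHIFT	12
#define LOG_DPC_PAGES	 7	/* assumed for the example */
#define LOG_ISR_PAGES	 7	/* assumed */
#define LOG_CRASH_PAGES	 7	/* assumed */

int main(void)
{
	unsigned int size = (1 + LOG_DPC_PAGES + 1 +
			     LOG_ISR_PAGES + 1 +
			     LOG_CRASH_PAGES + 1) << PAGE_SHIFT;

	printf("log buffer: %u bytes (%u pages)\n", size, size >> PAGE_SHIFT);
	/* -> 102400 bytes (25 pages) with the assumed values */
	return 0;
}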
+ */ + if (WARN_ON(!i915_has_memcpy_from_wc())) { + ret = -EINVAL; + goto err; + } + + vma = intel_guc_allocate_vma(guc, size); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + guc->log.vma = vma; + + if (i915.guc_log_level >= 0) { + ret = guc_log_runtime_create(guc); + if (ret < 0) + goto err_vma; + } + + /* each allocated unit is a page */ + flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | + (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) | + (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | + (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); + + offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */ + guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; + + return 0; + +err_vma: + i915_vma_unpin_and_release(&guc->log.vma); +err: + /* logging will be off */ + i915.guc_log_level = -1; + return ret; +} + +void intel_guc_log_destroy(struct intel_guc *guc) +{ + guc_log_runtime_destroy(guc); + i915_vma_unpin_and_release(&guc->log.vma); +} + int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) { struct intel_guc *guc = &dev_priv->guc; @@ -609,17 +605,22 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) return ret; } - i915.guc_log_level = log_param.verbosity; + if (log_param.logging_enabled) { + i915.guc_log_level = log_param.verbosity; - /* If log_level was set as -1 at boot time, then the relay channel file - * wouldn't have been created by now and interrupts also would not have - * been enabled. - */ - if (!dev_priv->guc.log.relay_chan) { + /* If log_level was set as -1 at boot time, then the relay channel file + * wouldn't have been created by now and interrupts also would not have + * been enabled. Try again now, just in case. + */ ret = guc_log_late_setup(guc); - if (!ret) - gen9_enable_guc_interrupts(dev_priv); - } else if (!log_param.logging_enabled) { + if (ret < 0) { + DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret); + return ret; + } + + /* GuC logging is currently the only user of Guc2Host interrupts */ + gen9_enable_guc_interrupts(dev_priv); + } else { /* Once logging is disabled, GuC won't generate logs & send an * interrupt. But there could be some data in the log buffer * which is yet to be captured. 
So request GuC to update the log @@ -629,9 +630,6 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) /* As logging is disabled, update log level to reflect that */ i915.guc_log_level = -1; - } else { - /* In case interrupts were disabled, enable them now */ - gen9_enable_guc_interrupts(dev_priv); } return ret; @@ -639,7 +637,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) void i915_guc_log_register(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission) + if (!i915.enable_guc_submission || i915.guc_log_level < 0) return; mutex_lock(&dev_priv->drm.struct_mutex); @@ -653,6 +651,8 @@ void i915_guc_log_unregister(struct drm_i915_private *dev_priv) return; mutex_lock(&dev_priv->drm.struct_mutex); - guc_log_cleanup(&dev_priv->guc); + /* GuC logging is currently the only user of Guc2Host interrupts */ + gen9_disable_guc_interrupts(dev_priv); + guc_log_runtime_destroy(&dev_priv->guc); mutex_unlock(&dev_priv->drm.struct_mutex); } diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 8c04eca84351..e1ab6432a914 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -45,6 +45,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_SKYLAKE(dev_priv)) return true; + if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) + return true; return false; } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 3eec74ca5116..1d623b5e09d6 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -34,6 +34,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_scdc_helper.h> #include "intel_drv.h" #include <drm/i915_drm.h> #include <drm/intel_lpe_audio.h> @@ -1208,6 +1209,8 @@ static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv) { if (IS_G4X(dev_priv)) return 165000; + else if (IS_GEMINILAKE(dev_priv)) + return 594000; else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) return 300000; else @@ -1334,6 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc; int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; int clock_12bpc = clock_8bpc * 3 / 2; int desired_bpp; @@ -1403,6 +1407,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->lane_count = 4; + if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) { + if (scdc->scrambling.low_rates) + pipe_config->hdmi_scrambling = true; + + if (pipe_config->port_clock > 340000) { + pipe_config->hdmi_scrambling = true; + pipe_config->hdmi_high_tmds_clock_ratio = true; + } + } + return true; } @@ -1812,6 +1826,57 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; } +/* + * intel_hdmi_handle_sink_scrambling: handle sink scrambling/clock ratio setup + * @encoder: intel_encoder + * @connector: drm_connector + * @high_tmds_clock_ratio = bool to indicate if the function needs to set + * or reset the high tmds clock ratio for scrambling + * @scrambling: bool to Indicate if the function needs to set or reset + * sink 
scrambling + * + * This function handles scrambling on HDMI 2.0 capable sinks. + * If required clock rate is > 340 MHz && scrambling is supported by sink + * it enables scrambling. This should be called before enabling the HDMI + * 2.0 port, as the sink can choose to disable the scrambling if it doesn't + * detect a scrambled clock within 100 ms. + */ +void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, + struct drm_connector *connector, + bool high_tmds_clock_ratio, + bool scrambling) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct drm_i915_private *dev_priv = connector->dev->dev_private; + struct drm_scrambling *sink_scrambling = + &connector->display_info.hdmi.scdc.scrambling; + struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv, + intel_hdmi->ddc_bus); + bool ret; + + if (!sink_scrambling->supported) + return; + + DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n", + encoder->base.name, connector->name); + + /* Set TMDS bit clock ratio to 1/40 or 1/10 */ + ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio); + if (!ret) { + DRM_ERROR("Set TMDS ratio failed\n"); + return; + } + + /* Enable/disable sink scrambling */ + ret = drm_scdc_set_scrambling(adptr, scrambling); + if (!ret) { + DRM_ERROR("Set sink scrambling failed\n"); + return; + } + + DRM_DEBUG_KMS("sink scrambling handled\n"); +} + static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 7af900bcdc05..9ee819666a4c 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -251,24 +251,6 @@ fail: } /** - * intel_huc_fini() - clean up resources allocated for HuC - * @dev_priv: the drm_i915_private device - * - * Cleans up by releasing the huc firmware GEM obj. - */ -void intel_huc_fini(struct drm_i915_private *dev_priv) -{ - struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; - struct drm_i915_gem_object *obj; - - obj = fetch_and_zero(&huc_fw->obj); - if (obj) - i915_gem_object_put(obj); - - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; -} - -/** * intel_guc_auth_huc() - authenticate ucode * @dev_priv: the drm_i915_device * diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 7a5b41b1c024..25d8e76489e4 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -131,8 +131,15 @@ err: static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) { + /* XXX Note that platform_device_register_full() allocates a dma_mask + * and never frees it. We can't free it here as we cannot guarantee + * this is the last reference (i.e. that the dma_mask will not be + * used after our unregister). So we choose to leak the sizeof(u64) + * allocation here - it should be fixed in the platform_device rather + * than by us fiddling with its internals.
+ */ + platform_device_unregister(dev_priv->lpe_audio.platdev); - kfree(dev_priv->lpe_audio.platdev->dev.dma_mask); } static void lpe_audio_irq_unmask(struct irq_data *d) @@ -331,6 +338,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) * audio driver and i915 * @dev_priv: the i915 drm device private data * @eld : ELD data + * @pipe: pipe id * @port: port id * @tmds_clk_speed: tmds clock frequency in Hz * diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 77168e673e0a..c8f7c631fc1f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -326,7 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; - GEM_BUG_ON(!IS_ALIGNED(rq->tail, 8)); + assert_ring_tail_valid(rq->ring, rq->tail); reg_state[CTX_RING_TAIL+1] = rq->tail; /* True 32b PPGTT with dynamic page allocation: update PDP @@ -399,22 +399,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *last; struct execlist_port *port = engine->execlist_port; - unsigned long flags; struct rb_node *rb; bool submit = false; - /* After execlist_first is updated, the tasklet will be rescheduled. - * - * If we are currently running (inside the tasklet) and a third - * party queues a request and so updates engine->execlist_first under - * the spinlock (which we have elided), it will atomically set the - * TASKLET_SCHED flag causing the us to be re-executed and pick up - * the change in state (the update to TASKLET_SCHED incurs a memory - * barrier making this cross-cpu checking safe). - */ - if (!READ_ONCE(engine->execlist_first)) - return; - last = port->request; if (last) /* WaIdleLiteRestore:bdw,skl @@ -448,7 +435,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * and context switches) submission. */ - spin_lock_irqsave(&engine->timeline->lock, flags); + spin_lock_irq(&engine->timeline->lock); rb = engine->execlist_first; while (rb) { struct drm_i915_gem_request *cursor = @@ -500,7 +487,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) i915_gem_request_assign(&port->request, last); engine->execlist_first = rb; } - spin_unlock_irqrestore(&engine->timeline->lock, flags); + spin_unlock_irq(&engine->timeline->lock); if (submit) execlists_submit_ports(engine); @@ -530,24 +517,36 @@ static void intel_lrc_irq_handler(unsigned long data) intel_uncore_forcewake_get(dev_priv, engine->fw_domains); - while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { + /* Prefer doing test_and_clear_bit() as a two stage operation to avoid + * imposing the cost of a locked atomic transaction when submitting a + * new request (outside of the context-switch interrupt). + */ + while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { u32 __iomem *csb_mmio = dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); u32 __iomem *buf = dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); - unsigned int csb, head, tail; - - csb = readl(csb_mmio); - head = GEN8_CSB_READ_PTR(csb); - tail = GEN8_CSB_WRITE_PTR(csb); - if (head == tail) - break; + unsigned int head, tail; + + /* The write will be ordered by the uncached read (itself + * a memory barrier), so we do not need another in the form + * of a locked instruction. The race between the interrupt + * handler and the split test/clear is harmless as we order + * our clear before the CSB read. 
If the interrupt arrived + * first between the test and the clear, we read the updated + * CSB and clear the bit. If the interrupt arrives as we read + * the CSB or later (i.e. after we had cleared the bit) the bit + * is set and we do a new loop. + */ + __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + head = readl(csb_mmio); + tail = GEN8_CSB_WRITE_PTR(head); + head = GEN8_CSB_READ_PTR(head); + while (head != tail) { + unsigned int status; - if (tail < head) - tail += GEN8_CSB_ENTRIES; - do { - unsigned int idx = ++head % GEN8_CSB_ENTRIES; - unsigned int status = readl(buf + 2 * idx); + if (++head == GEN8_CSB_ENTRIES) + head = 0; /* We are flying near dragons again. * @@ -566,11 +565,12 @@ static void intel_lrc_irq_handler(unsigned long data) * status notifier. */ + status = readl(buf + 2 * head); if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) continue; /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) != + GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) != port[0].context_id); GEM_BUG_ON(port[0].count == 0); @@ -588,10 +588,9 @@ static void intel_lrc_irq_handler(unsigned long data) GEM_BUG_ON(port[0].count == 0 && !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); - } while (head < tail); + } - writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, - GEN8_CSB_WRITE_PTR(csb) << 8), + writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), csb_mmio); } @@ -647,15 +646,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request) static struct intel_engine_cs * pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) { - struct intel_engine_cs *engine; + struct intel_engine_cs *engine = + container_of(pt, struct drm_i915_gem_request, priotree)->engine; + + GEM_BUG_ON(!locked); - engine = container_of(pt, - struct drm_i915_gem_request, - priotree)->engine; if (engine != locked) { - if (locked) - spin_unlock_irq(&locked->timeline->lock); - spin_lock_irq(&engine->timeline->lock); + spin_unlock(&locked->timeline->lock); + spin_lock(&engine->timeline->lock); } return engine; @@ -663,7 +661,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked) static void execlists_schedule(struct drm_i915_gem_request *request, int prio) { - struct intel_engine_cs *engine = NULL; + struct intel_engine_cs *engine; struct i915_dependency *dep, *p; struct i915_dependency stack; LIST_HEAD(dfs); @@ -697,26 +695,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) list_for_each_entry_safe(dep, p, &dfs, dfs_link) { struct i915_priotree *pt = dep->signaler; - list_for_each_entry(p, &pt->signalers_list, signal_link) + /* Within an engine, there can be no cycle, but we may + * refer to the same dependency chain multiple times + * (redundant dependencies are not eliminated) and across + * engines. + */ + list_for_each_entry(p, &pt->signalers_list, signal_link) { + GEM_BUG_ON(p->signaler->priority < pt->priority); if (prio > READ_ONCE(p->signaler->priority)) list_move_tail(&p->dfs_link, &dfs); + } list_safe_reset_next(dep, p, dfs_link); - if (!RB_EMPTY_NODE(&pt->node)) - continue; - - engine = pt_lock_engine(pt, engine); - - /* If it is not already in the rbtree, we can update the - * priority inplace and skip over it (and its dependencies) - * if it is referenced *again* as we descend the dfs. 
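The rewritten CSB loop above replaces the old "extend tail past the array and index modulo" style with an explicit wrapping increment. A standalone sketch of that walk, with six entries to match GEN8_CSB_ENTRIES in this driver:

#include <stdio.h>

#define CSB_ENTRIES 6

static void process_csb(const unsigned int *buf,
			unsigned int head, unsigned int tail)
{
	/* consume entries strictly after the read pointer, up to and
	 * including the write pointer, wrapping the index explicitly */
	while (head != tail) {
		if (++head == CSB_ENTRIES)
			head = 0;	/* wrap, as in the loop above */

		printf("status[%u] = %#x\n", head, buf[head]);
	}
}

int main(void)
{
	unsigned int buf[CSB_ENTRIES] = { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20 };

	process_csb(buf, 4, 1);	/* consumes entries 5, 0, 1 */
	return 0;
}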
- */ - if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) { - pt->priority = prio; - list_del_init(&dep->dfs_link); - } } + engine = request->engine; + spin_lock_irq(&engine->timeline->lock); + /* Fifo and depth-first replacement ensure our deps execute before us */ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { struct i915_priotree *pt = dep->signaler; @@ -728,16 +723,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) if (prio <= pt->priority) continue; - GEM_BUG_ON(RB_EMPTY_NODE(&pt->node)); - pt->priority = prio; - rb_erase(&pt->node, &engine->execlist_queue); - if (insert_request(pt, &engine->execlist_queue)) - engine->execlist_first = &pt->node; + if (!RB_EMPTY_NODE(&pt->node)) { + rb_erase(&pt->node, &engine->execlist_queue); + if (insert_request(pt, &engine->execlist_queue)) + engine->execlist_first = &pt->node; + } } - if (engine) - spin_unlock_irq(&engine->timeline->lock); + spin_unlock_irq(&engine->timeline->lock); /* XXX Do we need to preempt to make room for us and our deps? */ } @@ -1255,7 +1249,6 @@ static void reset_common_ring(struct intel_engine_cs *engine, ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix; request->ring->head = request->postfix; - request->ring->last_retired_head = -1; intel_ring_update_space(request->ring); /* Catch up with any missed context-switch interrupts */ @@ -1268,8 +1261,10 @@ static void reset_common_ring(struct intel_engine_cs *engine, GEM_BUG_ON(request->ctx != port[0].request->ctx); /* Reset WaIdleLiteRestore:bdw,skl as well */ - request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32); - GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); + request->tail = + intel_ring_wrap(request->ring, + request->wa_tail - WA_TAIL_DWORDS*sizeof(u32)); + assert_ring_tail_valid(request->ring, request->tail); } static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) @@ -1480,7 +1475,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs) *cs++ = MI_USER_INTERRUPT; *cs++ = MI_NOOP; request->tail = intel_ring_offset(request, cs); - GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); + assert_ring_tail_valid(request->ring, request->tail); gen8_emit_wa_tail(request, cs); } @@ -1508,7 +1503,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request, *cs++ = MI_USER_INTERRUPT; *cs++ = MI_NOOP; request->tail = intel_ring_offset(request, cs); - GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); + assert_ring_tail_valid(request->ring, request->tail); gen8_emit_wa_tail(request, cs); } @@ -1575,6 +1570,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; engine->schedule = execlists_schedule; + engine->irq_tasklet.func = intel_lrc_irq_handler; } static void @@ -2041,7 +2037,6 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv) i915_gem_object_unpin_map(ce->state->obj); ce->ring->head = ce->ring->tail = 0; - ce->ring->last_retired_head = -1; intel_ring_update_space(ce->ring); } } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 441c01466384..d44465190dc1 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -920,6 +920,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; void *base; + const void *vbt; + u32 vbt_size; BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_acpi) != 
0x100); @@ -972,45 +974,46 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) if (mboxes & MBOX_ASLE_EXT) DRM_DEBUG_DRIVER("ASLE extension supported\n"); - if (!dmi_check_system(intel_no_opregion_vbt)) { - const void *vbt = NULL; - u32 vbt_size = 0; - - if (opregion->header->opregion_ver >= 2 && opregion->asle && - opregion->asle->rvda && opregion->asle->rvds) { - opregion->rvda = memremap(opregion->asle->rvda, - opregion->asle->rvds, - MEMREMAP_WB); - vbt = opregion->rvda; - vbt_size = opregion->asle->rvds; - } + if (dmi_check_system(intel_no_opregion_vbt)) + goto out; + if (opregion->header->opregion_ver >= 2 && opregion->asle && + opregion->asle->rvda && opregion->asle->rvds) { + opregion->rvda = memremap(opregion->asle->rvda, + opregion->asle->rvds, + MEMREMAP_WB); + vbt = opregion->rvda; + vbt_size = opregion->asle->rvds; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; + goto out; } else { - vbt = base + OPREGION_VBT_OFFSET; - /* - * The VBT specification says that if the ASLE ext - * mailbox is not used its area is reserved, but - * on some CHT boards the VBT extends into the - * ASLE ext area. Allow this even though it is - * against the spec, so we do not end up rejecting - * the VBT on those boards (and end up not finding the - * LCD panel because of this). - */ - vbt_size = (mboxes & MBOX_ASLE_EXT) ? - OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; - vbt_size -= OPREGION_VBT_OFFSET; - if (intel_bios_is_valid_vbt(vbt, vbt_size)) { - DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); - opregion->vbt = vbt; - opregion->vbt_size = vbt_size; - } + DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); } } + vbt = base + OPREGION_VBT_OFFSET; + /* + * The VBT specification says that if the ASLE ext mailbox is not used + * its area is reserved, but on some CHT boards the VBT extends into the + * ASLE ext area. Allow this even though it is against the spec, so we + * do not end up rejecting the VBT on those boards (and end up not + * finding the LCD panel because of this). + */ + vbt_size = (mboxes & MBOX_ASLE_EXT) ? + OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; + vbt_size -= OPREGION_VBT_OFFSET; + if (intel_bios_is_valid_vbt(vbt, vbt_size)) { + DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); + opregion->vbt = vbt; + opregion->vbt_size = vbt_size; + } else { + DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n"); + } + +out: return 0; err_out: diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index aece0ff88a5d..570bd603f401 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -655,6 +655,29 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, return wm_size; } +static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + + /* FIXME check the 'enable' instead */ + if (!crtc_state->base.active) + return false; + + /* + * Treat cursor with fb as always visible since cursor updates + * can happen faster than the vrefresh rate, and the current + * watermark code doesn't handle that correctly. Cursor updates + * which set/clear the fb or change the cursor size are going + * to get throttled by intel_legacy_cursor_update() to work + * around this problem with the watermark code. 
+ */ + if (plane->id == PLANE_CURSOR) + return plane_state->base.fb != NULL; + else + return plane_state->base.visible; +} + static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc, *enabled = NULL; @@ -1961,7 +1984,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; - if (!cstate->base.active || !pstate->base.visible) + if (!intel_wm_plane_visible(cstate, pstate)) return 0; cpp = pstate->base.fb->format->cpp[0]; @@ -1990,7 +2013,7 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, uint32_t method1, method2; int cpp; - if (!cstate->base.active || !pstate->base.visible) + if (!intel_wm_plane_visible(cstate, pstate)) return 0; cpp = pstate->base.fb->format->cpp[0]; @@ -2013,15 +2036,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, { int cpp; - /* - * Treat cursor with fb as always visible since cursor updates - * can happen faster than the vrefresh rate, and the current - * watermark code doesn't handle that correctly. Cursor updates - * which set/clear the fb or change the cursor size are going - * to get throttled by intel_legacy_cursor_update() to work - * around this problem with the watermark code. - */ - if (!cstate->base.active || !pstate->base.fb) + if (!intel_wm_plane_visible(cstate, pstate)) return 0; cpp = pstate->base.fb->format->cpp[0]; @@ -2038,7 +2053,7 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, { int cpp; - if (!cstate->base.active || !pstate->base.visible) + if (!intel_wm_plane_visible(cstate, pstate)) return 0; cpp = pstate->base.fb->format->cpp[0]; @@ -3346,19 +3361,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, * Caller should take care of dividing & rounding off the value. 
*/ static uint32_t -skl_plane_downscale_amount(const struct intel_plane_state *pstate) +skl_plane_downscale_amount(const struct intel_crtc_state *cstate, + const struct intel_plane_state *pstate) { + struct intel_plane *plane = to_intel_plane(pstate->base.plane); uint32_t downscale_h, downscale_w; uint32_t src_w, src_h, dst_w, dst_h; - if (WARN_ON(!pstate->base.visible)) + if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) return DRM_PLANE_HELPER_NO_SCALING; /* n.b., src is 16.16 fixed point, dst is whole integer */ - src_w = drm_rect_width(&pstate->base.src); - src_h = drm_rect_height(&pstate->base.src); - dst_w = drm_rect_width(&pstate->base.dst); - dst_h = drm_rect_height(&pstate->base.dst); + if (plane->id == PLANE_CURSOR) { + src_w = pstate->base.src_w; + src_h = pstate->base.src_h; + dst_w = pstate->base.crtc_w; + dst_h = pstate->base.crtc_h; + } else { + src_w = drm_rect_width(&pstate->base.src); + src_h = drm_rect_height(&pstate->base.src); + dst_w = drm_rect_width(&pstate->base.dst); + dst_h = drm_rect_height(&pstate->base.dst); + } + if (drm_rotation_90_or_270(pstate->base.rotation)) swap(dst_w, dst_h); @@ -3374,6 +3399,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, const struct drm_plane_state *pstate, int y) { + struct intel_plane *plane = to_intel_plane(pstate->plane); struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); uint32_t down_scale_amount, data_rate; uint32_t width = 0, height = 0; @@ -3386,7 +3412,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, fb = pstate->fb; format = fb->format->format; - if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) + if (plane->id == PLANE_CURSOR) return 0; if (y && format != DRM_FORMAT_NV12) return 0; @@ -3410,7 +3436,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, data_rate = width * height * fb->format->cpp[0]; } - down_scale_amount = skl_plane_downscale_amount(intel_pstate); + down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); return (uint64_t)data_rate * down_scale_amount >> 16; } @@ -3702,7 +3728,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst uint64_t pixel_rate; /* Shouldn't reach here on disabled planes... */ - if (WARN_ON(!pstate->base.visible)) + if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) return 0; /* @@ -3710,7 +3736,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst * with additional adjustments for plane-specific scaling. 
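A standalone sketch of the 16.16 fixed-point arithmetic the downscale code above relies on: the factor is max(1, src/dst) per axis, the two factors are multiplied back into one 16.16 value, and the caller applies it with a final >> 16. Values and names are illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t downscale_amount(uint32_t src_w, uint32_t src_h,
				 uint32_t dst_w, uint32_t dst_h)
{
	/* max(1:1, src/dst) per axis, in 16.16 fixed point */
	uint32_t down_w = src_w > dst_w ?
		(uint32_t)(((uint64_t)src_w << 16) / dst_w) : 1 << 16;
	uint32_t down_h = src_h > dst_h ?
		(uint32_t)(((uint64_t)src_h << 16) / dst_h) : 1 << 16;

	/* combine the two 16.16 factors back into one 16.16 value */
	return (uint32_t)(((uint64_t)down_w * down_h) >> 16);
}

int main(void)
{
	/* 4096x2160 scanned out as 1920x1080: factor ~4.27 in 16.16 */
	uint32_t amount = downscale_amount(4096, 2160, 1920, 1080);
	uint64_t data_rate = (uint64_t)1920 * 1080 * 4;	/* bytes, 32bpp */

	printf("downscale = %u.%04u\n", amount >> 16,
	       (amount & 0xffff) * 10000 >> 16);
	printf("adjusted rate = %llu\n",
	       (unsigned long long)(data_rate * amount >> 16));
	return 0;
}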
*/ adjusted_pixel_rate = cstate->pixel_rate; - downscale_amount = skl_plane_downscale_amount(pstate); + downscale_amount = skl_plane_downscale_amount(cstate, pstate); pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0)); @@ -3727,6 +3753,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint8_t *out_lines, /* out */ bool *enabled /* out */) { + struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); struct drm_plane_state *pstate = &intel_pstate->base; struct drm_framebuffer *fb = pstate->fb; uint32_t latency = dev_priv->wm.skl_latency[level]; @@ -3746,7 +3773,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); bool y_tiled, x_tiled; - if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) { + if (latency == 0 || + !intel_wm_plane_visible(cstate, intel_pstate)) { *enabled = false; return 0; } @@ -3762,8 +3790,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (apply_memory_bw_wa && x_tiled) latency += 15; - width = drm_rect_width(&intel_pstate->base.src) >> 16; - height = drm_rect_height(&intel_pstate->base.src) >> 16; + if (plane->id == PLANE_CURSOR) { + width = intel_pstate->base.crtc_w; + height = intel_pstate->base.crtc_h; + } else { + width = drm_rect_width(&intel_pstate->base.src) >> 16; + height = drm_rect_height(&intel_pstate->base.src) >> 16; + } if (drm_rotation_90_or_270(pstate->rotation)) swap(width, height); @@ -8055,7 +8088,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) case GEN6_PCODE_TIMEOUT: return -ETIMEDOUT; default: - MISSING_CASE(flags) + MISSING_CASE(flags); return 0; } } @@ -8355,6 +8388,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, const i915_reg_t reg) { u32 lower, upper, tmp; + int loop = 2; /* The register accessed do not need forcewake. We borrow * uncore lock to prevent concurrent access to range reg. 
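The vlv_residency_raw change above bounds a retry loop around a split 64-bit counter read. A self-contained sketch of that upper/lower/upper pattern, with a simulated counter standing in for the register reads:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter = 0xffffffffull;	/* about to carry */

static uint32_t read_upper(void) { return hw_counter >> 32; }
static uint32_t read_lower(void) { hw_counter += 2; return (uint32_t)hw_counter; }

static uint64_t read_counter64(void)
{
	uint32_t upper, lower, tmp;
	int loop = 2;	/* bounded, as in the patch: at most one rollover,
			 * so a wedged counter cannot hang the caller */

	upper = read_upper();
	do {
		tmp = upper;
		lower = read_lower();
		upper = read_upper();
	} while (upper != tmp && --loop);

	return ((uint64_t)upper << 32) | lower;
}

int main(void)
{
	printf("counter = %#llx\n", (unsigned long long)read_counter64());
	return 0;
}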
@@ -8383,7 +8417,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, I915_WRITE_FW(VLV_COUNTER_CONTROL, _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); upper = I915_READ_FW(reg); - } while (upper != tmp); + } while (upper != tmp && --loop); /* Everywhere else we always use VLV_COUNTER_CONTROL with the * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f7c6383ecd06..66a2b8b83972 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -49,13 +49,7 @@ static int __intel_ring_space(int head, int tail, int size) void intel_ring_update_space(struct intel_ring *ring) { - if (ring->last_retired_head != -1) { - ring->head = ring->last_retired_head; - ring->last_retired_head = -1; - } - - ring->space = __intel_ring_space(ring->head & HEAD_ADDR, - ring->tail, ring->size); + ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); } static int @@ -618,12 +612,8 @@ static void reset_ring_common(struct intel_engine_cs *engine, } /* If the rq hung, jump to its breadcrumb and skip the batch */ - if (request->fence.error == -EIO) { - struct intel_ring *ring = request->ring; - - ring->head = request->postfix; - ring->last_retired_head = -1; - } + if (request->fence.error == -EIO) + request->ring->head = request->postfix; } else { engine->legacy_active_context = NULL; } @@ -784,7 +774,7 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request) i915_gem_request_submit(request); - GEM_BUG_ON(!IS_ALIGNED(request->tail, 8)); + assert_ring_tail_valid(request->ring, request->tail); I915_WRITE_TAIL(request->engine, request->tail); } @@ -796,7 +786,7 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) *cs++ = MI_USER_INTERRUPT; req->tail = intel_ring_offset(req, cs); - GEM_BUG_ON(!IS_ALIGNED(req->tail, 8)); + assert_ring_tail_valid(req->ring, req->tail); } static const int i9xx_emit_breadcrumb_sz = 4; @@ -835,7 +825,7 @@ static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req, *cs++ = MI_NOOP; req->tail = intel_ring_offset(req, cs); - GEM_BUG_ON(!IS_ALIGNED(req->tail, 8)); + assert_ring_tail_valid(req->ring, req->tail); } static const int gen8_render_emit_breadcrumb_sz = 8; @@ -1392,7 +1382,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) if (IS_I830(engine->i915) || IS_I845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; - ring->last_retired_head = -1; intel_ring_update_space(ring); vma = intel_ring_create_vma(engine->i915, size); @@ -1573,10 +1562,8 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, dev_priv, id) engine->buffer->head = engine->buffer->tail; - engine->buffer->last_retired_head = -1; - } } static int ring_request_alloc(struct drm_i915_gem_request *request) @@ -2130,7 +2117,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1; - engine->emit_breadcrumb_sz += num_rings * 6; + engine->emit_breadcrumb_sz += num_rings * 8; } } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 847aea554464..a82a0807f64d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ 
b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -149,16 +149,6 @@ struct intel_ring { int space; int size; int effective_size; - - /** We track the position of the requests in the ring buffer, and - * when each is retired we increment last_retired_head as the GPU - * must have finished processing the request and so we know we - * can advance the ringbuffer up to that position. - * - * last_retired_head is set to -1 after the value is consumed so - * we can detect new retirements. - */ - u32 last_retired_head; }; struct i915_gem_context; @@ -442,18 +432,10 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); }; -static inline unsigned +static inline unsigned int intel_engine_flag(const struct intel_engine_cs *engine) { - return 1 << engine->id; -} - -static inline void -intel_flush_status_page(struct intel_engine_cs *engine, int reg) -{ - mb(); - clflush(&engine->status_page.page_addr[reg]); - mb(); + return BIT(engine->id); } static inline u32 @@ -464,14 +446,22 @@ intel_read_status_page(struct intel_engine_cs *engine, int reg) } static inline void -intel_write_status_page(struct intel_engine_cs *engine, - int reg, u32 value) +intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) { - mb(); - clflush(&engine->status_page.page_addr[reg]); - engine->status_page.page_addr[reg] = value; - clflush(&engine->status_page.page_addr[reg]); - mb(); + /* Writing into the status page should be done sparingly. Since + * we do so when we are uncertain of the device state, we take a bit + * of extra paranoia to try and ensure that the HWS takes the value + * we give and that it doesn't end up trapped inside the CPU! + */ + if (static_cpu_has(X86_FEATURE_CLFLUSH)) { + mb(); + clflush(&engine->status_page.page_addr[reg]); + engine->status_page.page_addr[reg] = value; + clflush(&engine->status_page.page_addr[reg]); + mb(); + } else { + WRITE_ONCE(engine->status_page.page_addr[reg], value); + } } /* @@ -525,12 +515,29 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) } static inline u32 -intel_ring_offset(struct drm_i915_gem_request *req, void *addr) +intel_ring_wrap(const struct intel_ring *ring, u32 pos) +{ + return pos & (ring->size - 1); +} + +static inline u32 +intel_ring_offset(const struct drm_i915_gem_request *req, void *addr) { /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ u32 offset = addr - req->ring->vaddr; GEM_BUG_ON(offset > req->ring->size); - return offset & (req->ring->size - 1); + return intel_ring_wrap(req->ring, offset); +} + +static inline void +assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) +{ + /* We could combine these into a single tail operation, but keeping + * them as separate tests will help identify the cause should one + * ever fire.
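A minimal sketch of the masking and the two separate asserts just described, assuming a hypothetical fixed ring size; with a power-of-two size, "pos & (size - 1)" is a cheap modulo, and a valid tail is both qword-aligned and strictly inside the ring:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 4096	/* must be a power of two for the mask to work */

static unsigned int ring_wrap(unsigned int pos)
{
	return pos & (RING_SIZE - 1);
}

static void assert_tail_valid(unsigned int tail)
{
	/* two separate checks so a failure pinpoints the cause */
	assert((tail & 7) == 0);	/* 8-byte aligned */
	assert(tail < RING_SIZE);	/* never equal to size (reads as 0) */
}

int main(void)
{
	unsigned int tail = ring_wrap(4096 + 64);	/* wraps to 64 */

	assert_tail_valid(tail);
	printf("tail = %u\n", tail);
	return 0;
}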
+ */ + GEM_BUG_ON(!IS_ALIGNED(tail, 8)); + GEM_BUG_ON(tail >= ring->size); } void intel_ring_update_space(struct intel_ring *ring); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 012bc358a33a..f8a375f8dde6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2840,8 +2840,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; struct device *kdev = &pdev->dev; + int ret; - pm_runtime_get_sync(kdev); + ret = pm_runtime_get_sync(kdev); + WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); atomic_inc(&dev_priv->pm.wakeref_count); assert_rpm_wakelock_held(dev_priv); @@ -2871,7 +2873,8 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) * function, since the power state is undefined. This applies * atm to the late/early system suspend/resume handlers. */ - WARN_ON_ONCE(ret < 0); + WARN_ONCE(ret < 0, + "pm_runtime_get_if_in_use() failed: %d\n", ret); if (ret <= 0) return false; } @@ -2955,8 +2958,11 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) * platforms without RPM support. */ if (!HAS_RUNTIME_PM(dev_priv)) { + int ret; + pm_runtime_dont_use_autosuspend(kdev); - pm_runtime_get_sync(kdev); + ret = pm_runtime_get_sync(kdev); + WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret); } else { pm_runtime_use_autosuspend(kdev); } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index b931d0bd7a64..f7d431427115 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -217,7 +217,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_framebuffer *fb = plane_state->base.fb; enum plane_id plane_id = intel_plane->id; enum pipe pipe = intel_plane->pipe; - u32 plane_ctl; + u32 plane_ctl = plane_state->ctl; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr = plane_state->main.offset; unsigned int rotation = plane_state->base.rotation; @@ -232,24 +232,6 @@ skl_update_plane(struct drm_plane *drm_plane, uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; unsigned long irqflags; - plane_ctl = PLANE_CTL_ENABLE; - - if (!IS_GEMINILAKE(dev_priv)) { - plane_ctl |= - PLANE_CTL_PIPE_GAMMA_ENABLE | - PLANE_CTL_PIPE_CSC_ENABLE | - PLANE_CTL_PLANE_GAMMA_DISABLE; - } - - plane_ctl |= skl_plane_ctl_format(fb->format->format); - plane_ctl |= skl_plane_ctl_tiling(fb->modifier); - plane_ctl |= skl_plane_ctl_rotation(rotation); - - if (key->flags & I915_SET_COLORKEY_DESTINATION) - plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; - else if (key->flags & I915_SET_COLORKEY_SOURCE) - plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; - /* Sizes are 0 based */ src_w--; src_h--; @@ -361,32 +343,15 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format) I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); } -static void -vlv_update_plane(struct drm_plane *dplane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_plane *intel_plane = to_intel_plane(dplane); - struct drm_framebuffer *fb = plane_state->base.fb; - enum pipe pipe = intel_plane->pipe; - enum plane_id plane_id = intel_plane->id; - u32 sprctl; - u32 
sprsurf_offset, linear_offset; + const struct drm_framebuffer *fb = plane_state->base.fb; unsigned int rotation = plane_state->base.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->base.src.x1 >> 16; - uint32_t y = plane_state->base.src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; - unsigned long irqflags; + u32 sprctl; - sprctl = SP_ENABLE; + sprctl = SP_ENABLE | SP_GAMMA_ENABLE; switch (fb->format->format) { case DRM_FORMAT_YUYV: @@ -423,20 +388,10 @@ vlv_update_plane(struct drm_plane *dplane, sprctl |= SP_FORMAT_RGBA8888; break; default: - /* - * If we get here one of the upper layers failed to filter - * out the unsupported plane formats - */ - BUG(); - break; + MISSING_CASE(fb->format->format); + return 0; } - /* - * Enable gamma to match primary/cursor plane behaviour. - * FIXME should be user controllable via propertiesa. - */ - sprctl |= SP_GAMMA_ENABLE; - if (fb->modifier == I915_FORMAT_MOD_X_TILED) sprctl |= SP_TILED; @@ -449,22 +404,36 @@ vlv_update_plane(struct drm_plane *dplane, if (key->flags & I915_SET_COLORKEY_SOURCE) sprctl |= SP_SOURCE_KEY; + return sprctl; +} + +static void +vlv_update_plane(struct drm_plane *dplane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_device *dev = dplane->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_plane *intel_plane = to_intel_plane(dplane); + struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = intel_plane->pipe; + enum plane_id plane_id = intel_plane->id; + u32 sprctl = plane_state->ctl; + u32 sprsurf_offset = plane_state->main.offset; + u32 linear_offset; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + unsigned long irqflags; + /* Sizes are 0 based */ - src_w--; - src_h--; crtc_w--; crtc_h--; - intel_add_fb_offsets(&x, &y, plane_state, 0); - sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - - if (rotation & DRM_ROTATE_180) { - x += src_w; - y += src_h; - } else if (rotation & DRM_REFLECT_X) { - x += src_w; - } - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -516,31 +485,23 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void -ivb_update_plane(struct drm_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_plane *intel_plane = to_intel_plane(plane); - struct drm_framebuffer *fb = plane_state->base.fb; - enum pipe pipe = intel_plane->pipe; - u32 sprctl, sprscale = 0; - u32 sprsurf_offset, linear_offset; + struct drm_i915_private *dev_priv = + 
to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; unsigned int rotation = plane_state->base.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->base.src.x1 >> 16; - uint32_t y = plane_state->base.src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; - unsigned long irqflags; + u32 sprctl; + + sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE; + + if (IS_IVYBRIDGE(dev_priv)) + sprctl |= SPRITE_TRICKLE_FEED_DISABLE; - sprctl = SPRITE_ENABLE; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + sprctl |= SPRITE_PIPE_CSC_ENABLE; switch (fb->format->format) { case DRM_FORMAT_XBGR8888: @@ -562,34 +523,48 @@ ivb_update_plane(struct drm_plane *plane, sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; break; default: - BUG(); + MISSING_CASE(fb->format->format); + return 0; } - /* - * Enable gamma to match primary/cursor plane behaviour. - * FIXME should be user controllable via propertiesa. - */ - sprctl |= SPRITE_GAMMA_ENABLE; - if (fb->modifier == I915_FORMAT_MOD_X_TILED) sprctl |= SPRITE_TILED; if (rotation & DRM_ROTATE_180) sprctl |= SPRITE_ROTATE_180; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; - else - sprctl |= SPRITE_TRICKLE_FEED_DISABLE; - - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - sprctl |= SPRITE_PIPE_CSC_ENABLE; - if (key->flags & I915_SET_COLORKEY_DESTINATION) sprctl |= SPRITE_DEST_KEY; else if (key->flags & I915_SET_COLORKEY_SOURCE) sprctl |= SPRITE_SOURCE_KEY; + return sprctl; +} + +static void +ivb_update_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = intel_plane->pipe; + u32 sprctl = plane_state->ctl, sprscale = 0; + u32 sprsurf_offset = plane_state->main.offset; + u32 linear_offset; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + unsigned long irqflags; + /* Sizes are 0 based */ src_w--; src_h--; @@ -599,16 +574,6 @@ ivb_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - intel_add_fb_offsets(&x, &y, plane_state, 0); - sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - - /* HSW+ does this automagically in hardware */ - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) && - rotation & DRM_ROTATE_180) { - x += src_w; - y += src_h; - } - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -664,31 +629,20 @@ ivb_disable_plane(struct drm_plane 
*plane, struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void -ilk_update_plane(struct drm_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_plane *intel_plane = to_intel_plane(plane); - struct drm_framebuffer *fb = plane_state->base.fb; - int pipe = intel_plane->pipe; - u32 dvscntr, dvsscale; - u32 dvssurf_offset, linear_offset; + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; unsigned int rotation = plane_state->base.rotation; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - int crtc_x = plane_state->base.dst.x1; - int crtc_y = plane_state->base.dst.y1; - uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); - uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); - uint32_t x = plane_state->base.src.x1 >> 16; - uint32_t y = plane_state->base.src.y1 >> 16; - uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; - uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; - unsigned long irqflags; + u32 dvscntr; - dvscntr = DVS_ENABLE; + dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE; + + if (IS_GEN6(dev_priv)) + dvscntr |= DVS_TRICKLE_FEED_DISABLE; switch (fb->format->format) { case DRM_FORMAT_XBGR8888: @@ -710,47 +664,57 @@ ilk_update_plane(struct drm_plane *plane, dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; break; default: - BUG(); + MISSING_CASE(fb->format->format); + return 0; } - /* - * Enable gamma to match primary/cursor plane behaviour. - * FIXME should be user controllable via propertiesa. 
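The sprite refactor in these hunks moves the computation of the control word (sprctl/dvscntr) out of the update path and into the atomic check phase, where it is stored in the plane state. A schematic of that check/commit split, with all names illustrative rather than the driver's:

#include <stdint.h>
#include <stdio.h>

struct plane_state {
	uint32_t format;	/* fourcc-like id */
	int rotated_180;
	uint32_t ctl;		/* precomputed control word */
};

static uint32_t compute_ctl(const struct plane_state *state)
{
	uint32_t ctl = 1u << 31;	/* ENABLE */

	if (state->rotated_180)
		ctl |= 1u << 15;	/* ROTATE_180 */
	ctl |= state->format << 26;	/* format field */

	return ctl;
}

static int check_plane(struct plane_state *state)
{
	/* check phase: may sleep, may fail, no hardware touched yet */
	state->ctl = compute_ctl(state);
	return 0;
}

static void commit_plane(const struct plane_state *state)
{
	/* commit phase (under the uncore lock in the real driver):
	 * nothing left to compute, just register writes */
	printf("write CTL = %#x\n", state->ctl);
}

int main(void)
{
	struct plane_state state = { .format = 2, .rotated_180 = 1 };

	if (!check_plane(&state))
		commit_plane(&state);
	return 0;
}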
- */ - dvscntr |= DVS_GAMMA_ENABLE; - if (fb->modifier == I915_FORMAT_MOD_X_TILED) dvscntr |= DVS_TILED; if (rotation & DRM_ROTATE_180) dvscntr |= DVS_ROTATE_180; - if (IS_GEN6(dev_priv)) - dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ - if (key->flags & I915_SET_COLORKEY_DESTINATION) dvscntr |= DVS_DEST_KEY; else if (key->flags & I915_SET_COLORKEY_SOURCE) dvscntr |= DVS_SOURCE_KEY; + return dvscntr; +} + +static void +ilk_update_plane(struct drm_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *fb = plane_state->base.fb; + int pipe = intel_plane->pipe; + u32 dvscntr = plane_state->ctl, dvsscale = 0; + u32 dvssurf_offset = plane_state->main.offset; + u32 linear_offset; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); + uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; + uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; + unsigned long irqflags; + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - dvsscale = 0; if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - intel_add_fb_offsets(&x, &y, plane_state, 0); - dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0); - - if (rotation & DRM_ROTATE_180) { - x += src_w; - y += src_h; - } - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -981,6 +945,26 @@ intel_check_sprite_plane(struct drm_plane *plane, ret = skl_check_plane_surface(state); if (ret) return ret; + + state->ctl = skl_plane_ctl(crtc_state, state); + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + ret = i9xx_check_plane_surface(state); + if (ret) + return ret; + + state->ctl = vlv_sprite_ctl(crtc_state, state); + } else if (INTEL_GEN(dev_priv) >= 7) { + ret = i9xx_check_plane_surface(state); + if (ret) + return ret; + + state->ctl = ivb_sprite_ctl(crtc_state, state); + } else { + ret = i9xx_check_plane_surface(state); + if (ret) + return ret; + + state->ctl = ilk_sprite_ctl(crtc_state, state); } return 0; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index d15a7d9d4eb0..c117424f1f50 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -26,6 +26,19 @@ #include "intel_uc.h" #include <linux/firmware.h> +/* Cleans up uC firmware by releasing the firmware GEM obj. + */ +static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw) +{ + struct drm_i915_gem_object *obj; + + obj = fetch_and_zero(&uc_fw->obj); + if (obj) + i915_gem_object_put(obj); + + uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; +} + /* Reset GuC providing us with fresh state for both GuC and HuC. 
*/ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) @@ -83,23 +96,166 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) void intel_uc_init_early(struct drm_i915_private *dev_priv) { - mutex_init(&dev_priv->guc.send_mutex); + struct intel_guc *guc = &dev_priv->guc; + + mutex_init(&guc->send_mutex); + guc->send = intel_guc_send_mmio; +} + +static void fetch_uc_fw(struct drm_i915_private *dev_priv, + struct intel_uc_fw *uc_fw) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + struct drm_i915_gem_object *obj; + const struct firmware *fw = NULL; + struct uc_css_header *css; + size_t size; + int err; + + if (!uc_fw->path) + return; + + uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; + + DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n", + intel_uc_fw_status_repr(uc_fw->fetch_status)); + + err = request_firmware(&fw, uc_fw->path, &pdev->dev); + if (err) + goto fail; + if (!fw) + goto fail; + + DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n", + uc_fw->path, fw); + + /* Check the size of the blob before examining buffer contents */ + if (fw->size < sizeof(struct uc_css_header)) { + DRM_NOTE("Firmware header is missing\n"); + goto fail; + } + + css = (struct uc_css_header *)fw->data; + + /* Firmware bits always start from header */ + uc_fw->header_offset = 0; + uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - + css->key_size_dw - css->exponent_size_dw) * sizeof(u32); + + if (uc_fw->header_size != sizeof(struct uc_css_header)) { + DRM_NOTE("CSS header definition mismatch\n"); + goto fail; + } + + /* then, uCode */ + uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size; + uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); + + /* now RSA */ + if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) { + DRM_NOTE("RSA key size is bad\n"); + goto fail; + } + uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size; + uc_fw->rsa_size = css->key_size_dw * sizeof(u32); + + /* At least, it should have header, uCode and RSA. Size of all three. */ + size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size; + if (fw->size < size) { + DRM_NOTE("Missing firmware components\n"); + goto fail; + } + + /* + * The GuC firmware image has the version number embedded at a + * well-known offset within the firmware blob; note that major / minor + * version are TWO bytes each (i.e. u16), although all pointers and + * offsets are defined in terms of bytes (u8). + */ + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_GUC: + /* Header and uCode will be loaded to WOPCM. Size of the two. */ + size = uc_fw->header_size + uc_fw->ucode_size; + + /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). 
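A worked example of the firmware layout parsing in fetch_uc_fw() above: all section sizes in the CSS header are in dwords, the header is followed by the uCode and then the RSA signature. The field values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

struct css_header_sizes {
	uint32_t header_size_dw;
	uint32_t size_dw;		/* total image, in dwords */
	uint32_t modulus_size_dw;
	uint32_t key_size_dw;
	uint32_t exponent_size_dw;
};

int main(void)
{
	struct css_header_sizes css = {
		.header_size_dw = 0xa1,	/* illustrative values */
		.size_dw = 0x4000,
		.modulus_size_dw = 0x40,
		.key_size_dw = 0x40,
		.exponent_size_dw = 0x1,
	};
	/* firmware bits always start from the header */
	uint32_t header_size = (css.header_size_dw - css.modulus_size_dw -
				css.key_size_dw - css.exponent_size_dw) *
			       (uint32_t)sizeof(uint32_t);
	uint32_t ucode_offset = 0 + header_size;
	uint32_t ucode_size = (css.size_dw - css.header_size_dw) *
			      (uint32_t)sizeof(uint32_t);
	uint32_t rsa_offset = ucode_offset + ucode_size;
	uint32_t rsa_size = css.key_size_dw * (uint32_t)sizeof(uint32_t);

	printf("header %u B, ucode @%u (%u B), rsa @%u (%u B)\n",
	       header_size, ucode_offset, ucode_size, rsa_offset, rsa_size);
	return 0;
}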
*/ + if (size > intel_guc_wopcm_size(dev_priv)) { + DRM_ERROR("Firmware is too large to fit in WOPCM\n"); + goto fail; + } + uc_fw->major_ver_found = css->guc.sw_version >> 16; + uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF; + break; + + case INTEL_UC_FW_TYPE_HUC: + uc_fw->major_ver_found = css->huc.sw_version >> 16; + uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF; + break; + + default: + DRM_ERROR("Unknown firmware type %d\n", uc_fw->type); + err = -ENOEXEC; + goto fail; + } + + if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) { + DRM_NOTE("Skipping %s firmware version check\n", + intel_uc_fw_type_repr(uc_fw->type)); + } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || + uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { + DRM_NOTE("%s firmware version %d.%d, required %d.%d\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + err = -ENOEXEC; + goto fail; + } + + DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n", + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + + obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto fail; + } + + uc_fw->obj = obj; + uc_fw->size = fw->size; + + DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n", + uc_fw->obj); + + release_firmware(fw); + uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS; + return; + +fail: + DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n", + uc_fw->path, err); + DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n", + err, fw, uc_fw->obj); + + release_firmware(fw); /* OK even if fw is NULL */ + uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL; } void intel_uc_init_fw(struct drm_i915_private *dev_priv) { - if (dev_priv->huc.fw.path) - intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw); + fetch_uc_fw(dev_priv, &dev_priv->huc.fw); + fetch_uc_fw(dev_priv, &dev_priv->guc.fw); +} - if (dev_priv->guc.fw.path) - intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw); +void intel_uc_fini_fw(struct drm_i915_private *dev_priv) +{ + __intel_uc_fw_fini(&dev_priv->guc.fw); + __intel_uc_fw_fini(&dev_priv->huc.fw); } int intel_uc_init_hw(struct drm_i915_private *dev_priv) { int ret, attempts; - /* GuC not enabled, nothing to do */ if (!i915.enable_guc_loading) return 0; @@ -109,9 +265,13 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) i915_ggtt_enable_guc(dev_priv); if (i915.enable_guc_submission) { + /* + * This is stuff we need to have available at fw load time + * if we are planning to enable submission later + */ ret = i915_guc_submission_init(dev_priv); if (ret) - goto err; + goto err_guc; } /* WaEnableuKernelHeaderValidFix:skl */ @@ -150,7 +310,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) ret = i915_guc_submission_enable(dev_priv); if (ret) - goto err_submission; + goto err_interrupts; } return 0; @@ -164,11 +324,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) * nonfatal error (i.e. it doesn't prevent driver load, but * marks the GPU as wedged until reset). 
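The version gate above is deliberately asymmetric: the major number must match exactly (a major bump means an incompatible interface), while the minor number found may be newer than the one wanted (minor bumps are additive). A hypothetical helper restating the policy, with names taken from the struct fields used above:

static bool uc_fw_version_ok(const struct intel_uc_fw *uc_fw)
{
	/* wanting 0.0 means the check is explicitly skipped */
	if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0)
		return true;

	return uc_fw->major_ver_found == uc_fw->major_ver_wanted &&
	       uc_fw->minor_ver_found >= uc_fw->minor_ver_wanted;
}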
*/ +err_interrupts: + gen9_disable_guc_interrupts(dev_priv); err_submission: if (i915.enable_guc_submission) i915_guc_submission_fini(dev_priv); - -err: +err_guc: i915_ggtt_disable_guc(dev_priv); DRM_ERROR("GuC init failed\n"); @@ -185,11 +346,24 @@ err: return ret; } +void intel_uc_fini_hw(struct drm_i915_private *dev_priv) +{ + if (!i915.enable_guc_loading) + return; + + if (i915.enable_guc_submission) { + i915_guc_submission_disable(dev_priv); + gen9_disable_guc_interrupts(dev_priv); + i915_guc_submission_fini(dev_priv); + } + i915_ggtt_disable_guc(dev_priv); +} + /* * Read GuC command/status register (SOFT_SCRATCH_0) * Return true if it contains a response rather than a command */ -static bool intel_guc_recv(struct intel_guc *guc, u32 *status) +static bool guc_recv(struct intel_guc *guc, u32 *status) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -198,7 +372,10 @@ static bool intel_guc_recv(struct intel_guc *guc, u32 *status) return INTEL_GUC_RECV_IS_RESPONSE(val); } -int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) +/* + * This function implements the MMIO based host to GuC interface. + */ +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len) { struct drm_i915_private *dev_priv = guc_to_i915(guc); u32 status; @@ -209,7 +386,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) return -EINVAL; mutex_lock(&guc->send_mutex); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER); dev_priv->guc.action_count += 1; dev_priv->guc.action_cmd = action[0]; @@ -226,9 +403,9 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) * up to that length of time, then switch to a slower sleep-wait loop. * No intel_guc_send command should ever take longer than 10ms.
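That comment describes a two-phase wait: a short busy-wait catches the common fast responses without scheduling, and only then does the code fall back to a sleeping poll. Open-coded under those stated timeouts, the shape is roughly the function below (guc_wait_response() is illustrative; the real code uses the wait_for_us()/wait_for() macros):

static int guc_wait_response(struct intel_guc *guc, u32 *status)
{
	unsigned int t;

	for (t = 0; t < 10; t++) {		/* phase 1: busy-wait, ~10 us */
		if (guc_recv(guc, status))
			return 0;
		udelay(1);
	}

	for (t = 0; t < 10; t++) {		/* phase 2: sleep-wait, ~10 ms */
		usleep_range(1000, 1500);
		if (guc_recv(guc, status))
			return 0;
	}

	return -ETIMEDOUT;
}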
*/ - ret = wait_for_us(intel_guc_recv(guc, &status), 10); + ret = wait_for_us(guc_recv(guc, &status), 10); if (ret) - ret = wait_for(intel_guc_recv(guc, &status), 10); + ret = wait_for(guc_recv(guc, &status), 10); if (status != INTEL_GUC_STATUS_SUCCESS) { /* * Either the GuC explicitly returned an error (which @@ -247,7 +424,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) } dev_priv->guc.action_status = status; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER); mutex_unlock(&guc->send_mutex); return ret; @@ -268,136 +445,3 @@ int intel_guc_sample_forcewake(struct intel_guc *guc) return intel_guc_send(guc, action, ARRAY_SIZE(action)); } - -void intel_uc_prepare_fw(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - struct drm_i915_gem_object *obj; - const struct firmware *fw = NULL; - struct uc_css_header *css; - size_t size; - int err; - - uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; - - DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n", - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - err = request_firmware(&fw, uc_fw->path, &pdev->dev); - if (err) - goto fail; - if (!fw) - goto fail; - - DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n", - uc_fw->path, fw); - - /* Check the size of the blob before examining buffer contents */ - if (fw->size < sizeof(struct uc_css_header)) { - DRM_NOTE("Firmware header is missing\n"); - goto fail; - } - - css = (struct uc_css_header *)fw->data; - - /* Firmware bits always start from header */ - uc_fw->header_offset = 0; - uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - - css->key_size_dw - css->exponent_size_dw) * sizeof(u32); - - if (uc_fw->header_size != sizeof(struct uc_css_header)) { - DRM_NOTE("CSS header definition mismatch\n"); - goto fail; - } - - /* then, uCode */ - uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size; - uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); - - /* now RSA */ - if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) { - DRM_NOTE("RSA key size is bad\n"); - goto fail; - } - uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size; - uc_fw->rsa_size = css->key_size_dw * sizeof(u32); - - /* At least, it should have header, uCode and RSA. Size of all three. */ - size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size; - if (fw->size < size) { - DRM_NOTE("Missing firmware components\n"); - goto fail; - } - - /* - * The GuC firmware image has the version number embedded at a - * well-known offset within the firmware blob; note that major / minor - * version are TWO bytes each (i.e. u16), although all pointers and - * offsets are defined in terms of bytes (u8). - */ - switch (uc_fw->type) { - case INTEL_UC_FW_TYPE_GUC: - /* Header and uCode will be loaded to WOPCM. Size of the two. */ - size = uc_fw->header_size + uc_fw->ucode_size; - - /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). 
*/ - if (size > intel_guc_wopcm_size(dev_priv)) { - DRM_ERROR("Firmware is too large to fit in WOPCM\n"); - goto fail; - } - uc_fw->major_ver_found = css->guc.sw_version >> 16; - uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF; - break; - - case INTEL_UC_FW_TYPE_HUC: - uc_fw->major_ver_found = css->huc.sw_version >> 16; - uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF; - break; - - default: - DRM_ERROR("Unknown firmware type %d\n", uc_fw->type); - err = -ENOEXEC; - goto fail; - } - - if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) { - DRM_NOTE("Skipping uC firmware version check\n"); - } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || - uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { - DRM_NOTE("uC firmware version %d.%d, required %d.%d\n", - uc_fw->major_ver_found, uc_fw->minor_ver_found, - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); - err = -ENOEXEC; - goto fail; - } - - DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n", - uc_fw->major_ver_found, uc_fw->minor_ver_found, - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); - - obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto fail; - } - - uc_fw->obj = obj; - uc_fw->size = fw->size; - - DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n", - uc_fw->obj); - - release_firmware(fw); - uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS; - return; - -fail: - DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n", - uc_fw->path, err); - DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n", - err, fw, uc_fw->obj); - - release_firmware(fw); /* OK even if fw is NULL */ - uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL; -} diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index a35ededfaa40..4b7f73aeddac 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -34,7 +34,9 @@ struct drm_i915_gem_request; /* * This structure primarily describes the GEM object shared with the GuC. - * The GEM object is held for the entire lifetime of our interaction with + * The specs sometimes refer to this object as a "GuC context", but we use + * the term "client" to avoid confusion with hardware contexts. This + * GEM object is held for the entire lifetime of our interaction with * the GuC, being allocated before the GuC is loaded with its firmware. * Because there's no way to update the address used by the GuC after * initialisation, the shared object must stay pinned into the GGTT as @@ -44,7 +46,7 @@ struct drm_i915_gem_request; * * The single GEM object described here is actually made up of several * separate areas, as far as the GuC is concerned. The first page (kept - * kmap'd) includes the "process decriptor" which holds sequence data for + * kmap'd) includes the "process descriptor" which holds sequence data for * the doorbell, and one cacheline which actually *is* the doorbell; a * write to this will "ring the doorbell" (i.e. send an interrupt to the * GuC). 
The subsequent pages of the client object constitute the work @@ -72,13 +74,12 @@ struct i915_guc_client { uint32_t engines; /* bitmap of (host) engine ids */ uint32_t priority; - uint32_t ctx_index; + u32 stage_id; uint32_t proc_desc_offset; - uint32_t doorbell_offset; - uint32_t doorbell_cookie; - uint16_t doorbell_id; - uint16_t padding[3]; /* Maintain alignment */ + u16 doorbell_id; + unsigned long doorbell_offset; + u32 doorbell_cookie; spinlock_t wq_lock; uint32_t wq_offset; @@ -100,11 +101,40 @@ enum intel_uc_fw_status { INTEL_UC_FIRMWARE_SUCCESS }; +/* User-friendly representation of an enum */ +static inline +const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) +{ + switch (status) { + case INTEL_UC_FIRMWARE_FAIL: + return "FAIL"; + case INTEL_UC_FIRMWARE_NONE: + return "NONE"; + case INTEL_UC_FIRMWARE_PENDING: + return "PENDING"; + case INTEL_UC_FIRMWARE_SUCCESS: + return "SUCCESS"; + } + return "<invalid>"; +} + enum intel_uc_fw_type { INTEL_UC_FW_TYPE_GUC, INTEL_UC_FW_TYPE_HUC }; +/* User-friendly representation of an enum */ +static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) +{ + switch (type) { + case INTEL_UC_FW_TYPE_GUC: + return "GuC"; + case INTEL_UC_FW_TYPE_HUC: + return "HuC"; + } + return "uC"; +} + /* * This structure encapsulates all the data needed during the process * of fetching, caching, and loading the firmware image into the GuC. @@ -133,11 +163,13 @@ struct intel_uc_fw { struct intel_guc_log { uint32_t flags; struct i915_vma *vma; - void *buf_addr; - struct workqueue_struct *flush_wq; - struct work_struct flush_work; - struct rchan *relay_chan; - + /* The runtime stuff gets created only when GuC logging gets enabled */ + struct { + void *buf_addr; + struct workqueue_struct *flush_wq; + struct work_struct flush_work; + struct rchan *relay_chan; + } runtime; /* logging related stats */ u32 capture_miss_count; u32 flush_interrupt_count; @@ -154,12 +186,13 @@ struct intel_guc { bool interrupts_enabled; struct i915_vma *ads_vma; - struct i915_vma *ctx_pool_vma; - struct ida ctx_ids; + struct i915_vma *stage_desc_pool; + void *stage_desc_pool_vaddr; + struct ida stage_ids; struct i915_guc_client *execbuf_client; - DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS); + DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); uint32_t db_cacheline; /* Cyclic counter mod pagesize */ /* Action status & statistics */ @@ -174,6 +207,9 @@ struct intel_guc { /* To serialize the intel_guc_send actions */ struct mutex send_mutex; + + /* GuC's FW specific send function */ + int (*send)(struct intel_guc *guc, const u32 *data, u32 len); }; struct intel_huc { @@ -187,17 +223,19 @@ struct intel_huc { void intel_uc_sanitize_options(struct drm_i915_private *dev_priv); void intel_uc_init_early(struct drm_i915_private *dev_priv); void intel_uc_init_fw(struct drm_i915_private *dev_priv); +void intel_uc_fini_fw(struct drm_i915_private *dev_priv); int intel_uc_init_hw(struct drm_i915_private *dev_priv); -void intel_uc_prepare_fw(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw); -int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len); +void intel_uc_fini_hw(struct drm_i915_private *dev_priv); int intel_guc_sample_forcewake(struct intel_guc *guc); +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len); +static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) +{ + return guc->send(guc, action, len); +} /* intel_guc_loader.c */ int intel_guc_select_fw(struct intel_guc *guc); int 
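The send() hook added above decouples every caller from the transport: intel_uc_init_early() wires in the MMIO backend, and the static inline wrapper dispatches through the pointer. That makes an alternative transport a drop-in change (intel_guc_send_ct below is hypothetical, named only to illustrate the point):

	/* in intel_uc_init_early(): */
	guc->send = intel_guc_send_mmio;	/* SOFT_SCRATCH MMIO protocol */

	/* a later backend could be swapped in without touching callers: */
	guc->send = intel_guc_send_ct;		/* hypothetical CT-buffer path */

	/* all callers keep using the wrapper: */
	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));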
intel_guc_init_hw(struct intel_guc *guc); -void intel_guc_fini(struct drm_i915_private *dev_priv); -const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status); int intel_guc_suspend(struct drm_i915_private *dev_priv); int intel_guc_resume(struct drm_i915_private *dev_priv); u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); @@ -212,10 +250,11 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); /* intel_guc_log.c */ -void intel_guc_log_create(struct intel_guc *guc); +int intel_guc_log_create(struct intel_guc *guc); +void intel_guc_log_destroy(struct intel_guc *guc); +int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val); void i915_guc_log_register(struct drm_i915_private *dev_priv); void i915_guc_log_unregister(struct drm_i915_private *dev_priv); -int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val); static inline u32 guc_ggtt_offset(struct i915_vma *vma) { @@ -227,7 +266,6 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma) /* intel_huc.c */ void intel_huc_select_fw(struct intel_huc *huc); -void intel_huc_fini(struct drm_i915_private *dev_priv); int intel_huc_init_hw(struct intel_huc *huc); void intel_guc_auth_huc(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 09f5f02d7901..6d1ea26b2493 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -52,10 +52,10 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) } static inline void -fw_domain_reset(const struct intel_uncore_forcewake_domain *d) +fw_domain_reset(struct drm_i915_private *i915, + const struct intel_uncore_forcewake_domain *d) { - WARN_ON(!i915_mmio_reg_valid(d->reg_set)); - __raw_i915_write32(d->i915, d->reg_set, d->val_reset); + __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset); } static inline void @@ -69,9 +69,10 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) } static inline void -fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_clear(const struct drm_i915_private *i915, + const struct intel_uncore_forcewake_domain *d) { - if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) & + if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & FORCEWAKE_KERNEL) == 0, FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", @@ -79,15 +80,17 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) } static inline void -fw_domain_get(const struct intel_uncore_forcewake_domain *d) +fw_domain_get(struct drm_i915_private *i915, + const struct intel_uncore_forcewake_domain *d) { - __raw_i915_write32(d->i915, d->reg_set, d->val_set); + __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set); } static inline void -fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack(const struct drm_i915_private *i915, + const struct intel_uncore_forcewake_domain *d) { - if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) & + if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & FORCEWAKE_KERNEL), FORCEWAKE_ACK_TIMEOUT_MS)) DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", @@ -95,72 +98,59 @@ fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d) } static inline void -fw_domain_put(const struct intel_uncore_forcewake_domain *d) +fw_domain_put(const struct 
drm_i915_private *i915, + const struct intel_uncore_forcewake_domain *d) { - __raw_i915_write32(d->i915, d->reg_set, d->val_clear); -} - -static inline void -fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d) -{ - /* something from same cacheline, but not from the set register */ - if (i915_mmio_reg_valid(d->reg_post)) - __raw_posting_read(d->i915, d->reg_post); + __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear); } static void -fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) +fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; + unsigned int tmp; - for_each_fw_domain_masked(d, fw_domains, dev_priv) { - fw_domain_wait_ack_clear(d); - fw_domain_get(d); + GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + + for_each_fw_domain_masked(d, fw_domains, i915, tmp) { + fw_domain_wait_ack_clear(i915, d); + fw_domain_get(i915, d); } - for_each_fw_domain_masked(d, fw_domains, dev_priv) - fw_domain_wait_ack(d); + for_each_fw_domain_masked(d, fw_domains, i915, tmp) + fw_domain_wait_ack(i915, d); - dev_priv->uncore.fw_domains_active |= fw_domains; + i915->uncore.fw_domains_active |= fw_domains; } static void -fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) +fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; + unsigned int tmp; - for_each_fw_domain_masked(d, fw_domains, dev_priv) { - fw_domain_put(d); - fw_domain_posting_read(d); - } - - dev_priv->uncore.fw_domains_active &= ~fw_domains; -} + GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); -static void -fw_domains_posting_read(struct drm_i915_private *dev_priv) -{ - struct intel_uncore_forcewake_domain *d; + for_each_fw_domain_masked(d, fw_domains, i915, tmp) + fw_domain_put(i915, d); - /* No need to do for all, just do for first found */ - for_each_fw_domain(d, dev_priv) { - fw_domain_posting_read(d); - break; - } + i915->uncore.fw_domains_active &= ~fw_domains; } static void -fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) +fw_domains_reset(struct drm_i915_private *i915, + enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; + unsigned int tmp; - if (dev_priv->uncore.fw_domains == 0) + if (!fw_domains) return; - for_each_fw_domain_masked(d, fw_domains, dev_priv) - fw_domain_reset(d); + GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); - fw_domains_posting_read(dev_priv); + for_each_fw_domain_masked(d, fw_domains, i915, tmp) + fw_domain_reset(i915, d); } static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) @@ -236,7 +226,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) { struct intel_uncore_forcewake_domain *domain = container_of(timer, struct intel_uncore_forcewake_domain, timer); - struct drm_i915_private *dev_priv = domain->i915; + struct drm_i915_private *dev_priv = + container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]); unsigned long irqflags; assert_rpm_device_not_suspended(dev_priv); @@ -266,9 +257,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, * timers are run before holding. 
*/ while (1) { + unsigned int tmp; + active_domains = 0; - for_each_fw_domain(domain, dev_priv) { + for_each_fw_domain(domain, dev_priv, tmp) { if (hrtimer_cancel(&domain->timer) == 0) continue; @@ -277,7 +270,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - for_each_fw_domain(domain, dev_priv) { + for_each_fw_domain(domain, dev_priv, tmp) { if (hrtimer_active(&domain->timer)) active_domains |= domain->mask; } @@ -300,7 +293,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, if (fw) dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); - fw_domains_reset(dev_priv, FORCEWAKE_ALL); + fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains); if (restore) { /* If reset with a user forcewake, try to restore */ if (fw) @@ -457,13 +450,13 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; + unsigned int tmp; fw_domains &= dev_priv->uncore.fw_domains; - for_each_fw_domain_masked(domain, fw_domains, dev_priv) { + for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) if (domain->wake_count++) fw_domains &= ~domain->mask; - } if (fw_domains) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); @@ -520,10 +513,11 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; + unsigned int tmp; fw_domains &= dev_priv->uncore.fw_domains; - for_each_fw_domain_masked(domain, fw_domains, dev_priv) { + for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) { if (WARN_ON(domain->wake_count == 0)) continue; @@ -928,8 +922,11 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; + unsigned int tmp; + + GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains); - for_each_fw_domain_masked(domain, fw_domains, dev_priv) + for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) fw_domain_arm_timer(domain); dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); @@ -1141,41 +1138,27 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, WARN_ON(d->wake_count); + WARN_ON(!i915_mmio_reg_valid(reg_set)); + WARN_ON(!i915_mmio_reg_valid(reg_ack)); + d->wake_count = 0; d->reg_set = reg_set; d->reg_ack = reg_ack; - if (IS_GEN6(dev_priv)) { - d->val_reset = 0; - d->val_set = FORCEWAKE_KERNEL; - d->val_clear = 0; - } else { - /* WaRsClearFWBitsAtReset:bdw,skl */ - d->val_reset = _MASKED_BIT_DISABLE(0xffff); - d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); - d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); - } - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - d->reg_post = FORCEWAKE_ACK_VLV; - else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) - d->reg_post = ECOBUS; - - d->i915 = dev_priv; d->id = domain_id; BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); - d->mask = 1 << domain_id; + d->mask = BIT(domain_id); hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); d->timer.function = intel_uncore_fw_release_timer; - dev_priv->uncore.fw_domains |= (1 << domain_id); + dev_priv->uncore.fw_domains |= BIT(domain_id); - fw_domain_reset(d); + fw_domain_reset(dev_priv, d); } static void 
intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) @@ -1183,6 +1166,17 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) return; + if (IS_GEN6(dev_priv)) { + dev_priv->uncore.fw_reset = 0; + dev_priv->uncore.fw_set = FORCEWAKE_KERNEL; + dev_priv->uncore.fw_clear = 0; + } else { + /* WaRsClearFWBitsAtReset:bdw,skl */ + dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff); + dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); + dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); + } + if (IS_GEN9(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; @@ -1246,9 +1240,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) FORCEWAKE_MT, FORCEWAKE_MT_ACK); spin_lock_irq(&dev_priv->uncore.lock); - fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); + fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER); ecobus = __raw_i915_read32(dev_priv, ECOBUS); - fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); + fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER); spin_unlock_irq(&dev_priv->uncore.lock); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c index 926b24c117d6..98b7aac41eec 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c @@ -291,8 +291,6 @@ static int begin_live_test(struct live_test *t, return err; } - i915_gem_retire_requests(i915); - i915->gpu_error.missed_irq_rings = 0; t->reset_count = i915_reset_count(&i915->gpu_error); @@ -303,7 +301,9 @@ static int end_live_test(struct live_test *t) { struct drm_i915_private *i915 = t->i915; - if (wait_for(intel_engines_are_idle(i915), 1)) { + i915_gem_retire_requests(i915); + + if (wait_for(intel_engines_are_idle(i915), 10)) { pr_err("%s(%s): GPU not idle\n", t->func, t->name); return -EIO; } diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 6ec7c731a267..aa31d6c0cdfb 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -235,7 +235,6 @@ static void hang_fini(struct hang *h) i915_gem_object_put(h->hws); i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED); - i915_gem_retire_requests(h->i915); } static int igt_hang_sanitycheck(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c index 99da8f4ef497..302f7d103635 100644 --- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c @@ -129,10 +129,10 @@ static const struct dma_buf_ops mock_dmabuf_ops = { .map_dma_buf = mock_map_dma_buf, .unmap_dma_buf = mock_unmap_dma_buf, .release = mock_dmabuf_release, - .kmap = mock_dmabuf_kmap, - .kmap_atomic = mock_dmabuf_kmap_atomic, - .kunmap = mock_dmabuf_kunmap, - .kunmap_atomic = mock_dmabuf_kunmap_atomic, + .map = mock_dmabuf_kmap, + .map_atomic = mock_dmabuf_kmap_atomic, + .unmap = mock_dmabuf_kunmap, + .unmap_atomic = mock_dmabuf_kunmap_atomic, .mmap = mock_dmabuf_mmap, .vmap = mock_dmabuf_vmap, .vunmap = mock_dmabuf_vunmap, diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c index 113dec05c7dc..09c704153456 100644 --- a/drivers/gpu/drm/i915/selftests/mock_drm.c +++ 
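The fw_set/fw_clear values chosen above rely on the masked-write convention of the multithreaded forcewake registers: the upper 16 bits select which bits the hardware latches, the lower 16 carry the values, so unrelated bits stay untouched. The i915 helpers are essentially:

#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

	/* gen7+: fw_set latches FORCEWAKE_KERNEL on, fw_clear latches it
	 * off, and fw_reset clears every bit in one masked write */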
b/drivers/gpu/drm/i915/selftests/mock_drm.c @@ -24,31 +24,50 @@ #include "mock_drm.h" -static inline struct inode fake_inode(struct drm_i915_private *i915) -{ - return (struct inode){ .i_rdev = i915->drm.primary->index }; -} - struct drm_file *mock_file(struct drm_i915_private *i915) { - struct inode inode = fake_inode(i915); - struct file filp = {}; + struct file *filp; + struct inode *inode; struct drm_file *file; int err; - err = drm_open(&inode, &filp); - if (unlikely(err)) - return ERR_PTR(err); + inode = kzalloc(sizeof(*inode), GFP_KERNEL); + if (!inode) { + err = -ENOMEM; + goto err; + } + + inode->i_rdev = i915->drm.primary->index; - file = filp.private_data; + filp = kzalloc(sizeof(*filp), GFP_KERNEL); + if (!filp) { + err = -ENOMEM; + goto err_inode; + } + + err = drm_open(inode, filp); + if (err) + goto err_filp; + + file = filp->private_data; + memset(&file->filp, POISON_INUSE, sizeof(file->filp)); file->authenticated = true; + + kfree(filp); + kfree(inode); return file; + +err_filp: + kfree(filp); +err_inode: + kfree(inode); +err: + return ERR_PTR(err); } void mock_file_free(struct drm_i915_private *i915, struct drm_file *file) { - struct inode inode = fake_inode(i915); struct file filp = { .private_data = file }; - drm_release(&inode, &filp); + drm_release(NULL, &filp); } diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 8d5ba037064c..0ad624a1db90 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -118,7 +118,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->vaddr = (void *)(ring + 1); INIT_LIST_HEAD(&ring->request_list); - ring->last_retired_head = -1; intel_ring_update_space(ring); return ring; diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 0e8d2e7f8c70..8097e3693ec4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -35,7 +35,7 @@ mock_request(struct intel_engine_cs *engine, /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = i915_gem_request_alloc(engine, context); - if (!request) + if (IS_ERR(request)) return NULL; mock = container_of(request, typeof(*mock), base); diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c index eb2cda8e2b9f..1cc5d2931753 100644 --- a/drivers/gpu/drm/i915/selftests/scatterlist.c +++ b/drivers/gpu/drm/i915/selftests/scatterlist.c @@ -189,6 +189,13 @@ static unsigned int random(unsigned long n, return 1 + (prandom_u32_state(rnd) % 1024); } +static inline bool page_contiguous(struct page *first, + struct page *last, + unsigned long npages) +{ + return first + npages == last; +} + static int alloc_table(struct pfn_table *pt, unsigned long count, unsigned long max, npages_fn_t npages_fn, @@ -216,7 +223,9 @@ static int alloc_table(struct pfn_table *pt, unsigned long npages = npages_fn(n, count, rnd); /* Nobody expects the Sparse Memmap! 
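The small-looking mock_request() change above is a real bug fix: i915_gem_request_alloc() reports failure with ERR_PTR()-encoded pointers, never NULL, so the old '!request' test let an error pointer flow into container_of(). The idiom, for reference:

	struct drm_i915_gem_request *request;

	request = i915_gem_request_alloc(engine, context);
	if (IS_ERR(request))	/* failure is ERR_PTR(-errno), never NULL */
		return NULL;	/* mock_request() callers check for NULL */

	/* only from here on is 'request' safe to dereference */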
*/ - if (pfn_to_page(pfn + npages) != pfn_to_page(pfn) + npages) { + if (!page_contiguous(pfn_to_page(pfn), + pfn_to_page(pfn + npages), + npages)) { sg_free_table(&pt->st); return -ENOSPC; } diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index f2c9ae822149..c9e439c82241 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -31,13 +31,6 @@ config DRM_IMX_LDB Choose this to enable the internal LVDS Display Bridge (LDB) found on i.MX53 and i.MX6 processors. -config DRM_IMX_IPUV3 - tristate - depends on DRM_IMX - depends on IMX_IPUV3_CORE - default y if DRM_IMX=y - default m if DRM_IMX=m - config DRM_IMX_HDMI tristate "Freescale i.MX DRM HDMI" select DRM_DW_HDMI diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile index f3ecd8903d97..16ecef33e008 100644 --- a/drivers/gpu/drm/imx/Makefile +++ b/drivers/gpu/drm/imx/Makefile @@ -1,5 +1,5 @@ -imxdrm-objs := imx-drm-core.o +imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o obj-$(CONFIG_DRM_IMX) += imxdrm.o @@ -7,6 +7,5 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o -imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1888bf3920fc..50add2f9e250 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -422,7 +422,23 @@ static struct platform_driver imx_drm_pdrv = { .of_match_table = imx_drm_dt_ids, }, }; -module_platform_driver(imx_drm_pdrv); + +static struct platform_driver * const drivers[] = { + &imx_drm_pdrv, + &ipu_drm_driver, +}; + +static int __init imx_drm_init(void) +{ + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); +} +module_init(imx_drm_init); + +static void __exit imx_drm_exit(void) +{ + platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); +} +module_exit(imx_drm_exit); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("i.MX drm driver core"); diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 295434b199db..f6dd64be9cd5 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -29,6 +29,8 @@ int imx_drm_init_drm(struct platform_device *pdev, int preferred_bpp); int imx_drm_exit_drm(void); +extern struct platform_driver ipu_drm_driver; + void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index dab9d50ffd8c..5456c15d962c 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -465,16 +465,10 @@ static int ipu_drm_remove(struct platform_device *pdev) return 0; } -static struct platform_driver ipu_drm_driver = { +struct platform_driver ipu_drm_driver = { .driver = { .name = "imx-ipuv3-crtc", }, .probe = ipu_drm_probe, .remove = ipu_drm_remove, }; -module_platform_driver(ipu_drm_driver); - -MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); -MODULE_DESCRIPTION(DRIVER_DESC); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-ipuv3-crtc"); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index c70310206ac5..a14d7d64d7b1 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ 
-35,18 +35,28 @@ #define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n)) #define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n)) #define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n)) -#define DISP_REG_OVL_ADDR(n) (0x0f40 + 0x20 * (n)) +#define DISP_REG_OVL_ADDR_MT2701 0x0040 +#define DISP_REG_OVL_ADDR_MT8173 0x0f40 +#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n)) #define OVL_RDMA_MEM_GMC 0x40402020 #define OVL_CON_BYTE_SWAP BIT(24) -#define OVL_CON_CLRFMT_RGB565 (0 << 12) -#define OVL_CON_CLRFMT_RGB888 (1 << 12) +#define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_RGBA8888 (2 << 12) #define OVL_CON_CLRFMT_ARGB8888 (3 << 12) +#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ + 0 : OVL_CON_CLRFMT_RGB) +#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ + OVL_CON_CLRFMT_RGB : 0) #define OVL_CON_AEN BIT(8) #define OVL_CON_ALPHA 0xff +struct mtk_disp_ovl_data { + unsigned int addr; + bool fmt_rgb565_is_0; +}; + /** * struct mtk_disp_ovl - DISP_OVL driver structure * @ddp_comp - structure containing type enum and hardware resources @@ -55,8 +65,14 @@ struct mtk_disp_ovl { struct mtk_ddp_comp ddp_comp; struct drm_crtc *crtc; + const struct mtk_disp_ovl_data *data; }; +static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp) +{ + return container_of(comp, struct mtk_disp_ovl, ddp_comp); +} + static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id) { struct mtk_disp_ovl *priv = dev_id; @@ -76,20 +92,18 @@ static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id) static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp, struct drm_crtc *crtc) { - struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl, - ddp_comp); + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); - priv->crtc = crtc; + ovl->crtc = crtc; writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); } static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp) { - struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl, - ddp_comp); + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); - priv->crtc = NULL; + ovl->crtc = NULL; writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN); } @@ -138,18 +152,18 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx) writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); } -static unsigned int ovl_fmt_convert(unsigned int fmt) +static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) { switch (fmt) { default: case DRM_FORMAT_RGB565: - return OVL_CON_CLRFMT_RGB565; + return OVL_CON_CLRFMT_RGB565(ovl); case DRM_FORMAT_BGR565: - return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP; + return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP; case DRM_FORMAT_RGB888: - return OVL_CON_CLRFMT_RGB888; + return OVL_CON_CLRFMT_RGB888(ovl); case DRM_FORMAT_BGR888: - return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP; + return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP; case DRM_FORMAT_RGBX8888: case DRM_FORMAT_RGBA8888: return OVL_CON_CLRFMT_ARGB8888; @@ -168,6 +182,7 @@ static unsigned int ovl_fmt_convert(unsigned int fmt) static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) { + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); struct mtk_plane_pending_state *pending = &state->pending; unsigned int addr = pending->addr; unsigned int pitch = pending->pitch & 0xffff; @@ -179,7 +194,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned 
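The OVL_CON_CLRFMT_RGB565()/RGB888() macros above exist because the two SoCs disagree about which RGB format code means 565; the per-SoC flag simply swaps the two encodings:

	/*
	 *            RGB565 code    RGB888 code
	 *   mt8173    0 << 12        1 << 12     (fmt_rgb565_is_0 = true)
	 *   mt2701    1 << 12        0 << 12     (fmt_rgb565_is_0 = false)
	 */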
int idx, if (!pending->enable) mtk_ovl_layer_off(comp, idx); - con = ovl_fmt_convert(fmt); + con = ovl_fmt_convert(ovl, fmt); if (idx != 0) con |= OVL_CON_AEN | OVL_CON_ALPHA; @@ -187,7 +202,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx)); - writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx)); + writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx)); if (pending->enable) mtk_ovl_layer_on(comp, idx); @@ -264,6 +279,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) return ret; } + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, @@ -287,8 +304,21 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev) return 0; } +static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT2701, + .fmt_rgb565_is_0 = false, +}; + +static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = { + .addr = DISP_REG_OVL_ADDR_MT8173, + .fmt_rgb565_is_0 = true, +}; + static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = { - { .compatible = "mediatek,mt8173-disp-ovl", }, + { .compatible = "mediatek,mt2701-disp-ovl", + .data = &mt2701_ovl_driver_data}, + { .compatible = "mediatek,mt8173-disp-ovl", + .data = &mt8173_ovl_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c index 0df05f95b916..b68a51376f83 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c @@ -38,6 +38,11 @@ #define RDMA_FIFO_UNDERFLOW_EN BIT(31) #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16) #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16) +#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size) + +struct mtk_disp_rdma_data { + unsigned int fifo_size; +}; /** * struct mtk_disp_rdma - DISP_RDMA driver structure @@ -47,8 +52,14 @@ struct mtk_disp_rdma { struct mtk_ddp_comp ddp_comp; struct drm_crtc *crtc; + const struct mtk_disp_rdma_data *data; }; +static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp) +{ + return container_of(comp, struct mtk_disp_rdma, ddp_comp); +} + static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id) { struct mtk_disp_rdma *priv = dev_id; @@ -77,20 +88,18 @@ static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg, static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp, struct drm_crtc *crtc) { - struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma, - ddp_comp); + struct mtk_disp_rdma *rdma = comp_to_rdma(comp); - priv->crtc = crtc; + rdma->crtc = crtc; rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, RDMA_FRAME_END_INT); } static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp) { - struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma, - ddp_comp); + struct mtk_disp_rdma *rdma = comp_to_rdma(comp); - priv->crtc = NULL; + rdma->crtc = NULL; rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0); } @@ -111,6 +120,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, { unsigned int threshold; unsigned int reg; + struct mtk_disp_rdma *rdma = comp_to_rdma(comp); rdma_update_bits(comp, 
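The OVL driver above and the RDMA driver below follow the same per-SoC parameterisation pattern: the OF match table carries a pointer to a small static data struct, probe() fetches it once with of_device_get_match_data(), and everything SoC-specific (register offsets, FIFO sizes, format quirks) is read through that pointer. The generic shape, with illustrative names:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

struct my_soc_data { unsigned int fifo_size; };
struct my_priv { const struct my_soc_data *data; };

static const struct my_soc_data soca_data = { .fifo_size = SZ_4K };
static const struct my_soc_data socb_data = { .fifo_size = SZ_8K };

static const struct of_device_id my_dt_match[] = {
	{ .compatible = "vendor,soca-block", .data = &soca_data },
	{ .compatible = "vendor,socb-block", .data = &socb_data },
	{ /* sentinel */ },
};

static int my_probe(struct platform_device *pdev)
{
	struct my_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* one fetch at probe time; no chip checks sprinkled elsewhere */
	priv->data = of_device_get_match_data(&pdev->dev);
	platform_set_drvdata(pdev, priv);
	return 0;
}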
DISP_REG_RDMA_SIZE_CON_0, 0xfff, width); rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height); @@ -123,7 +133,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width, */ threshold = width * height * vrefresh * 4 * 7 / 1000000; reg = RDMA_FIFO_UNDERFLOW_EN | - RDMA_FIFO_PSEUDO_SIZE(SZ_8K) | + RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) | RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold); writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON); } @@ -208,6 +218,8 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev) return ret; } + priv->data = of_device_get_match_data(dev); + platform_set_drvdata(pdev, priv); ret = component_add(dev, &mtk_disp_rdma_component_ops); @@ -224,8 +236,19 @@ static int mtk_disp_rdma_remove(struct platform_device *pdev) return 0; } +static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = { + .fifo_size = SZ_4K, +}; + +static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = { + .fifo_size = SZ_8K, +}; + static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = { - { .compatible = "mediatek,mt8173-disp-rdma", }, + { .compatible = "mediatek,mt2701-disp-rdma", + .data = &mt2701_rdma_driver_data}, + { .compatible = "mediatek,mt8173-disp-rdma", + .data = &mt8173_rdma_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 69982f5a6198..6b08774e5501 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -327,6 +327,42 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) pm_runtime_put(drm->dev); } +static void mtk_crtc_ddp_config(struct drm_crtc *crtc) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); + struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0]; + unsigned int i; + + /* + * TODO: instead of updating the registers here, we should prepare + * working registers in atomic_commit and let the hardware command + * queue update module registers on vblank. 
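For concreteness, the RDMA FIFO threshold formula above worked through for a 1920x1080@60 mode (integer arithmetic, as in the code):

	/*
	 * threshold = 1920 * 1080 * 60 * 4 * 7 / 1000000 = 3483 bytes
	 * valid-FIFO-threshold field = 3483 / 16 = 217
	 *
	 * i.e. roughly 7 us worth of scanout traffic at 4 bytes per pixel,
	 * which even the mt2701's smaller 4 KiB FIFO can absorb.
	 */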
+ */ + if (state->pending_config) { + mtk_ddp_comp_config(ovl, state->pending_width, + state->pending_height, + state->pending_vrefresh, 0); + + state->pending_config = false; + } + + if (mtk_crtc->pending_planes) { + for (i = 0; i < OVL_LAYER_NR; i++) { + struct drm_plane *plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *plane_state; + + plane_state = to_mtk_plane_state(plane->state); + + if (plane_state->pending.config) { + mtk_ddp_comp_layer_config(ovl, i, plane_state); + plane_state->pending.config = false; + } + } + mtk_crtc->pending_planes = false; + } +} + static void mtk_drm_crtc_enable(struct drm_crtc *crtc) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); @@ -403,6 +439,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_drm_private *priv = crtc->dev->dev_private; unsigned int pending_planes = 0; int i; @@ -424,6 +461,12 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc, if (crtc->state->color_mgmt_changed) for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state); + + if (priv->data->shadow_register) { + mtk_disp_mutex_acquire(mtk_crtc->mutex); + mtk_crtc_ddp_config(crtc); + mtk_disp_mutex_release(mtk_crtc->mutex); + } } static const struct drm_crtc_funcs mtk_crtc_funcs = { @@ -471,36 +514,10 @@ err_cleanup_crtc: void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl) { struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); - struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); - unsigned int i; + struct mtk_drm_private *priv = crtc->dev->dev_private; - /* - * TODO: instead of updating the registers here, we should prepare - * working registers in atomic_commit and let the hardware command - * queue update module registers on vblank. 
- */ - if (state->pending_config) { - mtk_ddp_comp_config(ovl, state->pending_width, - state->pending_height, - state->pending_vrefresh, 0); - - state->pending_config = false; - } - - if (mtk_crtc->pending_planes) { - for (i = 0; i < OVL_LAYER_NR; i++) { - struct drm_plane *plane = &mtk_crtc->planes[i]; - struct mtk_plane_state *plane_state; - - plane_state = to_mtk_plane_state(plane->state); - - if (plane_state->pending.config) { - mtk_ddp_comp_layer_config(ovl, i, plane_state); - plane_state->pending.config = false; - } - } - mtk_crtc->pending_planes = false; - } + if (!priv->data->shadow_register) + mtk_crtc_ddp_config(crtc); mtk_drm_finish_page_flip(mtk_crtc); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 17ba9355a49c..8130f3dab661 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -12,6 +12,7 @@ */ #include <linux/clk.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> @@ -31,26 +32,40 @@ #define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8 #define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100 +#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030 +#define DISP_REG_CONFIG_OUT_SEL 0x04c +#define DISP_REG_CONFIG_DSI_SEL 0x050 + #define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) +#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) #define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) #define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n)) #define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n)) -#define MUTEX_MOD_DISP_OVL0 BIT(11) -#define MUTEX_MOD_DISP_OVL1 BIT(12) -#define MUTEX_MOD_DISP_RDMA0 BIT(13) -#define MUTEX_MOD_DISP_RDMA1 BIT(14) -#define MUTEX_MOD_DISP_RDMA2 BIT(15) -#define MUTEX_MOD_DISP_WDMA0 BIT(16) -#define MUTEX_MOD_DISP_WDMA1 BIT(17) -#define MUTEX_MOD_DISP_COLOR0 BIT(18) -#define MUTEX_MOD_DISP_COLOR1 BIT(19) -#define MUTEX_MOD_DISP_AAL BIT(20) -#define MUTEX_MOD_DISP_GAMMA BIT(21) -#define MUTEX_MOD_DISP_UFOE BIT(22) -#define MUTEX_MOD_DISP_PWM0 BIT(23) -#define MUTEX_MOD_DISP_PWM1 BIT(24) -#define MUTEX_MOD_DISP_OD BIT(25) +#define INT_MUTEX BIT(1) + +#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11) +#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12) +#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13) +#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14) +#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15) +#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16) +#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17) +#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18) +#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19) +#define MT8173_MUTEX_MOD_DISP_AAL BIT(20) +#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21) +#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22) +#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23) +#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24) +#define MT8173_MUTEX_MOD_DISP_OD BIT(25) + +#define MT2701_MUTEX_MOD_DISP_OVL BIT(3) +#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6) +#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7) +#define MT2701_MUTEX_MOD_DISP_BLS BIT(9) +#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10) +#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12) #define MUTEX_SOF_SINGLE_MODE 0 #define MUTEX_SOF_DSI0 1 @@ -67,6 +82,10 @@ #define DPI0_SEL_IN_RDMA1 0x1 #define COLOR1_SEL_IN_OVL1 0x1 +#define OVL_MOUT_EN_RDMA 0x1 +#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8 +#define DSI_SEL_IN_BLS 0x0 + struct mtk_disp_mutex { int id; bool claimed; @@ -77,24 +96,34 @@ struct mtk_ddp { struct clk *clk; void __iomem *regs; struct mtk_disp_mutex mutex[10]; + const unsigned int *mutex_mod; +}; + +static const unsigned int 
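After the move above, the two mtk_crtc_ddp_config() call sites reduce to one rule per SoC: with shadow registers the CPU may write at any time under the display mutex and the hardware latches at vblank; without them, the writes themselves must ride the vblank interrupt. A sketch merging both paths (mtk_crtc_apply() is illustrative only):

static void mtk_crtc_apply(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

	if (priv->data->shadow_register) {
		/* atomic_flush path: safe at any time under the mutex */
		mtk_disp_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc);
		mtk_disp_mutex_release(mtk_crtc->mutex);
	} else {
		/* mtk_crtc_ddp_irq path: write during the vblank window */
		mtk_crtc_ddp_config(crtc);
	}
}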
mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS, + [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR, + [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL, + [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA, }; -static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = { - [DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL, - [DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0, - [DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1, - [DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA, - [DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD, - [DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0, - [DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1, - [DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0, - [DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1, - [DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0, - [DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1, - [DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2, - [DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE, - [DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0, - [DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1, +static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL, + [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1, + [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA, + [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD, + [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1, + [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0, + [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1, + [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2, + [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE, + [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0, + [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1, }; static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, @@ -106,6 +135,9 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) { *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN; value = OVL0_MOUT_EN_COLOR0; + } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) { + *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN; + value = OVL_MOUT_EN_RDMA; } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) { *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN; value = OD_MOUT_EN_RDMA0; @@ -143,6 +175,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) { *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN; value = COLOR1_SEL_IN_OVL1; + } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) { + *addr = DISP_REG_CONFIG_DSI_SEL; + value = DSI_SEL_IN_BLS; } else { value = 0; } @@ -150,6 +185,15 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur, return value; } +static void mtk_ddp_sout_sel(void __iomem *config_regs, + enum mtk_ddp_comp_id cur, + enum mtk_ddp_comp_id next) +{ + if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) + writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1, + config_regs + DISP_REG_CONFIG_OUT_SEL); +} + void mtk_ddp_add_comp_to_path(void __iomem *config_regs, enum mtk_ddp_comp_id cur, enum mtk_ddp_comp_id next) @@ -162,6 +206,8 @@ void mtk_ddp_add_comp_to_path(void __iomem *config_regs, writel_relaxed(reg, config_regs + addr); 
} + mtk_ddp_sout_sel(config_regs, cur, next); + value = mtk_ddp_sel_in(cur, next, &addr); if (value) { reg = readl_relaxed(config_regs + addr) | value; @@ -247,7 +293,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, break; default: reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); - reg |= mutex_mod[id]; + reg |= ddp->mutex_mod[id]; writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); return; } @@ -273,7 +319,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, break; default: reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); - reg &= ~mutex_mod[id]; + reg &= ~(ddp->mutex_mod[id]); writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id)); break; } @@ -299,6 +345,27 @@ void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex) writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id)); } +void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex) +{ + struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, + mutex[mutex->id]); + u32 tmp; + + writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id)); + writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id)); + if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id), + tmp, tmp & INT_MUTEX, 1, 10000)) + pr_err("could not acquire mutex %d\n", mutex->id); +} + +void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex) +{ + struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, + mutex[mutex->id]); + + writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id)); +} + static int mtk_ddp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -326,6 +393,8 @@ static int mtk_ddp_probe(struct platform_device *pdev) return PTR_ERR(ddp->regs); } + ddp->mutex_mod = of_device_get_match_data(dev); + platform_set_drvdata(pdev, ddp); return 0; @@ -337,7 +406,8 @@ static int mtk_ddp_remove(struct platform_device *pdev) } static const struct of_device_id ddp_driver_dt_match[] = { - { .compatible = "mediatek,mt8173-disp-mutex" }, + { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod}, + { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod}, {}, }; MODULE_DEVICE_TABLE(of, ddp_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h index 92c11752ff65..f9a799168077 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h @@ -37,5 +37,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, enum mtk_ddp_comp_id id); void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex); void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex); +void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex); +void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex); #endif /* MTK_DRM_DDP_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 48cc01fd20c7..8b52416b6e41 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -39,9 +39,11 @@ #define DISP_REG_UFO_START 0x0000 #define DISP_COLOR_CFG_MAIN 0x0400 -#define DISP_COLOR_START 0x0c00 -#define DISP_COLOR_WIDTH 0x0c50 -#define DISP_COLOR_HEIGHT 0x0c54 +#define DISP_COLOR_START_MT2701 0x0f00 +#define DISP_COLOR_START_MT8173 0x0c00 +#define DISP_COLOR_START(comp) ((comp)->data->color_offset) +#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) +#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54) #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 @@ -80,6 +82,20 @@ #define 
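mtk_disp_mutex_acquire() above leans on readl_poll_timeout_atomic() from the newly included <linux/iopoll.h>. Open-coded, the call is roughly equivalent to the loop below (illustrative only; the helper should be preferred):

	u32 tmp;
	unsigned int waited_us = 0;

	do {			/* poll every 1 us, give up after 10000 us */
		tmp = readl(ddp->regs + DISP_REG_MUTEX(mutex->id));
		if (tmp & INT_MUTEX)
			return;	/* mutex acquired */
		udelay(1);
	} while (++waited_us < 10000);

	pr_err("could not acquire mutex %d\n", mutex->id);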
DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) +struct mtk_disp_color_data { + unsigned int color_offset; +}; + +struct mtk_disp_color { + struct mtk_ddp_comp ddp_comp; + const struct mtk_disp_color_data *data; +}; + +static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) +{ + return container_of(comp, struct mtk_disp_color, ddp_comp); +} + void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, unsigned int CFG) { @@ -107,15 +123,19 @@ static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc) { - writel(w, comp->regs + DISP_COLOR_WIDTH); - writel(h, comp->regs + DISP_COLOR_HEIGHT); + struct mtk_disp_color *color = comp_to_color(comp); + + writel(w, comp->regs + DISP_COLOR_WIDTH(color)); + writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); } static void mtk_color_start(struct mtk_ddp_comp *comp) { + struct mtk_disp_color *color = comp_to_color(comp); + writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, comp->regs + DISP_COLOR_CFG_MAIN); - writel(0x1, comp->regs + DISP_COLOR_START); + writel(0x1, comp->regs + DISP_COLOR_START(color)); } static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, @@ -236,6 +256,7 @@ static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { [MTK_DISP_PWM] = "pwm", [MTK_DISP_MUTEX] = "mutex", [MTK_DISP_OD] = "od", + [MTK_DISP_BLS] = "bls", }; struct mtk_ddp_comp_match { @@ -246,6 +267,7 @@ struct mtk_ddp_comp_match { static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal }, + [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, @@ -264,6 +286,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; +static const struct mtk_disp_color_data mt2701_color_driver_data = { + .color_offset = DISP_COLOR_START_MT2701, +}; + +static const struct mtk_disp_color_data mt8173_color_driver_data = { + .color_offset = DISP_COLOR_START_MT8173, +}; + +static const struct of_device_id mtk_disp_color_driver_dt_match[] = { + { .compatible = "mediatek,mt2701-disp-color", + .data = &mt2701_color_driver_data}, + { .compatible = "mediatek,mt8173-disp-color", + .data = &mt8173_color_driver_data}, + {}, +}; + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type) { @@ -286,14 +324,29 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, enum mtk_ddp_comp_type type; struct device_node *larb_node; struct platform_device *larb_pdev; + const struct of_device_id *match; + struct mtk_disp_color *color; if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) return -EINVAL; + type = mtk_ddp_matches[comp_id].type; + if (type == MTK_DISP_COLOR) { + devm_kfree(dev, comp); + color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL); + if (!color) + return -ENOMEM; + + match = of_match_node(mtk_disp_color_driver_dt_match, node); + color->data = match->data; + comp = &color->ddp_comp; + } + comp->id = comp_id; comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs; - if (comp_id == DDP_COMPONENT_DPI0 || + if (comp_id == DDP_COMPONENT_BLS || + comp_id == DDP_COMPONENT_DPI0 || comp_id == DDP_COMPONENT_DSI0 || comp_id == DDP_COMPONENT_PWM0) { comp->regs = NULL; @@ -308,8 +361,6 @@ int 
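/*
 * comp_to_color() above is the usual "wrap the generic object" idiom:
 * a larger mtk_disp_color embeds the common mtk_ddp_comp, and
 * container_of() recovers the wrapper from the embedded member. A
 * minimal sketch of the same idiom with a hypothetical wrapper type:
 *
 *   struct foo {
 *       struct mtk_ddp_comp ddp_comp;
 *       const struct foo_data *data;
 *   };
 *
 *   static inline struct foo *comp_to_foo(struct mtk_ddp_comp *comp)
 *   {
 *       return container_of(comp, struct foo, ddp_comp);
 *   }
 *
 * Because ddp_comp is the first member the conversion costs nothing
 * at runtime, but container_of() stays correct even if the layout
 * changes.
 */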
mtk_ddp_comp_init(struct device *dev, struct device_node *node, if (IS_ERR(comp->clk)) comp->clk = NULL; - type = mtk_ddp_matches[comp_id].type; - /* Only DMA capable components need the LARB property */ comp->larb_dev = NULL; if (type != MTK_DISP_OVL && diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 22a33ee451c4..0828cf8bf85c 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -36,11 +36,13 @@ enum mtk_ddp_comp_type { MTK_DISP_PWM, MTK_DISP_MUTEX, MTK_DISP_OD, + MTK_DISP_BLS, MTK_DDP_COMP_TYPE_MAX, }; enum mtk_ddp_comp_id { DDP_COMPONENT_AAL, + DDP_COMPONENT_BLS, DDP_COMPONENT_COLOR0, DDP_COMPONENT_COLOR1, DDP_COMPONENT_DPI0, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index f5a1fd9b3ecc..f6c8ec4c7dbc 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -128,7 +128,20 @@ static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { .atomic_commit = mtk_atomic_commit, }; -static const enum mtk_ddp_comp_id mtk_ddp_main[] = { +static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = { + DDP_COMPONENT_OVL0, + DDP_COMPONENT_RDMA0, + DDP_COMPONENT_COLOR0, + DDP_COMPONENT_BLS, + DDP_COMPONENT_DSI0, +}; + +static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = { + DDP_COMPONENT_RDMA1, + DDP_COMPONENT_DPI0, +}; + +static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, DDP_COMPONENT_AAL, @@ -139,7 +152,7 @@ static const enum mtk_ddp_comp_id mtk_ddp_main[] = { DDP_COMPONENT_PWM0, }; -static const enum mtk_ddp_comp_id mtk_ddp_ext[] = { +static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = { DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1, DDP_COMPONENT_GAMMA, @@ -147,6 +160,21 @@ static const enum mtk_ddp_comp_id mtk_ddp_ext[] = { DDP_COMPONENT_DPI0, }; +static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { + .main_path = mt2701_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main), + .ext_path = mt2701_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext), + .shadow_register = true, +}; + +static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = { + .main_path = mt8173_mtk_ddp_main, + .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main), + .ext_path = mt8173_mtk_ddp_ext, + .ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext), +}; + static int mtk_drm_kms_init(struct drm_device *drm) { struct mtk_drm_private *private = drm->dev_private; @@ -189,17 +217,19 @@ static int mtk_drm_kms_init(struct drm_device *drm) * and each statically assigned to a crtc: * OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ... */ - ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main)); + ret = mtk_drm_crtc_create(drm, private->data->main_path, + private->data->main_len); if (ret < 0) goto err_component_unbind; /* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. 
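 * The components in both paths now come from per-SoC
 * mtk_mmsys_driver_data instead of fixed arrays: MT2701's main path
 * is OVL0 -> RDMA0 -> COLOR0 -> BLS -> DSI0 with RDMA1 -> DPI0 as the
 * external path, while MT8173 keeps the longer AAL/OD/UFOE pipeline.
 * The table is chosen by compatible string, roughly:
 *
 *   private->data = of_device_get_match_data(dev);
 *   ret = mtk_drm_crtc_create(drm, private->data->ext_path,
 *                             private->data->ext_len);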
*/ - ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext)); + ret = mtk_drm_crtc_create(drm, private->data->ext_path, + private->data->ext_len); if (ret < 0) goto err_component_unbind; /* Use OVL device for all DMA memory allocations */ - np = private->comp_node[mtk_ddp_main[0]] ?: - private->comp_node[mtk_ddp_ext[0]]; + np = private->comp_node[private->data->main_path[0]] ?: + private->comp_node[private->data->ext_path[0]]; pdev = of_find_device_by_node(np); if (!pdev) { ret = -ENODEV; @@ -328,16 +358,22 @@ static const struct component_master_ops mtk_drm_ops = { }; static const struct of_device_id mtk_ddp_comp_dt_ids[] = { + { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL }, { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL }, + { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA }, { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA }, { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA }, + { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR }, { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL}, { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, }, { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE }, + { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI }, { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI }, { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI }, + { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX }, + { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS }, { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM }, { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD }, { } @@ -359,6 +395,7 @@ static int mtk_drm_probe(struct platform_device *pdev) mutex_init(&private->commit.lock); INIT_WORK(&private->commit.work, mtk_atomic_work); + private->data = of_device_get_match_data(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); private->config_regs = devm_ioremap_resource(dev, mem); @@ -510,7 +547,10 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend, mtk_drm_sys_resume); static const struct of_device_id mtk_drm_of_ids[] = { - { .compatible = "mediatek,mt8173-mmsys", }, + { .compatible = "mediatek,mt2701-mmsys", + .data = &mt2701_mmsys_driver_data}, + { .compatible = "mediatek,mt8173-mmsys", + .data = &mt8173_mmsys_driver_data}, { } }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index df322a7a5fcb..aef8747d810b 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -28,6 +28,14 @@ struct drm_fb_helper; struct drm_property; struct regmap; +struct mtk_mmsys_driver_data { + const enum mtk_ddp_comp_id *main_path; + unsigned int main_len; + const enum mtk_ddp_comp_id *ext_path; + unsigned int ext_len; + bool shadow_register; +}; + struct mtk_drm_private { struct drm_device *drm; struct device *dma_dev; @@ -39,6 +47,7 @@ struct mtk_drm_private { void __iomem *config_regs; struct device_node *comp_node[DDP_COMPONENT_ID_MAX]; struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX]; + const struct mtk_mmsys_driver_data *data; struct { struct drm_atomic_state *state; diff 
--git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 4d837b29ee9e..808b995a990f 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -19,19 +19,28 @@ #include <drm/drm_of.h> #include <linux/clk.h> #include <linux/component.h> +#include <linux/irq.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <video/mipi_display.h> #include <video/videomode.h> #include "mtk_drm_ddp_comp.h" -#define DSI_VIDEO_FIFO_DEPTH (1920 / 4) -#define DSI_HOST_FIFO_DEPTH 64 - #define DSI_START 0x00 +#define DSI_INTEN 0x08 + +#define DSI_INTSTA 0x0c +#define LPRX_RD_RDY_INT_FLAG BIT(0) +#define CMD_DONE_INT_FLAG BIT(1) +#define TE_RDY_INT_FLAG BIT(2) +#define VM_DONE_INT_FLAG BIT(3) +#define EXT_TE_RDY_INT_FLAG BIT(4) +#define DSI_BUSY BIT(31) + #define DSI_CON_CTRL 0x10 #define DSI_RESET BIT(0) #define DSI_EN BIT(1) @@ -46,7 +55,7 @@ #define MIX_MODE BIT(17) #define DSI_TXRX_CTRL 0x18 -#define VC_NUM (2 << 0) +#define VC_NUM BIT(1) #define LANE_NUM (0xf << 2) #define DIS_EOT BIT(6) #define NULL_EN BIT(7) @@ -72,8 +81,19 @@ #define DSI_HBP_WC 0x54 #define DSI_HFP_WC 0x58 +#define DSI_CMDQ_SIZE 0x60 +#define CMDQ_SIZE 0x3f + #define DSI_HSTX_CKL_WC 0x64 +#define DSI_RX_DATA0 0x74 +#define DSI_RX_DATA1 0x78 +#define DSI_RX_DATA2 0x7c +#define DSI_RX_DATA3 0x80 + +#define DSI_RACK 0x84 +#define RACK BIT(0) + #define DSI_PHY_LCCON 0x104 #define LC_HS_TX_EN BIT(0) #define LC_ULPM_EN BIT(1) @@ -106,6 +126,19 @@ #define CLK_HS_POST (0xff << 8) #define CLK_HS_EXIT (0xff << 16) +#define DSI_VM_CMD_CON 0x130 +#define VM_CMD_EN BIT(0) +#define TS_VFP_EN BIT(5) + +#define DSI_CMDQ0 0x180 +#define CONFIG (0xff << 0) +#define SHORT_PACKET 0 +#define LONG_PACKET 2 +#define BTA BIT(2) +#define DATA_ID (0xff << 8) +#define DATA_0 (0xff << 16) +#define DATA_1 (0xff << 24) + #define T_LPX 5 #define T_HS_PREP 6 #define T_HS_TRAIL 8 @@ -114,6 +147,12 @@ #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 
1 : 0)) +#define MTK_DSI_HOST_IS_READ(type) \ + ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \ + (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \ + (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \ + (type == MIPI_DSI_DCS_READ)) + struct phy; struct mtk_dsi { @@ -140,6 +179,8 @@ struct mtk_dsi { struct videomode vm; int refcount; bool enabled; + u32 irq_data; + wait_queue_head_t irq_wait_queue; }; static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e) @@ -164,7 +205,7 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) writel((temp & ~mask) | (data & mask), dsi->regs + offset); } -static void dsi_phy_timconfig(struct mtk_dsi *dsi) +static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; u32 ui, cycle_time; @@ -196,118 +237,39 @@ static void mtk_dsi_disable(struct mtk_dsi *dsi) mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0); } -static void mtk_dsi_reset(struct mtk_dsi *dsi) +static void mtk_dsi_reset_engine(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET); mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0); } -static int mtk_dsi_poweron(struct mtk_dsi *dsi) -{ - struct device *dev = dsi->dev; - int ret; - u64 pixel_clock, total_bits; - u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; - - if (++dsi->refcount != 1) - return 0; - - switch (dsi->format) { - case MIPI_DSI_FMT_RGB565: - bit_per_pixel = 16; - break; - case MIPI_DSI_FMT_RGB666_PACKED: - bit_per_pixel = 18; - break; - case MIPI_DSI_FMT_RGB666: - case MIPI_DSI_FMT_RGB888: - default: - bit_per_pixel = 24; - break; - } - - /** - * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000 - * htotal_time = htotal * byte_per_pixel / num_lanes - * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit - * mipi_ratio = (htotal_time + overhead_time) / htotal_time - * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; - */ - pixel_clock = dsi->vm.pixelclock * 1000; - htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + - dsi->vm.hsync_len; - htotal_bits = htotal * bit_per_pixel; - - overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + - T_HS_EXIT; - overhead_bits = overhead_cycles * dsi->lanes * 8; - total_bits = htotal_bits + overhead_bits; - - dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, - htotal * dsi->lanes); - - ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); - if (ret < 0) { - dev_err(dev, "Failed to set data rate: %d\n", ret); - goto err_refcount; - } - - phy_power_on(dsi->phy); - - ret = clk_prepare_enable(dsi->engine_clk); - if (ret < 0) { - dev_err(dev, "Failed to enable engine clock: %d\n", ret); - goto err_phy_power_off; - } - - ret = clk_prepare_enable(dsi->digital_clk); - if (ret < 0) { - dev_err(dev, "Failed to enable digital clock: %d\n", ret); - goto err_disable_engine_clk; - } - - mtk_dsi_enable(dsi); - mtk_dsi_reset(dsi); - dsi_phy_timconfig(dsi); - - return 0; - -err_disable_engine_clk: - clk_disable_unprepare(dsi->engine_clk); -err_phy_power_off: - phy_power_off(dsi->phy); -err_refcount: - dsi->refcount--; - return ret; -} - -static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi) +static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0); } -static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi) +static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 
0); mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN); mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0); } -static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi) +static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0); mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0); } -static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi) +static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0); mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN); mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0); } -static bool dsi_clk_hs_state(struct mtk_dsi *dsi) +static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi) { u32 tmp_reg1; @@ -315,30 +277,37 @@ static bool dsi_clk_hs_state(struct mtk_dsi *dsi) return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false; } -static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter) +static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter) { - if (enter && !dsi_clk_hs_state(dsi)) + if (enter && !mtk_dsi_clk_hs_state(dsi)) mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN); - else if (!enter && dsi_clk_hs_state(dsi)) + else if (!enter && mtk_dsi_clk_hs_state(dsi)) mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); } -static void dsi_set_mode(struct mtk_dsi *dsi) +static void mtk_dsi_set_mode(struct mtk_dsi *dsi) { u32 vid_mode = CMD_MODE; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { - vid_mode = SYNC_PULSE_MODE; - - if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) && - !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)) + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) vid_mode = BURST_MODE; + else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + vid_mode = SYNC_PULSE_MODE; + else + vid_mode = SYNC_EVENT_MODE; } writel(vid_mode, dsi->regs + DSI_MODE_CTRL); } -static void dsi_ps_control_vact(struct mtk_dsi *dsi) +static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN); + mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN); +} + +static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi) { struct videomode *vm = &dsi->vm; u32 dsi_buf_bpp, ps_wc; @@ -372,7 +341,7 @@ static void dsi_ps_control_vact(struct mtk_dsi *dsi) writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC); } -static void dsi_rxtx_control(struct mtk_dsi *dsi) +static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi) { u32 tmp_reg; @@ -394,12 +363,15 @@ static void dsi_rxtx_control(struct mtk_dsi *dsi) break; } + tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6; + tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3; + writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL); } -static void dsi_ps_control(struct mtk_dsi *dsi) +static void mtk_dsi_ps_control(struct mtk_dsi *dsi) { - unsigned int dsi_tmp_buf_bpp; + u32 dsi_tmp_buf_bpp; u32 tmp_reg; switch (dsi->format) { @@ -429,12 +401,12 @@ static void dsi_ps_control(struct mtk_dsi *dsi) writel(tmp_reg, dsi->regs + DSI_PSCTRL); } -static void dsi_config_vdo_timing(struct mtk_dsi *dsi) +static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) { - unsigned int horizontal_sync_active_byte; - unsigned int horizontal_backporch_byte; - unsigned int horizontal_frontporch_byte; - unsigned int dsi_tmp_buf_bpp; + u32 horizontal_sync_active_byte; + u32 horizontal_backporch_byte; + u32 horizontal_frontporch_byte; + u32 dsi_tmp_buf_bpp; struct videomode *vm = &dsi->vm; @@ -463,7 +435,7 @@ static void dsi_config_vdo_timing(struct mtk_dsi *dsi) 
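/*
 * The horizontal timing registers take word counts in bytes, not
 * pixels: each sync/porch length from the videomode is scaled by the
 * bytes per pixel of the wire format (2 for RGB565, 3 otherwise) and
 * trimmed by a small per-region packet overhead. A rough worked
 * example, assuming RGB888 and ignoring the overhead constants:
 *
 *   hback_porch  = 40 px -> horizontal_backporch_byte  ~ 40 * 3 = 120
 *   hfront_porch = 20 px -> horizontal_frontporch_byte ~ 20 * 3 = 60
 */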
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC); - dsi_ps_control(dsi); + mtk_dsi_ps_control(dsi); } static void mtk_dsi_start(struct mtk_dsi *dsi) @@ -472,6 +444,184 @@ static void mtk_dsi_start(struct mtk_dsi *dsi) writel(1, dsi->regs + DSI_START); } +static void mtk_dsi_stop(struct mtk_dsi *dsi) +{ + writel(0, dsi->regs + DSI_START); +} + +static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi) +{ + writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL); +} + +static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi) +{ + u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG; + + writel(inten, dsi->regs + DSI_INTEN); +} + +static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit) +{ + dsi->irq_data |= irq_bit; +} + +static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit) +{ + dsi->irq_data &= ~irq_bit; +} + +static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag, + unsigned int timeout) +{ + s32 ret = 0; + unsigned long jiffies = msecs_to_jiffies(timeout); + + ret = wait_event_interruptible_timeout(dsi->irq_wait_queue, + dsi->irq_data & irq_flag, + jiffies); + if (ret == 0) { + DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag); + + mtk_dsi_enable(dsi); + mtk_dsi_reset_engine(dsi); + } + + return ret; +} + +static irqreturn_t mtk_dsi_irq(int irq, void *dev_id) +{ + struct mtk_dsi *dsi = dev_id; + u32 status, tmp; + u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG; + + status = readl(dsi->regs + DSI_INTSTA) & flag; + + if (status) { + do { + mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK); + tmp = readl(dsi->regs + DSI_INTSTA); + } while (tmp & DSI_BUSY); + + mtk_dsi_mask(dsi, DSI_INTSTA, status, 0); + mtk_dsi_irq_data_set(dsi, status); + wake_up_interruptible(&dsi->irq_wait_queue); + } + + return IRQ_HANDLED; +} + +static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t) +{ + mtk_dsi_irq_data_clear(dsi, irq_flag); + mtk_dsi_set_cmd_mode(dsi); + + if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) { + DRM_ERROR("failed to switch cmd mode\n"); + return -ETIME; + } else { + return 0; + } +} + +static int mtk_dsi_poweron(struct mtk_dsi *dsi) +{ + struct device *dev = dsi->dev; + int ret; + u64 pixel_clock, total_bits; + u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; + + if (++dsi->refcount != 1) + return 0; + + switch (dsi->format) { + case MIPI_DSI_FMT_RGB565: + bit_per_pixel = 16; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + bit_per_pixel = 18; + break; + case MIPI_DSI_FMT_RGB666: + case MIPI_DSI_FMT_RGB888: + default: + bit_per_pixel = 24; + break; + } + + /** + * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000 + * htotal_time = htotal * byte_per_pixel / num_lanes + * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit + * mipi_ratio = (htotal_time + overhead_time) / htotal_time + * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; + */ + pixel_clock = dsi->vm.pixelclock * 1000; + htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + + dsi->vm.hsync_len; + htotal_bits = htotal * bit_per_pixel; + + overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + + T_HS_EXIT; + overhead_bits = overhead_cycles * dsi->lanes * 8; + total_bits = htotal_bits + overhead_bits; + + dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, + htotal * dsi->lanes); + + ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); + if (ret < 0) { + dev_err(dev, "Failed 
to set data rate: %d\n", ret); + goto err_refcount; + } + + phy_power_on(dsi->phy); + + ret = clk_prepare_enable(dsi->engine_clk); + if (ret < 0) { + dev_err(dev, "Failed to enable engine clock: %d\n", ret); + goto err_phy_power_off; + } + + ret = clk_prepare_enable(dsi->digital_clk); + if (ret < 0) { + dev_err(dev, "Failed to enable digital clock: %d\n", ret); + goto err_disable_engine_clk; + } + + mtk_dsi_enable(dsi); + mtk_dsi_reset_engine(dsi); + mtk_dsi_phy_timconfig(dsi); + + mtk_dsi_rxtx_control(dsi); + mtk_dsi_ps_control_vact(dsi); + mtk_dsi_set_vm_cmd(dsi); + mtk_dsi_config_vdo_timing(dsi); + mtk_dsi_set_interrupt_enable(dsi); + + mtk_dsi_clk_ulp_mode_leave(dsi); + mtk_dsi_lane0_ulp_mode_leave(dsi); + mtk_dsi_clk_hs_mode(dsi, 0); + + if (dsi->panel) { + if (drm_panel_prepare(dsi->panel)) { + DRM_ERROR("failed to prepare the panel\n"); + goto err_disable_digital_clk; + } + } + + return 0; +err_disable_digital_clk: + clk_disable_unprepare(dsi->digital_clk); +err_disable_engine_clk: + clk_disable_unprepare(dsi->engine_clk); +err_phy_power_off: + phy_power_off(dsi->phy); +err_refcount: + dsi->refcount--; + return ret; +} + static void mtk_dsi_poweroff(struct mtk_dsi *dsi) { if (WARN_ON(dsi->refcount == 0)) @@ -480,8 +630,18 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi) if (--dsi->refcount != 0) return; - dsi_lane0_ulp_mode_enter(dsi); - dsi_clk_ulp_mode_enter(dsi); + if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) { + if (dsi->panel) { + if (drm_panel_unprepare(dsi->panel)) { + DRM_ERROR("failed to unprepare the panel\n"); + return; + } + } + } + + mtk_dsi_reset_engine(dsi); + mtk_dsi_lane0_ulp_mode_enter(dsi); + mtk_dsi_clk_ulp_mode_enter(dsi); mtk_dsi_disable(dsi); @@ -498,35 +658,30 @@ static void mtk_output_dsi_enable(struct mtk_dsi *dsi) if (dsi->enabled) return; - if (dsi->panel) { - if (drm_panel_prepare(dsi->panel)) { - DRM_ERROR("failed to setup the panel\n"); - return; - } - } - ret = mtk_dsi_poweron(dsi); if (ret < 0) { DRM_ERROR("failed to power on dsi\n"); return; } - dsi_rxtx_control(dsi); - - dsi_clk_ulp_mode_leave(dsi); - dsi_lane0_ulp_mode_leave(dsi); - dsi_clk_hs_mode(dsi, 0); - dsi_set_mode(dsi); - - dsi_ps_control_vact(dsi); - dsi_config_vdo_timing(dsi); - - dsi_set_mode(dsi); - dsi_clk_hs_mode(dsi, 1); + mtk_dsi_set_mode(dsi); + mtk_dsi_clk_hs_mode(dsi, 1); mtk_dsi_start(dsi); + if (dsi->panel) { + if (drm_panel_enable(dsi->panel)) { + DRM_ERROR("failed to enable the panel\n"); + goto err_dsi_power_off; + } + } + dsi->enabled = true; + + return; +err_dsi_power_off: + mtk_dsi_stop(dsi); + mtk_dsi_poweroff(dsi); } static void mtk_output_dsi_disable(struct mtk_dsi *dsi) @@ -541,6 +696,7 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) } } + mtk_dsi_stop(dsi); mtk_dsi_poweroff(dsi); dsi->enabled = false; @@ -742,9 +898,149 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host, return 0; } +static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) +{ + u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */ + + while (timeout_ms--) { + if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY)) + break; + + usleep_range(2, 4); + } + + if (timeout_ms == 0) { + DRM_WARN("polling dsi wait not busy timeout!\n"); + + mtk_dsi_enable(dsi); + mtk_dsi_reset_engine(dsi); + } +} + +static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data) +{ + switch (type) { + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: + case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: + return 1; + case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: + case 
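/*
 * The first returned byte is the RX data type from the DSI response
 * packet header: short read responses carry one or two payload bytes
 * directly, while long read responses put the payload length in the
 * next two header bytes, which the long-read cases below combine. A
 * sketch of the header layout as consumed here:
 *
 *   read_data[0]  data type (short/long read response, ACK + error)
 *   read_data[1]  word count, low byte   (long responses)
 *   read_data[2]  word count, high byte  (long responses)
 *   read_data[3]  ECC                    (long responses)
 */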
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: + return 2; + case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE: + case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE: + return read_data[1] + read_data[2] * 16; + case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: + DRM_INFO("type is 0x02, try again\n"); + break; + default: + DRM_INFO("type(0x%x) cannot be non-recognite\n", type); + break; + } + + return 0; +} + +static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg) +{ + const char *tx_buf = msg->tx_buf; + u8 config, cmdq_size, cmdq_off, type = msg->type; + u32 reg_val, cmdq_mask, i; + + if (MTK_DSI_HOST_IS_READ(type)) + config = BTA; + else + config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET; + + if (msg->tx_len > 2) { + cmdq_size = 1 + (msg->tx_len + 3) / 4; + cmdq_off = 4; + cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1; + reg_val = (msg->tx_len << 16) | (type << 8) | config; + } else { + cmdq_size = 1; + cmdq_off = 2; + cmdq_mask = CONFIG | DATA_ID; + reg_val = (type << 8) | config; + } + + for (i = 0; i < msg->tx_len; i++) + writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i); + + mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val); + mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size); +} + +static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi, + const struct mipi_dsi_msg *msg, u8 flag) +{ + mtk_dsi_wait_for_idle(dsi); + mtk_dsi_irq_data_clear(dsi, flag); + mtk_dsi_cmdq(dsi, msg); + mtk_dsi_start(dsi); + + if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000)) + return -ETIME; + else + return 0; +} + +static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct mtk_dsi *dsi = host_to_dsi(host); + u32 recv_cnt, i; + u8 read_data[16]; + void *src_addr; + u8 irq_flag = CMD_DONE_INT_FLAG; + + if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) { + DRM_ERROR("dsi engine is not command mode\n"); + return -EINVAL; + } + + if (MTK_DSI_HOST_IS_READ(msg->type)) + irq_flag |= LPRX_RD_RDY_INT_FLAG; + + if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0) + return -ETIME; + + if (!MTK_DSI_HOST_IS_READ(msg->type)) + return 0; + + if (!msg->rx_buf) { + DRM_ERROR("dsi receive buffer size may be NULL\n"); + return -EINVAL; + } + + for (i = 0; i < 16; i++) + *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i); + + recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data); + + if (recv_cnt > 2) + src_addr = &read_data[4]; + else + src_addr = &read_data[1]; + + if (recv_cnt > 10) + recv_cnt = 10; + + if (recv_cnt > msg->rx_len) + recv_cnt = msg->rx_len; + + if (recv_cnt) + memcpy(msg->rx_buf, src_addr, recv_cnt); + + DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n", + recv_cnt, *((u8 *)(msg->tx_buf))); + + return recv_cnt; +} + static const struct mipi_dsi_host_ops mtk_dsi_ops = { .attach = mtk_dsi_host_attach, .detach = mtk_dsi_host_detach, + .transfer = mtk_dsi_host_transfer, }; static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) @@ -802,6 +1098,7 @@ static int mtk_dsi_probe(struct platform_device *pdev) struct mtk_dsi *dsi; struct device *dev = &pdev->dev; struct resource *regs; + int irq_num; int comp_id; int ret; @@ -866,6 +1163,22 @@ static int mtk_dsi_probe(struct platform_device *pdev) return ret; } + irq_num = platform_get_irq(pdev, 0); + if (irq_num < 0) { + dev_err(&pdev->dev, "failed to request dsi irq resource\n"); + return -EPROBE_DEFER; + } + + irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW); + ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq, + IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi); + if 
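/*
 * The IRQ handler and the command path pair up through a standard
 * waitqueue handshake: the sender clears its flag in dsi->irq_data,
 * kicks the hardware, then sleeps until mtk_dsi_irq() stores the
 * status bits and wakes it. A minimal sketch of the pattern used by
 * mtk_dsi_wait_for_irq_done() and mtk_dsi_irq():
 *
 *   sender:  dsi->irq_data &= ~flag;
 *            ... start transfer ...
 *            wait_event_interruptible_timeout(dsi->irq_wait_queue,
 *                                             dsi->irq_data & flag,
 *                                             msecs_to_jiffies(t));
 *
 *   irq:     dsi->irq_data |= status;
 *            wake_up_interruptible(&dsi->irq_wait_queue);
 */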
(ret) { + dev_err(&pdev->dev, "failed to request mediatek dsi irq\n"); + return -EPROBE_DEFER; + } + + init_waitqueue_head(&dsi->irq_wait_queue); + platform_set_drvdata(pdev, dsi); return component_add(&pdev->dev, &mtk_dsi_component_ops); @@ -882,6 +1195,7 @@ static int mtk_dsi_remove(struct platform_device *pdev) } static const struct of_device_id mtk_dsi_of_match[] = { + { .compatible = "mediatek,mt2701-dsi" }, { .compatible = "mediatek,mt8173-dsi" }, { }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index 1c366f8cb2d0..90e913108950 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -16,6 +16,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> @@ -87,6 +88,9 @@ #define MIPITX_DSI_PLL_CON2 0x58 +#define MIPITX_DSI_PLL_TOP 0x64 +#define RG_DSI_MPPLL_PRESERVE (0xff << 8) + #define MIPITX_DSI_PLL_PWR 0x68 #define RG_DSI_MPPLL_SDM_PWR_ON BIT(0) #define RG_DSI_MPPLL_SDM_ISO_EN BIT(1) @@ -123,10 +127,15 @@ #define SW_LNT2_HSTX_PRE_OE BIT(24) #define SW_LNT2_HSTX_OE BIT(25) +struct mtk_mipitx_data { + const u32 mppll_preserve; +}; + struct mtk_mipi_tx { struct device *dev; void __iomem *regs; - unsigned int data_rate; + u32 data_rate; + const struct mtk_mipitx_data *driver_data; struct clk_hw pll_hw; struct clk *pll; }; @@ -163,7 +172,7 @@ static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) { struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - unsigned int txdiv, txdiv0, txdiv1; + u8 txdiv, txdiv0, txdiv1; u64 pcw; dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate); @@ -243,6 +252,10 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1, RG_DSI_MPPLL_SDM_SSC_EN); + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, + RG_DSI_MPPLL_PRESERVE, + mipi_tx->driver_data->mppll_preserve); + return 0; } @@ -255,6 +268,9 @@ static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw) mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN); + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, + RG_DSI_MPPLL_PRESERVE, 0); + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, RG_DSI_MPPLL_SDM_ISO_EN | RG_DSI_MPPLL_SDM_PWR_ON, @@ -310,7 +326,7 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = { static int mtk_mipi_tx_power_on_signal(struct phy *phy) { struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - unsigned int reg; + u32 reg; for (reg = MIPITX_DSI_CLOCK_LANE; reg <= MIPITX_DSI_DATA_LANE3; reg += 4) @@ -341,7 +357,7 @@ static int mtk_mipi_tx_power_on(struct phy *phy) static void mtk_mipi_tx_power_off_signal(struct phy *phy) { struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - unsigned int reg; + u32 reg; mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON, RG_DSI_PAD_TIE_LOW_EN); @@ -391,6 +407,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) if (!mipi_tx) return -ENOMEM; + mipi_tx->driver_data = of_device_get_match_data(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); mipi_tx->regs = devm_ioremap_resource(dev, mem); if (IS_ERR(mipi_tx->regs)) { @@ -448,8 +465,19 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev) return 0; } +static const struct mtk_mipitx_data mt2701_mipitx_data = { + .mppll_preserve = (3 << 8) +}; + +static const struct mtk_mipitx_data mt8173_mipitx_data = { + 
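/*
 * Both per-SoC values target the RG_DSI_MPPLL_PRESERVE field (bits
 * 15:8 of MIPITX_DSI_PLL_TOP): MT2701 programs 3 << 8 where MT8173
 * programs 0, which is why the value lives in match data and is
 * applied with a masked read-modify-write when the PLL is prepared:
 *
 *   mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
 *                           RG_DSI_MPPLL_PRESERVE,
 *                           mipi_tx->driver_data->mppll_preserve);
 */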
.mppll_preserve = (0 << 8) +}; + static const struct of_device_id mtk_mipi_tx_match[] = { - { .compatible = "mediatek,mt8173-mipi-tx", }, + { .compatible = "mediatek,mt2701-mipi-tx", + .data = &mt2701_mipitx_data }, + { .compatible = "mediatek,mt8173-mipi-tx", + .data = &mt8173_mipitx_data }, {}, }; diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 657598bb1e6b..565a217b46f2 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -236,6 +236,7 @@ struct ttm_bo_driver mgag200_bo_driver = { .verify_access = mgag200_bo_verify_access, .io_mem_reserve = &mgag200_ttm_io_mem_reserve, .io_mem_free = &mgag200_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int mgag200_mm_init(struct mga_device *mdev) diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 39055362da95..5241ac8803ba 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -40,6 +40,7 @@ msm-y := \ mdp/mdp5/mdp5_mdss.o \ mdp/mdp5/mdp5_kms.o \ mdp/mdp5/mdp5_pipe.o \ + mdp/mdp5/mdp5_mixer.o \ mdp/mdp5/mdp5_plane.o \ mdp/mdp5/mdp5_smp.o \ msm_atomic.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index b999349b7d2d..7fd77958a436 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -412,10 +412,8 @@ static const unsigned int a3xx_registers[] = { #ifdef CONFIG_DEBUG_FS static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m) { - gpu->funcs->pm_resume(gpu); seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A3XX_RBBM_STATUS)); - gpu->funcs->pm_suspend(gpu); adreno_show(gpu, m); } #endif diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 511bc855cc7f..dfe0eceaae3b 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -456,12 +456,8 @@ static const unsigned int a4xx_registers[] = { #ifdef CONFIG_DEBUG_FS static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m) { - gpu->funcs->pm_resume(gpu); - seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A4XX_RBBM_STATUS)); - gpu->funcs->pm_suspend(gpu); - adreno_show(gpu, m); } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 4414cf73735d..31a9bceed32c 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu) } if (a5xx_gpu->gpmu_bo) { - if (a5xx_gpu->gpmu_bo) + if (a5xx_gpu->gpmu_iova) msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); } @@ -638,10 +638,8 @@ static void a5xx_cp_err_irq(struct msm_gpu *gpu) } } -static void a5xx_rbbm_err_irq(struct msm_gpu *gpu) +static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status) { - u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); - if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) { u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS); @@ -653,6 +651,10 @@ static void a5xx_rbbm_err_irq(struct msm_gpu *gpu) /* Clear the error */ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4)); + + /* Clear the interrupt */ + gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, + A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR); } if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT) @@ -704,10 +706,16 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) { u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS); - gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status); + /* + * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it + * before the source is cleared the interrupt will storm. + */ + gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, + status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR); + /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */ if (status & RBBM_ERROR_MASK) - a5xx_rbbm_err_irq(gpu); + a5xx_rbbm_err_irq(gpu, status); if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR) a5xx_cp_err_irq(gpu); @@ -837,12 +845,8 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) #ifdef CONFIG_DEBUG_FS static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { - gpu->funcs->pm_resume(gpu); - seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); - gpu->funcs->pm_suspend(gpu); - adreno_show(gpu, m); } #endif @@ -860,7 +864,9 @@ static const struct adreno_gpu_funcs funcs = { .idle = a5xx_idle, .irq = a5xx_irq, .destroy = a5xx_destroy, +#ifdef CONFIG_DEBUG_FS .show = a5xx_show, +#endif }, .get_timestamp = a5xx_get_timestamp, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index ece39b16a864..c0fa5d1c75ff 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -2,7 +2,7 @@ * Copyright (C) 2013-2014 Red Hat * Author: Rob Clark <robdclark@gmail.com> * - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by @@ -17,6 +17,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/pm_opp.h> #include "adreno_gpu.h" #define ANY_ID 0xff @@ -155,21 +156,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) if (gpu) { int ret; - mutex_lock(&dev->struct_mutex); - gpu->funcs->pm_resume(gpu); - mutex_unlock(&dev->struct_mutex); - disable_irq(gpu->irq); - - ret = gpu->funcs->hw_init(gpu); + pm_runtime_get_sync(&pdev->dev); + ret = msm_gpu_hw_init(gpu); + pm_runtime_put_sync(&pdev->dev); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); gpu->funcs->destroy(gpu); gpu = NULL; - } else { - enable_irq(gpu->irq); - /* give inactive pm a chance to kick in: */ - msm_gpu_retire(gpu); } } @@ -220,10 +214,71 @@ static int find_chipid(struct device *dev, u32 *chipid) return 0; } +/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */ +static int adreno_get_legacy_pwrlevels(struct device *dev) +{ + struct device_node *child, *node; + int ret; + + node = of_find_compatible_node(dev->of_node, NULL, + "qcom,gpu-pwrlevels"); + if (!node) { + dev_err(dev, "Could not find the GPU powerlevels\n"); + return -ENXIO; + } + + for_each_child_of_node(node, child) { + unsigned int val; + + ret = of_property_read_u32(child, "qcom,gpu-freq", &val); + if (ret) + continue; + + /* + * Skip the intentionally bogus clock value found at the bottom + * of most legacy frequency tables + */ + if (val != 27000000) + dev_pm_opp_add(dev, val, 0); + } + + return 0; +} + +static int adreno_get_pwrlevels(struct device *dev, + struct adreno_platform_config *config) +{ + unsigned long freq = ULONG_MAX; + struct dev_pm_opp *opp; + int ret; + + /* You down with OPP? */ + if (!of_find_property(dev->of_node, "operating-points-v2", NULL)) + ret = adreno_get_legacy_pwrlevels(dev); + else + ret = dev_pm_opp_of_add_table(dev); + + if (ret) + return ret; + + /* Find the fastest defined rate */ + opp = dev_pm_opp_find_freq_floor(dev, &freq); + if (!IS_ERR(opp)) + config->fast_rate = dev_pm_opp_get_freq(opp); + + if (!config->fast_rate) { + DRM_DEV_INFO(dev, + "Could not find clock rate. 
Using default\n"); + /* Pick a suitably safe clock speed for any target */ + config->fast_rate = 200000000; + } + + return 0; +} + static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; - struct device_node *child, *node = dev->of_node; u32 val; int ret; @@ -238,28 +293,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) /* find clock rates: */ config.fast_rate = 0; - config.slow_rate = ~0; - for_each_child_of_node(node, child) { - if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) { - struct device_node *pwrlvl; - for_each_child_of_node(child, pwrlvl) { - ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val); - if (ret) { - dev_err(dev, "could not find gpu-freq: %d\n", ret); - return ret; - } - config.fast_rate = max(config.fast_rate, val); - config.slow_rate = min(config.slow_rate, val); - } - } - } - if (!config.fast_rate) { - dev_warn(dev, "could not find clk rates\n"); - /* This is a safe low speed for all devices: */ - config.fast_rate = 200000000; - config.slow_rate = 27000000; - } + ret = adreno_get_pwrlevels(dev, &config); + if (ret) + return ret; dev->platform_data = &config; set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); @@ -296,12 +333,35 @@ static const struct of_device_id dt_match[] = { {} }; +#ifdef CONFIG_PM +static int adreno_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_gpu *gpu = platform_get_drvdata(pdev); + + return gpu->funcs->pm_resume(gpu); +} + +static int adreno_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct msm_gpu *gpu = platform_get_drvdata(pdev); + + return gpu->funcs->pm_suspend(gpu); +} +#endif + +static const struct dev_pm_ops adreno_pm_ops = { + SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL) +}; + static struct platform_driver adreno_driver = { .probe = adreno_probe, .remove = adreno_remove, .driver = { .name = "adreno", .of_match_table = dt_match, + .pm = &adreno_pm_ops, }, }; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index c9bd1e6225f4..5b63fc649dcc 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) case MSM_PARAM_GMEM_SIZE: *value = adreno_gpu->gmem; return 0; + case MSM_PARAM_GMEM_BASE: + *value = 0x100000; + return 0; case MSM_PARAM_CHIP_ID: *value = adreno_gpu->rev.patchid | (adreno_gpu->rev.minor << 8) | @@ -68,6 +71,14 @@ int adreno_hw_init(struct msm_gpu *gpu) return ret; } + /* reset ringbuffer: */ + gpu->rb->cur = gpu->rb->start; + + /* reset completed fence seqno: */ + adreno_gpu->memptrs->fence = gpu->fctx->completed_fence; + adreno_gpu->memptrs->rptr = 0; + adreno_gpu->memptrs->wptr = 0; + /* Setup REG_CP_RB_CNTL: */ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, /* size is log2(quad-words): */ @@ -111,29 +122,20 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu) void adreno_recover(struct msm_gpu *gpu) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct drm_device *dev = gpu->dev; int ret; - gpu->funcs->pm_suspend(gpu); - - /* reset ringbuffer: */ - gpu->rb->cur = gpu->rb->start; - - /* reset completed fence seqno: */ - adreno_gpu->memptrs->fence = gpu->fctx->completed_fence; - adreno_gpu->memptrs->rptr = 0; - adreno_gpu->memptrs->wptr = 0; + // XXX pm-runtime?? 
we *need* the device to be off after this + // so maybe continuing to call ->pm_suspend/resume() is better? + gpu->funcs->pm_suspend(gpu); gpu->funcs->pm_resume(gpu); - disable_irq(gpu->irq); - ret = gpu->funcs->hw_init(gpu); + ret = msm_gpu_hw_init(gpu); if (ret) { dev_err(dev->dev, "gpu hw init failed: %d\n", ret); /* hmm, oh well? */ } - enable_irq(gpu->irq); } void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, @@ -259,8 +261,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); - gpu->funcs->pm_resume(gpu); - /* dump these out in a form that can be parsed by demsm: */ seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { @@ -273,8 +273,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) seq_printf(m, "IO:R %08x %08x\n", addr<<2, val); } } - - gpu->funcs->pm_suspend(gpu); } #endif @@ -354,14 +352,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->rev = config->rev; gpu->fast_rate = config->fast_rate; - gpu->slow_rate = config->slow_rate; gpu->bus_freq = config->bus_freq; #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING gpu->bus_scale_table = config->bus_scale_table; #endif - DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u", - gpu->fast_rate, gpu->slow_rate, gpu->bus_freq); + DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u", + gpu->fast_rate, gpu->bus_freq); ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq", @@ -369,6 +366,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (ret) return ret; + pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); if (ret) { dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", @@ -418,18 +419,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return 0; } -void adreno_gpu_cleanup(struct adreno_gpu *gpu) +void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { - if (gpu->memptrs_bo) { - if (gpu->memptrs) - msm_gem_put_vaddr(gpu->memptrs_bo); + struct msm_gpu *gpu = &adreno_gpu->base; + + if (adreno_gpu->memptrs_bo) { + if (adreno_gpu->memptrs) + msm_gem_put_vaddr(adreno_gpu->memptrs_bo); + + if (adreno_gpu->memptrs_iova) + msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id); + + drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo); + } + release_firmware(adreno_gpu->pm4); + release_firmware(adreno_gpu->pfp); - if (gpu->memptrs_iova) - msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); + msm_gpu_cleanup(gpu); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + if (gpu->aspace) { + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_put(gpu->aspace); } - release_firmware(gpu->pm4); - release_firmware(gpu->pfp); - msm_gpu_cleanup(&gpu->base); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 42e444a67630..fb4831f9f80b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -123,7 +123,7 @@ struct adreno_gpu { /* platform config data (ie. 
from DT, or pdata) */ struct adreno_platform_config { struct adreno_rev rev; - uint32_t fast_rate, slow_rate, bus_freq; + uint32_t fast_rate, bus_freq; #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; #endif diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 921270ea6059..a879ffa534b4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id, } } } else { - msm_dsi_host_reset_phy(mdsi->host); + msm_dsi_host_reset_phy(msm_dsi->host); ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index a54d3bb5baad..8177e8511afd 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -18,13 +18,6 @@ #include <linux/hdmi.h> #include "hdmi.h" - -/* Supported HDMI Audio channels */ -#define MSM_HDMI_AUDIO_CHANNEL_2 0 -#define MSM_HDMI_AUDIO_CHANNEL_4 1 -#define MSM_HDMI_AUDIO_CHANNEL_6 2 -#define MSM_HDMI_AUDIO_CHANNEL_8 3 - /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */ static int nchannels[] = { 2, 4, 6, 8 }; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 1c29618f4ddb..f29194a74a19 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -114,15 +114,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) spin_lock_irqsave(&dev->event_lock, flags); event = mdp4_crtc->event; if (event) { - /* if regular vblank case (!file) or if cancel-flip from - * preclose on file that requested flip, then send the - * event: - */ - if (!file || (event->base.file_priv == file)) { - mdp4_crtc->event = NULL; - DBG("%s: send event: %p", mdp4_crtc->name, event); - drm_crtc_send_vblank_event(crtc, event); - } + mdp4_crtc->event = NULL; + DBG("%s: send event: %p", mdp4_crtc->name, event); + drm_crtc_send_vblank_event(crtc, event); } spin_unlock_irqrestore(&dev->event_lock, flags); } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 5b6516bb9d06..3d26d7774c08 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -169,7 +169,7 @@ static void mdp4_destroy(struct msm_kms *kms) if (aspace) { aspace->mmu->funcs->detach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_destroy(aspace); + msm_gem_address_space_put(aspace); } if (mdp4_kms->rpm_enabled) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index ba2d017f6591..c2bdad88447e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -70,6 +70,18 @@ const struct mdp5_cfg_hw msm8x74v1_config = { .lm = { .count = 5, .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, .nb_stages = 5, }, .dspp = { @@ -134,6 +146,18 @@ const struct mdp5_cfg_hw msm8x74v2_config = { .lm = { .count = 5, .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 }, + .instances 
= { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + }, .nb_stages = 5, .max_width = 2048, .max_height = 0xFFFF, @@ -167,6 +191,7 @@ const struct mdp5_cfg_hw apq8084_config = { .mdp = { .count = 1, .caps = MDP_CAP_SMP | + MDP_CAP_SRC_SPLIT | 0, }, .smp = { @@ -211,6 +236,22 @@ const struct mdp5_cfg_hw apq8084_config = { .lm = { .count = 6, .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = 3, + .caps = MDP_LM_CAP_DISPLAY, }, + }, .nb_stages = 5, .max_width = 2048, .max_height = 0xFFFF, @@ -282,6 +323,12 @@ const struct mdp5_cfg_hw msm8x16_config = { .lm = { .count = 2, /* LM0 and LM3 */ .base = { 0x44000, 0x47000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB }, + }, .nb_stages = 8, .max_width = 2048, .max_height = 0xFFFF, @@ -306,6 +353,7 @@ const struct mdp5_cfg_hw msm8x94_config = { .mdp = { .count = 1, .caps = MDP_CAP_SMP | + MDP_CAP_SRC_SPLIT | 0, }, .smp = { @@ -350,6 +398,22 @@ const struct mdp5_cfg_hw msm8x94_config = { .lm = { .count = 6, .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = 2, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = 3, + .caps = MDP_LM_CAP_DISPLAY, }, + }, .nb_stages = 8, .max_width = 2048, .max_height = 0xFFFF, @@ -385,6 +449,7 @@ const struct mdp5_cfg_hw msm8x96_config = { .count = 1, .caps = MDP_CAP_DSC | MDP_CAP_CDM | + MDP_CAP_SRC_SPLIT | 0, }, .ctl = { @@ -434,6 +499,22 @@ const struct mdp5_cfg_hw msm8x96_config = { .lm = { .count = 6, .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, + .instances = { + { .id = 0, .pp = 0, .dspp = 0, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 1, .pp = 1, .dspp = 1, + .caps = MDP_LM_CAP_DISPLAY, }, + { .id = 2, .pp = 2, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY | + MDP_LM_CAP_PAIR, }, + { .id = 3, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 4, .pp = -1, .dspp = -1, + .caps = MDP_LM_CAP_WB, }, + { .id = 5, .pp = 3, .dspp = -1, + .caps = MDP_LM_CAP_DISPLAY, }, + }, .nb_stages = 8, .max_width = 2560, .max_height = 0xFFFF, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index b1c7daaede86..75910d0f2f4c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -39,8 +39,16 @@ struct mdp5_sub_block { MDP5_SUB_BLOCK_DEFINITION; }; +struct mdp5_lm_instance { + int id; + int pp; + int dspp; + uint32_t caps; +}; + struct mdp5_lm_block { 
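/*
 * Each mdp5_lm_instance entry in the tables above describes one layer
 * mixer: pp and dspp give the indices of the attached ping-pong and
 * DSPP blocks (-1 when the mixer has none, as on the writeback-only
 * mixers), and caps flags the legal uses: MDP_LM_CAP_DISPLAY,
 * MDP_LM_CAP_WB, and MDP_LM_CAP_PAIR for mixers that can be paired on
 * hardware advertising MDP_CAP_SRC_SPLIT. A hypothetical lookup over
 * such a table might be:
 *
 *   for (i = 0; i < cfg->lm.count; i++)
 *       if (cfg->lm.instances[i].caps & MDP_LM_CAP_DISPLAY)
 *           return &cfg->lm.instances[i];
 */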
MDP5_SUB_BLOCK_DEFINITION; + struct mdp5_lm_instance instances[MAX_BASES]; uint32_t nb_stages; /* number of stages per blender */ uint32_t max_width; /* Maximum output resolution */ uint32_t max_height; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index df1c8adec3f3..8dafc7bdba48 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, struct device *dev = encoder->dev->dev; u32 total_lines_x100, vclks_line, cfg; long vsync_clk_speed; - int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) { dev_err(dev, "vsync_clk is not initialized\n"); @@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder, static int pingpong_tearcheck_enable(struct drm_encoder *encoder) { struct mdp5_kms *mdp5_kms = get_kms(encoder); - int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; int ret; ret = clk_set_rate(mdp5_kms->vsync_clk, @@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder) static void pingpong_tearcheck_disable(struct drm_encoder *encoder) { struct mdp5_kms *mdp5_kms = get_kms(encoder); - int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc)); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + int pp_id = mixer->pp; mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0); clk_disable_unprepare(mdp5_kms->vsync_clk); @@ -129,8 +132,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); - mode = adjusted_mode; DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", @@ -142,23 +143,23 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, mode->vsync_end, mode->vtotal, mode->type, mode->flags); pingpong_tearcheck_setup(encoder, mode); - mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf, - mdp5_cmd_enc->ctl); + mdp5_crtc_set_pipeline(encoder->crtc); } void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; - struct mdp5_interface *intf = &mdp5_cmd_enc->intf; + struct mdp5_interface *intf = mdp5_cmd_enc->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); if (WARN_ON(!mdp5_cmd_enc->enabled)) return; pingpong_tearcheck_disable(encoder); - mdp5_ctl_set_encoder_state(ctl, false); - mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_set_encoder_state(ctl, pipeline, false); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); bs_set(mdp5_cmd_enc, 0); @@ -169,7 +170,8 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; - struct mdp5_interface *intf = &mdp5_cmd_enc->intf; + struct mdp5_interface *intf = mdp5_cmd_enc->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); if (WARN_ON(mdp5_cmd_enc->enabled)) return; @@ -178,9 +180,9 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) if 
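/*
 * With mixers assigned per CRTC, the ping-pong index is no longer
 * derived from a fixed LM-to-PP mapping; each tearcheck helper asks
 * the CRTC for its mixer and reads mixer->pp, as above:
 *
 *   struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
 *   int pp_id = mixer->pp;
 */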
(pingpong_tearcheck_enable(encoder)) return; - mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); - mdp5_ctl_set_encoder_state(ctl, true); + mdp5_ctl_set_encoder_state(ctl, pipeline, true); mdp5_cmd_enc->enabled = true; } @@ -197,7 +199,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, return -EINVAL; mdp5_kms = get_kms(encoder); - intf_num = mdp5_cmd_enc->intf.num; + intf_num = mdp5_cmd_enc->intf->num; /* Switch slave encoder's trigger MUX, to use the master's * start signal for the slave encoder diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index d0c8b38b96ce..9217e0d6e93e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -32,13 +32,7 @@ struct mdp5_crtc { int id; bool enabled; - /* layer mixer used for this CRTC (+ its lock): */ -#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id) - int lm; - spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */ - - /* CTL used for this CRTC: */ - struct mdp5_ctl *ctl; + spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */ /* if there is a pending flip, these will be non-null: */ struct drm_pending_vblank_event *event; @@ -61,8 +55,6 @@ struct mdp5_crtc { struct completion pp_completion; - bool cmd_mode; - struct { /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ spinlock_t lock; @@ -97,10 +89,12 @@ static void request_pp_done_pending(struct drm_crtc *crtc) static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; DBG("%s: flush=%08x", crtc->name, flush_mask); - return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); + return mdp5_ctl_commit(ctl, pipeline, flush_mask); } /* @@ -110,19 +104,25 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) */ static u32 crtc_flush_all(struct drm_crtc *crtc) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_hw_mixer *mixer, *r_mixer; struct drm_plane *plane; uint32_t flush_mask = 0; /* this should not happen: */ - if (WARN_ON(!mdp5_crtc->ctl)) + if (WARN_ON(!mdp5_cstate->ctl)) return 0; drm_atomic_crtc_for_each_plane(plane, crtc) { flush_mask |= mdp5_plane_get_flush(plane); } - flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm); + mixer = mdp5_cstate->pipeline.mixer; + flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm); + + r_mixer = mdp5_cstate->pipeline.r_mixer; + if (r_mixer) + flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); return crtc_flush(crtc, flush_mask); } @@ -130,7 +130,10 @@ static u32 crtc_flush_all(struct drm_crtc *crtc) /* if file!=NULL, this is preclose potential cancel-flip path */ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) { + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; struct drm_device *dev = crtc->dev; struct drm_pending_vblank_event *event; unsigned long flags; @@ -138,22 +141,17 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) spin_lock_irqsave(&dev->event_lock, flags); event = mdp5_crtc->event; if (event) { - /* 
if regular vblank case (!file) or if cancel-flip from - * preclose on file that requested flip, then send the - * event: - */ - if (!file || (event->base.file_priv == file)) { - mdp5_crtc->event = NULL; - DBG("%s: send event: %p", crtc->name, event); - drm_crtc_send_vblank_event(crtc, event); - } + mdp5_crtc->event = NULL; + DBG("%s: send event: %p", crtc->name, event); + drm_crtc_send_vblank_event(crtc, event); } spin_unlock_irqrestore(&dev->event_lock, flags); - if (mdp5_crtc->ctl && !crtc->state->enable) { + if (ctl && !crtc->state->enable) { /* set STAGE_UNUSED for all layers */ - mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0); - mdp5_crtc->ctl = NULL; + mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0); + /* XXX: What to do here? */ + /* mdp5_crtc->ctl = NULL; */ } } @@ -193,6 +191,12 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) } /* + * left/right pipe offsets for the stage array used in blend_setup() + */ +#define PIPE_LEFT 0 +#define PIPE_RIGHT 1 + +/* * blend_setup() - blend all the planes of a CRTC * * If no base layer is available, border will be enabled as the base layer. @@ -202,18 +206,26 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) static void blend_setup(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_plane *plane; const struct mdp5_cfg_hw *hw_cfg; struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL}; const struct mdp_format *format; - uint32_t lm = mdp5_crtc->lm; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + uint32_t lm = mixer->lm; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; + uint32_t r_lm = r_mixer ? r_mixer->lm : 0; + struct mdp5_ctl *ctl = mdp5_cstate->ctl; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; - enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE }; + enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; + enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; int i, plane_cnt = 0; bool bg_alpha_enabled = false; u32 mixer_op_mode = 0; + u32 val; #define blender(stage) ((stage) - STAGE0) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); @@ -221,14 +233,35 @@ static void blend_setup(struct drm_crtc *crtc) spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); /* ctl could be released already when we are shutting down: */ - if (!mdp5_crtc->ctl) + /* XXX: Can this happen now? 
*/ + if (!ctl) goto out; /* Collect all plane information */ drm_atomic_crtc_for_each_plane(plane, crtc) { + enum mdp5_pipe right_pipe; + pstate = to_mdp5_plane_state(plane->state); pstates[pstate->stage] = pstate; - stage[pstate->stage] = mdp5_plane_pipe(plane); + stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane); + /* + * if we have a right mixer, stage the same pipe as we + * have on the left mixer + */ + if (r_mixer) + r_stage[pstate->stage][PIPE_LEFT] = + mdp5_plane_pipe(plane); + /* + * if we have a right pipe (i.e., the plane comprises two + * hwpipes), then stage the right pipe on the right side of both + * the layer mixers + */ + right_pipe = mdp5_plane_right_pipe(plane); + if (right_pipe) { + stage[pstate->stage][PIPE_RIGHT] = right_pipe; + r_stage[pstate->stage][PIPE_RIGHT] = right_pipe; + } + plane_cnt++; } @@ -294,12 +327,27 @@ static void blend_setup(struct drm_crtc *crtc) blender(i)), fg_alpha); mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm, blender(i)), bg_alpha); + if (r_mixer) { + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm, + blender(i)), blend_op); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm, + blender(i)), fg_alpha); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm, + blender(i)), bg_alpha); + } } - mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode); - - mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), + val | mixer_op_mode); + if (r_mixer) { + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), + val | mixer_op_mode); + } + mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt, + ctl_blend_flags); out: spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); } @@ -307,7 +355,12 @@ out: static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_kms *mdp5_kms = get_kms(crtc); + struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer; + struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer; + uint32_t lm = mixer->lm; + u32 mixer_width, val; unsigned long flags; struct drm_display_mode *mode; @@ -325,16 +378,40 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc) mode->vsync_end, mode->vtotal, mode->type, mode->flags); + mixer_width = mode->hdisplay; + if (r_mixer) + mixer_width /= 2; + spin_lock_irqsave(&mdp5_crtc->lm_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm), - MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm), + MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); + + /* Assign mixer to LEFT side in source split mode */ + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm)); + val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val); + + if (r_mixer) { + u32 r_lm = r_mixer->lm; + + mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm), + MDP5_LM_OUT_SIZE_WIDTH(mixer_width) | + MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); + + /* Assign mixer to RIGHT side in source split mode */ + val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm)); + val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT; + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val); + } + spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags); }
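/*
 * [Editor's worked example] The stage arrays above are indexed as
 * stage[mixer_stage][pipe_slot].  For a fullscreen plane built from two
 * hwpipes on a source-split (paired-mixer) CRTC, the collection loop
 * above ends up filling (pipe names are illustrative):
 *
 *   stage[STAGE0][PIPE_LEFT]    = SSPP_VIG0;   left half of the plane
 *   stage[STAGE0][PIPE_RIGHT]   = SSPP_VIG1;   right half of the plane
 *   r_stage[STAGE0][PIPE_LEFT]  = SSPP_VIG0;   the right mixer stages
 *   r_stage[STAGE0][PIPE_RIGHT] = SSPP_VIG1;   the same pipes
 *
 * Each mixer blends the full stage list but outputs only its half of
 * the scanline, which is why mdp5_crtc_mode_set_nofb() above programs
 * LM_OUT_SIZE with mode->hdisplay / 2 whenever a right mixer is
 * present.
 */
static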
void mdp5_crtc_disable(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_kms *mdp5_kms = get_kms(crtc); DBG("%s", crtc->name); @@ -342,7 +419,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc) if (WARN_ON(!mdp5_crtc->enabled)) return; - if (mdp5_crtc->cmd_mode) + if (mdp5_cstate->cmd_mode) mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done); mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); @@ -354,6 +431,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc) static void mdp5_crtc_enable(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_kms *mdp5_kms = get_kms(crtc); DBG("%s", crtc->name); @@ -364,12 +442,73 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc) mdp5_enable(mdp5_kms); mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); - if (mdp5_crtc->cmd_mode) + if (mdp5_cstate->cmd_mode) mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done); mdp5_crtc->enabled = true; } +int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + bool need_right_mixer) +{ + struct mdp5_crtc_state *mdp5_cstate = + to_mdp5_crtc_state(new_crtc_state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_interface *intf; + bool new_mixer = false; + + new_mixer = !pipeline->mixer; + + if ((need_right_mixer && !pipeline->r_mixer) || + (!need_right_mixer && pipeline->r_mixer)) + new_mixer = true; + + if (new_mixer) { + struct mdp5_hw_mixer *old_mixer = pipeline->mixer; + struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer; + u32 caps; + int ret; + + caps = MDP_LM_CAP_DISPLAY; + if (need_right_mixer) + caps |= MDP_LM_CAP_PAIR; + + ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps, + &pipeline->mixer, need_right_mixer ? 
+ &pipeline->r_mixer : NULL); + if (ret) + return ret; + + mdp5_mixer_release(new_crtc_state->state, old_mixer); + if (old_r_mixer) { + mdp5_mixer_release(new_crtc_state->state, old_r_mixer); + if (!need_right_mixer) + pipeline->r_mixer = NULL; + } + } + + /* + * these should have been already set up in the encoder's atomic + * check (called by drm_atomic_helper_check_modeset) + */ + intf = pipeline->intf; + + mdp5_cstate->err_irqmask = intf2err(intf->num); + mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf); + + if ((intf->type == INTF_DSI) && + (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) { + mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer); + mdp5_cstate->cmd_mode = true; + } else { + mdp5_cstate->pp_done_irqmask = 0; + mdp5_cstate->cmd_mode = false; + } + + return 0; +} + struct plane_state { struct drm_plane *plane; struct mdp5_plane_state *state; @@ -391,6 +530,29 @@ static bool is_fullscreen(struct drm_crtc_state *cstate, ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); } +enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state, + struct drm_plane_state *bpstate) +{ + struct mdp5_crtc_state *mdp5_cstate = + to_mdp5_crtc_state(new_crtc_state); + + /* + * if we're in source split mode, it's mandatory to have + * border out on the base stage + */ + if (mdp5_cstate->pipeline.r_mixer) + return STAGE0; + + /* if the bottom-most layer is not fullscreen, we need to use + * it for solid-color: + */ + if (!is_fullscreen(new_crtc_state, bpstate)) + return STAGE0; + + return STAGE_BASE; +} + static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -400,8 +562,12 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; + const struct drm_display_mode *mode = &state->adjusted_mode; bool cursor_plane = false; - int cnt = 0, base = 0, i; + bool need_right_mixer = false; + int cnt = 0, i; + int ret; + enum mdp_mixer_stage_id start; DBG("%s: check", crtc->name); @@ -409,32 +575,52 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); + /* + * if any plane on this crtc uses 2 hwpipes, then we need + * the crtc to have a right hwmixer. 
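/*
 * [Editor's sketch] mdp5_crtc_setup_pipeline() above treats mixer
 * assignment as ordinary atomic state: old mixers are released back to
 * the global pool and new ones claimed against the same
 * drm_atomic_state, so a failed check unwinds cleanly.  Its trigger
 * condition, condensed (names illustrative, not driver API):
 */
static int example_needs_new_mixer(int have_mixer, int have_r_mixer,
				   int need_right_mixer)
{
	/* no mixer yet, or the left/right pairing requirement changed */
	return !have_mixer || (need_right_mixer != have_r_mixer);
}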
+ */ + if (pstates[cnt].state->r_hwpipe) + need_right_mixer = true; cnt++; if (plane->type == DRM_PLANE_TYPE_CURSOR) cursor_plane = true; } - /* assign a stage based on sorted zpos property */ - sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + /* bail out early if there aren't any planes */ + if (!cnt) + return 0; - /* if the bottom-most layer is not fullscreen, we need to use - * it for solid-color: + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + /* + * we need a right hwmixer if the mode's width is greater than a single + * LM's max width */ - if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) - base++; + if (mode->hdisplay > hw_cfg->lm.max_width) + need_right_mixer = true; + + ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer); + if (ret) { + dev_err(dev->dev, "couldn't assign mixers %d\n", ret); + return ret; + } + + /* assign a stage based on sorted zpos property */ + sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); /* trigger a warning if cursor isn't the highest zorder */ WARN_ON(cursor_plane && (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); + start = get_start_stage(crtc, state, &pstates[0].state->base); + /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: */ - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - - if ((cnt + base) >= hw_cfg->lm.nb_stages) { - dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base); + if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) { + dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n", + cnt, start); return -EINVAL; } @@ -442,7 +628,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, if (cursor_plane && (i == (cnt - 1))) pstates[i].state->stage = hw_cfg->lm.nb_stages; else - pstates[i].state->stage = STAGE_BASE + i + base; + pstates[i].state->stage = start + i; DBG("%s: assign pipe %s on stage=%d", crtc->name, pstates[i].plane->name, pstates[i].state->stage); @@ -461,6 +647,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct drm_device *dev = crtc->dev; unsigned long flags; @@ -477,7 +664,8 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, * it means we are trying to flush a CRTC whose state is disabled: * nothing else needs to be done. */ - if (unlikely(!mdp5_crtc->ctl)) + /* XXX: Can this happen now ? */ + if (unlikely(!mdp5_cstate->ctl)) return; blend_setup(crtc); @@ -488,11 +676,16 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc, * This is safe because no pp_done will happen before SW trigger * in command mode. */ - if (mdp5_crtc->cmd_mode) + if (mdp5_cstate->cmd_mode) request_pp_done_pending(crtc); mdp5_crtc->flushed_mask = crtc_flush_all(crtc); + /* XXX are we leaking out state here? 
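/*
 * [Editor's sketch] Two conditions in mdp5_crtc_atomic_check() above
 * force a right mixer, and the stage budget then starts from
 * get_start_stage() rather than a fixed base: with border-out in use
 * the planes begin at STAGE0, so at most nb_stages - start planes fit.
 * Factored out for clarity; all names are illustrative.
 */
static int example_need_right_mixer(int plane_uses_two_hwpipes,
				    int mode_hdisplay, int lm_max_width)
{
	return plane_uses_two_hwpipes || (mode_hdisplay > lm_max_width);
}

static int example_too_many_planes(int cnt, int start, int nb_stages)
{
	return (cnt + start - 1) >= nb_stages; /* mirrors the check above */
}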
*/ + mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask; + mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask; + mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask; + request_pending(crtc, PENDING_FLIP); } @@ -527,11 +720,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, uint32_t width, uint32_t height) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; struct drm_device *dev = crtc->dev; struct mdp5_kms *mdp5_kms = get_kms(crtc); struct drm_gem_object *cursor_bo, *old_bo = NULL; uint32_t blendcfg, stride; uint64_t cursor_addr; + struct mdp5_ctl *ctl; int ret, lm; enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); @@ -544,7 +740,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - if (NULL == mdp5_crtc->ctl) + ctl = mdp5_cstate->ctl; + if (!ctl) + return -EINVAL; + + /* don't support LM cursors when we have source split enabled */ + if (mdp5_cstate->pipeline.r_mixer) return -EINVAL; if (!handle) { @@ -561,7 +762,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (ret) return -EINVAL; - lm = mdp5_crtc->lm; + lm = mdp5_cstate->pipeline.mixer->lm; stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0); spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); @@ -591,7 +792,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); set_cursor: - ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable); + ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); if (ret) { dev_err(dev->dev, "failed to %sable cursor: %d\n", cursor_enable ? "en" : "dis", ret); @@ -613,11 +814,17 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct mdp5_kms *mdp5_kms = get_kms(crtc); struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + uint32_t lm = mdp5_cstate->pipeline.mixer->lm; uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); uint32_t roi_w; uint32_t roi_h; unsigned long flags; + /* don't support LM cursors when we have source split enabled */ + if (mdp5_cstate->pipeline.r_mixer) + return -EINVAL; + /* In case the CRTC is disabled, just drop the cursor update */ if (unlikely(!crtc->state->enable)) return 0; @@ -628,10 +835,10 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) get_roi(crtc, &roi_w, &roi_h); spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm), + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | MDP5_LM_CURSOR_SIZE_ROI_W(roi_w)); - mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm), + mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm), MDP5_LM_CURSOR_START_XY_Y_START(y) | MDP5_LM_CURSOR_START_XY_X_START(x)); spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); @@ -641,16 +848,80 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } +static void +mdp5_crtc_atomic_print_state(struct drm_printer *p, + const struct drm_crtc_state *state) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); + struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + struct mdp5_kms *mdp5_kms = get_kms(state->crtc); + + if (WARN_ON(!pipeline)) + return; + + drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+ pipeline->mixer->name : "(null)"); + + if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) + drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ? + pipeline->r_mixer->name : "(null)"); +} + +static void mdp5_crtc_reset(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (crtc->state) { + __drm_atomic_helper_crtc_destroy_state(crtc->state); + kfree(to_mdp5_crtc_state(crtc->state)); + } + + mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL); + + if (mdp5_cstate) { + mdp5_cstate->base.crtc = crtc; + crtc->state = &mdp5_cstate->base; + } +} + +static struct drm_crtc_state * +mdp5_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc->state)) + return NULL; + + mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state), + sizeof(*mdp5_cstate), GFP_KERNEL); + if (!mdp5_cstate) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base); + + return &mdp5_cstate->base; +} + +static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + + kfree(mdp5_cstate); +} + static const struct drm_crtc_funcs mdp5_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = mdp5_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, .set_property = drm_atomic_helper_crtc_set_property, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, .cursor_set = mdp5_crtc_cursor_set, .cursor_move = mdp5_crtc_cursor_move, + .atomic_print_state = mdp5_crtc_atomic_print_state, }; static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { @@ -658,9 +929,10 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { .destroy = mdp5_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, .set_property = drm_atomic_helper_crtc_set_property, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .reset = mdp5_crtc_reset, + .atomic_duplicate_state = mdp5_crtc_duplicate_state, + .atomic_destroy_state = mdp5_crtc_destroy_state, + .atomic_print_state = mdp5_crtc_atomic_print_state, }; static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { @@ -710,22 +982,26 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); int ret; ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion, msecs_to_jiffies(50)); if (ret == 0) - dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm); + dev_warn(dev->dev, "pp done time out, lm=%d\n", + mdp5_cstate->pipeline.mixer->lm); } static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_ctl *ctl = mdp5_cstate->ctl; int ret; /* Should not call this function if crtc is disabled. 
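/*
 * [Editor's note] The reset/duplicate/destroy trio above is the
 * standard DRM pattern for subclassing drm_crtc_state: the driver
 * state embeds struct drm_crtc_state as its base member, the core
 * helpers operate on &mdp5_cstate->base, and kmemdup()/kfree() handle
 * the full subclass.  With atomic_print_state wired up, each CRTC
 * entry in the DRM debugfs "state" dump gains mixer lines shaped
 * roughly like (values illustrative):
 *
 *   crtc[...]: crtc-0
 *       hwmixer=lm0
 *       right hwmixer=lm5
 */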
*/ - if (!mdp5_crtc->ctl) + if (!ctl) return; ret = drm_crtc_vblank_get(crtc); @@ -733,7 +1009,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc) return; ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue, - ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) & + ((mdp5_ctl_get_commit_status(ctl) & mdp5_crtc->flushed_mask) == 0), msecs_to_jiffies(50)); if (ret <= 0) @@ -750,52 +1026,54 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) return mdp5_crtc->vblank.irqmask; } -void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, - struct mdp5_interface *intf, struct mdp5_ctl *ctl) +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_kms *mdp5_kms = get_kms(crtc); - int lm = mdp5_crtc_get_lm(crtc); - - /* now that we know what irq's we want: */ - mdp5_crtc->err.irqmask = intf2err(intf->num); - mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf); - - if ((intf->type == INTF_DSI) && - (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) { - mdp5_crtc->pp_done.irqmask = lm2ppdone(lm); - mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq; - mdp5_crtc->cmd_mode = true; - } else { - mdp5_crtc->pp_done.irqmask = 0; - mdp5_crtc->pp_done.irq = NULL; - mdp5_crtc->cmd_mode = false; - } + /* should this be done elsewhere ? */ mdp_irq_update(&mdp5_kms->base); - mdp5_crtc->ctl = ctl; - mdp5_ctl_set_pipeline(ctl, intf, lm); + mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline); } struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - return mdp5_crtc->ctl; + return mdp5_cstate->ctl; } -int mdp5_crtc_get_lm(struct drm_crtc *crtc) +struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); - return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm; + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc)) + return ERR_PTR(-EINVAL); + + mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return WARN_ON(!mdp5_cstate->pipeline.mixer) ? 
+ ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer; +} + +struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate; + + if (WARN_ON(!crtc)) + return ERR_PTR(-EINVAL); + + mdp5_cstate = to_mdp5_crtc_state(crtc->state); + + return &mdp5_cstate->pipeline; } void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) { - struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); - if (mdp5_crtc->cmd_mode) + if (mdp5_cstate->cmd_mode) mdp5_crtc_wait_for_pp_done(crtc); else mdp5_crtc_wait_for_flush_done(crtc); @@ -816,7 +1094,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, crtc = &mdp5_crtc->base; mdp5_crtc->id = id; - mdp5_crtc->lm = GET_LM_ID(id); spin_lock_init(&mdp5_crtc->lm_lock); spin_lock_init(&mdp5_crtc->cursor.lock); @@ -824,6 +1101,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; + mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq; if (cursor_plane) drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index 8b93f7e13200..439e0a300e25 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -32,24 +32,16 @@ #define CTL_STAT_BUSY 0x1 #define CTL_STAT_BOOKED 0x2 -struct op_mode { - struct mdp5_interface intf; - - bool encoder_enabled; - uint32_t start_mask; -}; - struct mdp5_ctl { struct mdp5_ctl_manager *ctlm; u32 id; - int lm; /* CTL status bitmask */ u32 status; - /* Operation Mode Configuration for the Pipeline */ - struct op_mode pipeline; + bool encoder_enabled; + uint32_t start_mask; /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ spinlock_t hw_lock; @@ -146,9 +138,10 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms, spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); } -static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) +static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) { unsigned long flags; + struct mdp5_interface *intf = pipeline->intf; u32 ctl_op = 0; if (!mdp5_cfg_intf_is_virtual(intf->type)) @@ -169,52 +162,50 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf) break; } + if (pipeline->r_mixer) + ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE | + MDP5_CTL_OP_PACK_3D(1); + spin_lock_irqsave(&ctl->hw_lock, flags); ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); spin_unlock_irqrestore(&ctl->hw_lock, flags); } -int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, - struct mdp5_interface *intf, int lm) +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + struct mdp5_interface *intf = pipeline->intf; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) { - dev_err(mdp5_kms->dev->dev, - "CTL %d is allocated by INTF %d, but used by INTF %d\n", - ctl->id, ctl->pipeline.intf.num, intf->num); - return -EINVAL; - } - - ctl->lm = lm; - - memcpy(&ctl->pipeline.intf, intf, sizeof(*intf)); - - ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) | - mdp_ctl_flush_mask_encoder(intf); + ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) | + mdp_ctl_flush_mask_encoder(intf); + if (r_mixer) + ctl->start_mask |= 
mdp_ctl_flush_mask_lm(r_mixer->lm); /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ if (!mdp5_cfg_intf_is_virtual(intf->type)) set_display_intf(mdp5_kms, intf); - set_ctl_op(ctl, intf); + set_ctl_op(ctl, pipeline); return 0; } -static bool start_signal_needed(struct mdp5_ctl *ctl) +static bool start_signal_needed(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline) { - struct op_mode *pipeline = &ctl->pipeline; + struct mdp5_interface *intf = pipeline->intf; - if (!pipeline->encoder_enabled || pipeline->start_mask != 0) + if (!ctl->encoder_enabled || ctl->start_mask != 0) return false; - switch (pipeline->intf.type) { + switch (intf->type) { case INTF_WB: return true; case INTF_DSI: - return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND; + return intf->mode == MDP5_INTF_DSI_MODE_COMMAND; default: return false; } @@ -236,19 +227,23 @@ static void send_start_signal(struct mdp5_ctl *ctl) spin_unlock_irqrestore(&ctl->hw_lock, flags); } -static void refill_start_mask(struct mdp5_ctl *ctl) +static void refill_start_mask(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline) { - struct op_mode *pipeline = &ctl->pipeline; - struct mdp5_interface *intf = &ctl->pipeline.intf; + struct mdp5_interface *intf = pipeline->intf; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm); + ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm); + if (r_mixer) + ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); /* * Writeback encoder needs to program & flush * address registers for each page flip.. */ if (intf->type == INTF_WB) - pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf); + ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf); } /** @@ -259,17 +254,21 @@ static void refill_start_mask(struct mdp5_ctl *ctl) * Note: * This encoder state is needed to trigger START signal (data path kickoff). */ -int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled) +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline, + bool enabled) { + struct mdp5_interface *intf = pipeline->intf; + if (WARN_ON(!ctl)) return -EINVAL; - ctl->pipeline.encoder_enabled = enabled; - DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off"); + ctl->encoder_enabled = enabled; + DBG("intf_%d: %s", intf->num, enabled ? 
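/*
 * [Editor's sketch] START-signal bookkeeping, now derived from the
 * pipeline instead of a cached LM index: ctl->start_mask holds the
 * flush bits that must drain before a START may be sent, and a right
 * mixer contributes its own LM bit.  mdp_ctl_flush_mask_lm() is the
 * driver's real helper (declared in mdp5_ctl.h); the wrapper below is
 * illustrative only.
 */
u32 mdp_ctl_flush_mask_lm(int lm); /* prototype from mdp5_ctl.h */

static u32 example_start_mask(int lm, int r_lm, u32 encoder_flush_bits)
{
	u32 mask = mdp_ctl_flush_mask_lm(lm) | encoder_flush_bits;

	if (r_lm >= 0) /* pipeline has a right mixer */
		mask |= mdp_ctl_flush_mask_lm(r_lm);

	return mask;
}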
"on" : "off"); - if (start_signal_needed(ctl)) { + if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); - refill_start_mask(ctl); + refill_start_mask(ctl, pipeline); } return 0; @@ -280,29 +279,35 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled) * CTL registers need to be flushed after calling this function * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ -int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable) +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + int cursor_id, bool enable) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; unsigned long flags; u32 blend_cfg; - int lm = ctl->lm; + struct mdp5_hw_mixer *mixer = pipeline->mixer; + + if (unlikely(WARN_ON(!mixer))) { + dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM", + ctl->id); + return -EINVAL; + } - if (unlikely(WARN_ON(lm < 0))) { - dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d", - ctl->id, lm); + if (pipeline->r_mixer) { + dev_err(ctl_mgr->dev->dev, "unsupported configuration"); return -EINVAL; } spin_lock_irqsave(&ctl->hw_lock, flags); - blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm)); + blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); if (enable) blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; else blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT; - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); ctl->cursor_on = enable; spin_unlock_irqrestore(&ctl->hw_lock, flags); @@ -355,37 +360,88 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, } } -int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, - u32 ctl_blend_op_flags) +static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) +{ + unsigned long flags; + struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; + int i; + + spin_lock_irqsave(&ctl->hw_lock, flags); + + for (i = 0; i < ctl_mgr->nlm; i++) { + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); + } + + spin_unlock_irqrestore(&ctl->hw_lock, flags); +} + +#define PIPE_LEFT 0 +#define PIPE_RIGHT 1 +int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + enum mdp5_pipe stage[][MAX_PIPE_STAGE], + enum mdp5_pipe r_stage[][MAX_PIPE_STAGE], + u32 stage_cnt, u32 ctl_blend_op_flags) { + struct mdp5_hw_mixer *mixer = pipeline->mixer; + struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; unsigned long flags; u32 blend_cfg = 0, blend_ext_cfg = 0; + u32 r_blend_cfg = 0, r_blend_ext_cfg = 0; int i, start_stage; + mdp5_ctl_reset_blend_regs(ctl); + if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) { start_stage = STAGE0; blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; + if (r_mixer) + r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR; } else { start_stage = STAGE_BASE; } for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { - blend_cfg |= mdp_ctl_blend_mask(stage[i], i); - blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); + blend_cfg |= + mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i); + blend_ext_cfg |= + mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i); + if (r_mixer) { + r_blend_cfg |= + mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) | + mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i); + r_blend_ext_cfg |= + mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) | + 
mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i); + } } spin_lock_irqsave(&ctl->hw_lock, flags); if (ctl->cursor_on) blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT; - ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg); - ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm), + blend_ext_cfg); + if (r_mixer) { + ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm), + r_blend_cfg); + ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm), + r_blend_ext_cfg); + } spin_unlock_irqrestore(&ctl->hw_lock, flags); - ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm); + ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm); + if (r_mixer) + ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm); - DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm, + DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm, blend_cfg, blend_ext_cfg); + if (r_mixer) + DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", + r_mixer->lm, r_blend_cfg, r_blend_ext_cfg); return 0; } @@ -443,7 +499,8 @@ u32 mdp_ctl_flush_mask_lm(int lm) } } -static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) +static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + u32 flush_mask) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; u32 sw_mask = 0; @@ -452,7 +509,7 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask) /* for some targets, cursor bit is the same as LM bit */ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0)) - sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm); + sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm); return sw_mask; } @@ -498,25 +555,26 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, * * Return H/W flushed bit mask. 
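/*
 * [Editor's sketch] One iteration of the staging loop in
 * mdp5_ctl_blend() above: every stage now carries up to MAX_PIPE_STAGE
 * (two) pipes per mixer, so the per-LM blend configuration ORs the
 * masks of the left and right slot; SSPP_NONE slots contribute an
 * empty mask.  mdp_ctl_blend_mask() is the real (file-local) helper;
 * this wrapper is illustrative.
 */
static u32 example_stage_cfg(enum mdp5_pipe left, enum mdp5_pipe right,
			     int stage)
{
	return mdp_ctl_blend_mask(left, stage) |
	       mdp_ctl_blend_mask(right, stage);
}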
*/ -u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, + struct mdp5_pipeline *pipeline, + u32 flush_mask) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - struct op_mode *pipeline = &ctl->pipeline; unsigned long flags; u32 flush_id = ctl->id; u32 curr_ctl_flush_mask; - pipeline->start_mask &= ~flush_mask; + ctl->start_mask &= ~flush_mask; VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, - pipeline->start_mask, ctl->pending_ctl_trigger); + ctl->start_mask, ctl->pending_ctl_trigger); if (ctl->pending_ctl_trigger & flush_mask) { flush_mask |= MDP5_CTL_FLUSH_CTL; ctl->pending_ctl_trigger = 0; } - flush_mask |= fix_sw_flush(ctl, flush_mask); + flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask); flush_mask &= ctl_mgr->flush_hw_mask; @@ -530,9 +588,9 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) spin_unlock_irqrestore(&ctl->hw_lock, flags); } - if (start_signal_needed(ctl)) { + if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); - refill_start_mask(ctl); + refill_start_mask(ctl, pipeline); } return curr_ctl_flush_mask; @@ -619,8 +677,6 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, found: ctl = &ctl_mgr->ctls[c]; - ctl->pipeline.intf.num = intf_num; - ctl->lm = -1; ctl->status |= CTL_STAT_BUSY; ctl->pending_ctl_trigger = 0; DBG("CTL %d allocated", ctl->id); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index fda00d33e4db..b63120388dc6 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -37,13 +37,17 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num); int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl); struct mdp5_interface; -int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf, - int lm); -int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled); +struct mdp5_pipeline; +int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p); +int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p, + bool enabled); -int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable); +int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + int cursor_id, bool enable); int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); +#define MAX_PIPE_STAGE 2 + /* * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM) * @@ -56,8 +60,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) -int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, - u32 ctl_blend_op_flags); +int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + enum mdp5_pipe stage[][MAX_PIPE_STAGE], + enum mdp5_pipe r_stage[][MAX_PIPE_STAGE], + u32 stage_cnt, u32 ctl_blend_op_flags); /** * mdp_ctl_flush_mask...() - Register FLUSH masks @@ -71,7 +77,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id); u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); /* @flush_mask: see CTL flush masks definitions below */ -u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); +u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, + u32 flush_mask); u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c 
b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 80fa482ae8ed..c2ab0f033031 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -109,7 +109,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, struct mdp5_kms *mdp5_kms = get_kms(encoder); struct drm_device *dev = encoder->dev; struct drm_connector *connector; - int intf = mdp5_encoder->intf.num; + int intf = mdp5_encoder->intf->num; uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol; uint32_t display_v_start, display_v_end; uint32_t hsync_start_x, hsync_end_x; @@ -130,7 +130,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, ctrl_pol = 0; /* DSI controller cannot handle active-low sync signals. */ - if (mdp5_encoder->intf.type != INTF_DSI) { + if (mdp5_encoder->intf->type != INTF_DSI) { if (mode->flags & DRM_MODE_FLAG_NHSYNC) ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; if (mode->flags & DRM_MODE_FLAG_NVSYNC) @@ -175,7 +175,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, * DISPLAY_V_START = (VBP * HCYCLE) + HBP * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP */ - if (mdp5_encoder->intf.type == INTF_eDP) { + if (mdp5_encoder->intf->type == INTF_eDP) { display_v_start += mode->htotal - mode->hsync_start; display_v_end -= mode->hsync_start - mode->hdisplay; } @@ -206,8 +206,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf, - mdp5_encoder->ctl); + mdp5_crtc_set_pipeline(encoder->crtc); } static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) @@ -215,20 +214,21 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); struct mdp5_ctl *ctl = mdp5_encoder->ctl; - int lm = mdp5_crtc_get_lm(encoder->crtc); - struct mdp5_interface *intf = &mdp5_encoder->intf; - int intfn = mdp5_encoder->intf.num; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc); + struct mdp5_interface *intf = mdp5_encoder->intf; + int intfn = mdp5_encoder->intf->num; unsigned long flags; if (WARN_ON(!mdp5_encoder->enabled)) return; - mdp5_ctl_set_encoder_state(ctl, false); + mdp5_ctl_set_encoder_state(ctl, pipeline, false); spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); /* * Wait for a vsync so we know the ENABLE=0 latched before @@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) * the settings changes for the new modeset (like new * scanout buffer) don't latch properly.. 
*/ - mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf)); + mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf)); bs_set(mdp5_encoder, 0); @@ -250,8 +250,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); struct mdp5_ctl *ctl = mdp5_encoder->ctl; - struct mdp5_interface *intf = &mdp5_encoder->intf; - int intfn = mdp5_encoder->intf.num; + struct mdp5_interface *intf = mdp5_encoder->intf; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc); + int intfn = intf->num; unsigned long flags; if (WARN_ON(mdp5_encoder->enabled)) @@ -261,9 +262,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); - mdp5_ctl_set_encoder_state(ctl, true); + mdp5_ctl_set_encoder_state(ctl, pipeline, true); mdp5_encoder->enabled = true; } @@ -273,7 +274,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = &mdp5_encoder->intf; + struct mdp5_interface *intf = mdp5_encoder->intf; if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); @@ -284,7 +285,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, static void mdp5_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = &mdp5_encoder->intf; + struct mdp5_interface *intf = mdp5_encoder->intf; if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) mdp5_cmd_encoder_disable(encoder); @@ -295,7 +296,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder) static void mdp5_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = &mdp5_encoder->intf; + struct mdp5_interface *intf = mdp5_encoder->intf; if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) mdp5_cmd_encoder_disable(encoder); @@ -303,17 +304,33 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) mdp5_vid_encoder_enable(encoder); } +static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state); + struct mdp5_interface *intf = mdp5_encoder->intf; + struct mdp5_ctl *ctl = mdp5_encoder->ctl; + + mdp5_cstate->ctl = ctl; + mdp5_cstate->pipeline.intf = intf; + + return 0; +} + static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { .mode_set = mdp5_encoder_mode_set, .disable = mdp5_encoder_disable, .enable = mdp5_encoder_enable, + .atomic_check = mdp5_encoder_atomic_check, }; int mdp5_encoder_get_linecount(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf.num; + int intf = mdp5_encoder->intf->num; return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf)); } @@ -322,7 +339,7 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder 
*encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); - int intf = mdp5_encoder->intf.num; + int intf = mdp5_encoder->intf->num; return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); } @@ -340,7 +357,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, return -EINVAL; mdp5_kms = get_kms(encoder); - intf_num = mdp5_encoder->intf.num; + intf_num = mdp5_encoder->intf->num; /* Switch slave encoder's TimingGen Sync mode, * to use the master's enable signal for the slave encoder. @@ -369,7 +386,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); - struct mdp5_interface *intf = &mdp5_encoder->intf; + struct mdp5_interface *intf = mdp5_encoder->intf; /* TODO: Expand this to set writeback modes too */ if (cmd_mode) { @@ -385,7 +402,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) /* initialize encoder */ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl) + struct mdp5_interface *intf, + struct mdp5_ctl *ctl) { struct drm_encoder *encoder = NULL; struct mdp5_encoder *mdp5_encoder; @@ -399,9 +417,9 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, goto fail; } - memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf)); encoder = &mdp5_encoder->base; mdp5_encoder->ctl = ctl; + mdp5_encoder->intf = intf; spin_lock_init(&mdp5_encoder->intf_lock); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 41ccd2a15d3c..d3d6b4cae1e6 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -93,6 +93,7 @@ struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s) /* Copy state: */ new_state->hwpipe = mdp5_kms->state->hwpipe; + new_state->hwmixer = mdp5_kms->state->hwmixer; if (mdp5_kms->smp) new_state->smp = mdp5_kms->state->smp; @@ -165,13 +166,16 @@ static void mdp5_kms_destroy(struct msm_kms *kms) struct msm_gem_address_space *aspace = mdp5_kms->aspace; int i; + for (i = 0; i < mdp5_kms->num_hwmixers; i++) + mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); + for (i = 0; i < mdp5_kms->num_hwpipes; i++) mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); if (aspace) { aspace->mmu->funcs->detach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_destroy(aspace); + msm_gem_address_space_put(aspace); } } @@ -268,19 +272,14 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) } static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, - enum mdp5_intf_type intf_type, int intf_num, - struct mdp5_ctl *ctl) + struct mdp5_interface *intf, + struct mdp5_ctl *ctl) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; struct drm_encoder *encoder; - struct mdp5_interface intf = { - .num = intf_num, - .type = intf_type, - .mode = MDP5_INTF_MODE_NONE, - }; - encoder = mdp5_encoder_init(dev, &intf, ctl); + encoder = mdp5_encoder_init(dev, intf, ctl); if (IS_ERR(encoder)) { dev_err(dev->dev, "failed to construct encoder\n"); return encoder; @@ -309,32 +308,28 @@ static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num) return -EINVAL; } -static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) +static int modeset_init_intf(struct mdp5_kms *mdp5_kms, + struct mdp5_interface *intf) { 
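/*
 * [Editor's note] Two related shifts run through the encoder changes
 * above.  First, interfaces are now heap objects owned by mdp5_kms and
 * shared by pointer: where encoders previously memcpy()'d the
 * descriptor, a change such as mdp5_encoder_set_intf_mode() flipping a
 * DSI interface between video and command mode is now seen by the
 * encoder, the CRTC state and the CTL alike.  Second, with
 * mdp5_encoder_atomic_check(), intf and ctl reach the CRTC through
 * atomic state instead of mdp5_crtc_set_pipeline() at mode-set time;
 * during the check phase of a commit the ordering is roughly:
 *
 *   drm_atomic_helper_check_modeset()
 *     -> encoder ->atomic_check()    stores intf + ctl in the CRTC state
 *   drm_atomic_helper_check_planes()
 *     -> crtc ->atomic_check()
 *        -> mdp5_crtc_setup_pipeline()  assigns mixers, derives irq masks
 *
 * which is what the "should have been already set up in the encoder's
 * atomic check" comment in mdp5_crtc_setup_pipeline() relies on.
 */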
struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; - const struct mdp5_cfg_hw *hw_cfg = - mdp5_cfg_get_hw_config(mdp5_kms->cfg); - enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num]; struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm; struct mdp5_ctl *ctl; struct drm_encoder *encoder; int ret = 0; - switch (intf_type) { - case INTF_DISABLED: - break; + switch (intf->type) { case INTF_eDP: if (!priv->edp) break; - ctl = mdp5_ctlm_request(ctlm, intf_num); + ctl = mdp5_ctlm_request(ctlm, intf->num); if (!ctl) { ret = -EINVAL; break; } - encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl); + encoder = construct_encoder(mdp5_kms, intf, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -346,13 +341,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->hdmi) break; - ctl = mdp5_ctlm_request(ctlm, intf_num); + ctl = mdp5_ctlm_request(ctlm, intf->num); if (!ctl) { ret = -EINVAL; break; } - encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl); + encoder = construct_encoder(mdp5_kms, intf, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -362,11 +357,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; case INTF_DSI: { - int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); + const struct mdp5_cfg_hw *hw_cfg = + mdp5_cfg_get_hw_config(mdp5_kms->cfg); + int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num); if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { dev_err(dev->dev, "failed to find dsi from intf %d\n", - intf_num); + intf->num); ret = -EINVAL; break; } @@ -374,13 +371,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) if (!priv->dsi[dsi_id]) break; - ctl = mdp5_ctlm_request(ctlm, intf_num); + ctl = mdp5_ctlm_request(ctlm, intf->num); if (!ctl) { ret = -EINVAL; break; } - encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl); + encoder = construct_encoder(mdp5_kms, intf, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -390,7 +387,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } default: - dev_err(dev->dev, "unknown intf: %d\n", intf_type); + dev_err(dev->dev, "unknown intf: %d\n", intf->type); ret = -EINVAL; break; } @@ -414,8 +411,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) * Construct encoders and modeset initialize connector devices * for each external display interface. 
*/ - for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { - ret = modeset_init_intf(mdp5_kms, i); + for (i = 0; i < mdp5_kms->num_intfs; i++) { + ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]); if (ret) goto fail; } @@ -425,7 +422,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) * the MDP5 interfaces) than the number of layer mixers present in HW, * but let's be safe here anyway */ - num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count); + num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers); /* * Construct planes equaling the number of hw pipes, and CRTCs for the @@ -744,6 +741,7 @@ fail: static void mdp5_destroy(struct platform_device *pdev) { struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev); + int i; if (mdp5_kms->ctlm) mdp5_ctlm_destroy(mdp5_kms->ctlm); @@ -752,6 +750,9 @@ static void mdp5_destroy(struct platform_device *pdev) if (mdp5_kms->cfg) mdp5_cfg_destroy(mdp5_kms->cfg); + for (i = 0; i < mdp5_kms->num_intfs; i++) + kfree(mdp5_kms->intfs[i]); + if (mdp5_kms->rpm_enabled) pm_runtime_disable(&pdev->dev); @@ -829,6 +830,64 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) return 0; } +static int hwmixer_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + const struct mdp5_cfg_hw *hw_cfg; + int i, ret; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + for (i = 0; i < hw_cfg->lm.count; i++) { + struct mdp5_hw_mixer *mixer; + + mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]); + if (IS_ERR(mixer)) { + ret = PTR_ERR(mixer); + dev_err(dev->dev, "failed to construct LM%d (%d)\n", + i, ret); + return ret; + } + + mixer->idx = mdp5_kms->num_hwmixers; + mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer; + } + + return 0; +} + +static int interface_init(struct mdp5_kms *mdp5_kms) +{ + struct drm_device *dev = mdp5_kms->dev; + const struct mdp5_cfg_hw *hw_cfg; + const enum mdp5_intf_type *intf_types; + int i; + + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + intf_types = hw_cfg->intf.connect; + + for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { + struct mdp5_interface *intf; + + if (intf_types[i] == INTF_DISABLED) + continue; + + intf = kzalloc(sizeof(*intf), GFP_KERNEL); + if (!intf) { + dev_err(dev->dev, "failed to construct INTF%d\n", i); + return -ENOMEM; + } + + intf->num = i; + intf->type = intf_types[i]; + intf->mode = MDP5_INTF_MODE_NONE; + intf->idx = mdp5_kms->num_intfs; + mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf; + } + + return 0; +} + static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; @@ -929,6 +988,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) if (ret) goto fail; + ret = hwmixer_init(mdp5_kms); + if (ret) + goto fail; + + ret = interface_init(mdp5_kms); + if (ret) + goto fail; + /* set uninit-ed kms */ priv->kms = &mdp5_kms->base.base; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 9de471191eba..8bdb7ee4983b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -23,8 +23,9 @@ #include "mdp/mdp_kms.h" #include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ #include "mdp5.xml.h" -#include "mdp5_ctl.h" #include "mdp5_pipe.h" +#include "mdp5_mixer.h" +#include "mdp5_ctl.h" #include "mdp5_smp.h" struct mdp5_state; @@ -39,6 +40,12 @@ struct mdp5_kms { unsigned num_hwpipes; struct mdp5_hw_pipe *hwpipes[SSPP_MAX]; + unsigned num_hwmixers; + struct mdp5_hw_mixer *hwmixers[8]; + + 
unsigned num_intfs; + struct mdp5_interface *intfs[5]; + struct mdp5_cfg_handler *cfg; uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ @@ -83,6 +90,7 @@ struct mdp5_kms { */ struct mdp5_state { struct mdp5_hw_pipe_state hwpipe; + struct mdp5_hw_mixer_state hwmixer; struct mdp5_smp_state smp; }; @@ -96,6 +104,7 @@ struct mdp5_plane_state { struct drm_plane_state base; struct mdp5_hw_pipe *hwpipe; + struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */ /* aligned with property */ uint8_t premultiplied; @@ -108,6 +117,28 @@ struct mdp5_plane_state { #define to_mdp5_plane_state(x) \ container_of(x, struct mdp5_plane_state, base) +struct mdp5_pipeline { + struct mdp5_interface *intf; + struct mdp5_hw_mixer *mixer; + struct mdp5_hw_mixer *r_mixer; /* right mixer */ +}; + +struct mdp5_crtc_state { + struct drm_crtc_state base; + + struct mdp5_ctl *ctl; + struct mdp5_pipeline pipeline; + + /* these are derivatives of intf/mixer state in mdp5_pipeline */ + u32 vblank_irqmask; + u32 err_irqmask; + u32 pp_done_irqmask; + + bool cmd_mode; +}; +#define to_mdp5_crtc_state(x) \ + container_of(x, struct mdp5_crtc_state, base) + enum mdp5_intf_mode { MDP5_INTF_MODE_NONE = 0, @@ -121,6 +152,7 @@ enum mdp5_intf_mode { }; struct mdp5_interface { + int idx; int num; /* display interface number */ enum mdp5_intf_type type; enum mdp5_intf_mode mode; @@ -128,11 +160,11 @@ struct mdp5_interface { struct mdp5_encoder { struct drm_encoder base; - struct mdp5_interface intf; spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ bool enabled; uint32_t bsc; + struct mdp5_interface *intf; struct mdp5_ctl *ctl; }; #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) @@ -197,8 +229,8 @@ static inline uint32_t intf2err(int intf_num) } } -#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 
3 : layer_mixer) -static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf) +static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer, + struct mdp5_interface *intf) { /* * In case of DSI Command Mode, the Ping Pong's read pointer IRQ @@ -208,7 +240,7 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf) if ((intf->type == INTF_DSI) && (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) - return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm); + return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp; if (intf->type == INTF_WB) return MDP5_IRQ_WB_2_DONE; @@ -222,9 +254,9 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf) } } -static inline uint32_t lm2ppdone(int lm) +static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer) { - return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm); + return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp; } int mdp5_disable(struct mdp5_kms *mdp5_kms); @@ -243,15 +275,16 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); +enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum drm_plane_type type); struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); -int mdp5_crtc_get_lm(struct drm_crtc *crtc); -void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, - struct mdp5_interface *intf, struct mdp5_ctl *ctl); +struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc); +struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc); +void mdp5_crtc_set_pipeline(struct drm_crtc *crtc); void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, struct drm_plane *plane, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c new file mode 100644 index 000000000000..8a00991f03c7 --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
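In the intf2vblank()/lm2ppdone() hunks above, the LM-to-ping-pong mapping stops being the hard-coded GET_PING_PONG_ID() macro (which special-cased LM5 onto PP3) and becomes plain data in struct mdp5_hw_mixer. A self-contained sketch of the mask derivation, using an illustrative IRQ bit position rather than the real mdp5.xml.h constant:

#include <stdint.h>
#include <stdio.h>

#define MDP5_IRQ_PING_PONG_0_DONE (1u << 8)   /* illustrative bit, not the real value */

struct hw_mixer { int lm; int pp; };

static uint32_t lm2ppdone(const struct hw_mixer *mixer)
{
	/* one DONE bit per ping-pong block, starting at PP0 */
	return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}

int main(void)
{
	/* LM5 pairs with PP3 on these SoCs; the old macro computed this as
	 * GET_PING_PONG_ID(5) == 3 */
	struct hw_mixer lm5 = { .lm = 5, .pp = 3 };
	printf("0x%x\n", (unsigned)lm2ppdone(&lm5));   /* bit 8 shifted by 3: 0x800 */
	return 0;
}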
+ */ + +#include "mdp5_kms.h" + +/* + * As of now, there are only 2 combinations possible for source split: + * + * Left | Right + * -----|------ + * LM0 | LM1 + * LM2 | LM5 + * + */ +static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 }; + +static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm) +{ + int i; + int pair_lm; + + pair_lm = lm_right_pair[lm]; + if (pair_lm < 0) + return -EINVAL; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) { + struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i]; + + if (mixer->lm == pair_lm) + return mixer->idx; + } + + return -1; +} + +int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, + uint32_t caps, struct mdp5_hw_mixer **mixer, + struct mdp5_hw_mixer **r_mixer) +{ + struct msm_drm_private *priv = s->dev->dev_private; + struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms)); + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_mixer_state *new_state; + int i; + + if (IS_ERR(state)) + return PTR_ERR(state); + + new_state = &state->hwmixer; + + for (i = 0; i < mdp5_kms->num_hwmixers; i++) { + struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i]; + + /* + * skip if already in-use by a different CRTC. If there is a + * mixer already assigned to this CRTC, it means this call is + * a request to get an additional right mixer. Assume that the + * existing mixer is the 'left' one, and try to see if we can + * get its corresponding 'right' pair. + */ + if (new_state->hwmixer_to_crtc[cur->idx] && + new_state->hwmixer_to_crtc[cur->idx] != crtc) + continue; + + /* skip if doesn't support some required caps: */ + if (caps & ~cur->caps) + continue; + + if (r_mixer) { + int pair_idx; + + pair_idx = get_right_pair_idx(mdp5_kms, cur->lm); + if (pair_idx < 0) + return -EINVAL; + + if (new_state->hwmixer_to_crtc[pair_idx]) + continue; + + *r_mixer = mdp5_kms->hwmixers[pair_idx]; + } + + /* + * prefer a pair-able LM over an unpairable one. We can + * switch the CRTC from Normal mode to Source Split mode + * without requiring a full modeset if we had already + * assigned this CRTC a pair-able LM. + * + * TODO: There will be assignment sequences which would + * result in the CRTC requiring a full modeset, even + * if we have the LM resources to prevent it. For a platform + * with a few displays, we don't run out of pair-able LMs + * so easily. For now, ignore the possibility of requiring + * a full modeset. 
+ */ + if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR) + *mixer = cur; + } + + if (!(*mixer)) + return -ENOMEM; + + if (r_mixer && !(*r_mixer)) + return -ENOMEM; + + DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name); + + new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc; + if (r_mixer) { + DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm, + crtc->name); + new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc; + } + + return 0; +} + +void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer) +{ + struct mdp5_state *state = mdp5_get_state(s); + struct mdp5_hw_mixer_state *new_state = &state->hwmixer; + + if (!mixer) + return; + + if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx])) + return; + + DBG("%s: release from crtc %s", mixer->name, + new_state->hwmixer_to_crtc[mixer->idx]->name); + + new_state->hwmixer_to_crtc[mixer->idx] = NULL; +} + +void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer) +{ + kfree(mixer); +} + +static const char * const mixer_names[] = { + "LM0", "LM1", "LM2", "LM3", "LM4", "LM5", +}; + +struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm) +{ + struct mdp5_hw_mixer *mixer; + + mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); + if (!mixer) + return ERR_PTR(-ENOMEM); + + mixer->name = mixer_names[lm->id]; + mixer->lm = lm->id; + mixer->caps = lm->caps; + mixer->pp = lm->pp; + mixer->dspp = lm->dspp; + mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id); + + return mixer; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h new file mode 100644 index 000000000000..9be94f567fbd --- /dev/null +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
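mdp5_mixer_assign() above only ever pairs LM0 with LM1 and LM2 with LM5, per the lm_right_pair table; every other mixer has no right-hand partner. A standalone sketch of just that lookup (the real function additionally checks mixer caps and the per-CRTC assignment state):

#include <stdio.h>

/* right-hand partner LM for each LM id, -1 where source split is impossible */
static const int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };

static int right_partner(int lm)
{
	if (lm < 0 || lm >= 6)
		return -1;
	return lm_right_pair[lm];
}

int main(void)
{
	for (int lm = 0; lm < 6; lm++)
		printf("LM%d -> %d\n", lm, right_partner(lm));
	/* LM0 -> 1 and LM2 -> 5; all others print -1 */
	return 0;
}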
+ */ + +#ifndef __MDP5_LM_H__ +#define __MDP5_LM_H__ + +/* represents a hw Layer Mixer, one (or more) is dynamically assigned to a crtc */ +struct mdp5_hw_mixer { + int idx; + + const char *name; + + int lm; /* the LM instance # */ + uint32_t caps; + int pp; + int dspp; + + uint32_t flush_mask; /* used to commit LM registers */ +}; + +/* global atomic state of assignment between CRTCs and Layer Mixers: */ +struct mdp5_hw_mixer_state { + struct drm_crtc *hwmixer_to_crtc[8]; +}; + +struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm); +void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm); +int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc, + uint32_t caps, struct mdp5_hw_mixer **mixer, + struct mdp5_hw_mixer **r_mixer); +void mdp5_mixer_release(struct drm_atomic_state *s, + struct mdp5_hw_mixer *mixer); + +#endif /* __MDP5_LM_H__ */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c index 35c4dabb0c0c..2bfac3712685 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c @@ -135,7 +135,5 @@ struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe, hwpipe->caps = caps; hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe); - spin_lock_init(&hwpipe->pipe_lock); - return hwpipe; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h index 611da7a660c9..924c3e6f9517 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h @@ -18,7 +18,8 @@ #ifndef __MDP5_PIPE_H__ #define __MDP5_PIPE_H__ -#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */ +/* TODO: Add SSPP_MAX in mdp5.xml.h */ +#define SSPP_MAX (SSPP_CURSOR1 + 1) /* represents a hw pipe, which is dynamically assigned to a plane */ struct mdp5_hw_pipe { @@ -27,7 +28,6 @@ struct mdp5_hw_pipe { const char *name; enum mdp5_pipe pipe; - spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ uint32_t reg_offset; uint32_t caps; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 60a5451ae0b9..a38c5fe6cc19 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -22,6 +22,8 @@ struct mdp5_plane { struct drm_plane base; + spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */ + uint32_t nformats; uint32_t formats[32]; }; @@ -40,9 +42,6 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, uint32_t src_w, uint32_t src_h, struct drm_modeset_acquire_ctx *ctx); -static void set_scanout_locked(struct drm_plane *plane, - struct drm_framebuffer *fb); - static struct mdp5_kms *get_kms(struct drm_plane *plane) { struct msm_drm_private *priv = plane->dev->dev_private; @@ -178,9 +177,14 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, const struct drm_plane_state *state) { struct mdp5_plane_state *pstate = to_mdp5_plane_state(state); + struct mdp5_kms *mdp5_kms = get_kms(state->plane); drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ? pstate->hwpipe->name : "(null)"); + if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) + drm_printf(p, "\tright-hwpipe=%s\n", + pstate->r_hwpipe ? 
pstate->r_hwpipe->name : + "(null)"); drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied); drm_printf(p, "\tzpos=%u\n", pstate->zpos); drm_printf(p, "\talpha=%u\n", pstate->alpha); @@ -300,7 +304,9 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, struct drm_plane_state *old_state = plane->state; struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); bool new_hwpipe = false; + bool need_right_hwpipe = false; uint32_t max_width, max_height; + bool out_of_bounds = false; uint32_t caps = 0; struct drm_rect clip; int min_scale, max_scale; @@ -313,7 +319,23 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, max_height = config->hw->lm.max_height << 16; /* Make sure source dimensions are within bounds. */ - if ((state->src_w > max_width) || (state->src_h > max_height)) { + if (state->src_h > max_height) + out_of_bounds = true; + + if (state->src_w > max_width) { + /* If source split is supported, we can go up to 2x + * the max LM width, but we'd need to stage another + * hwpipe to the right LM. So, the drm_plane would + * consist of 2 hwpipes. + */ + if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT && + (state->src_w <= 2 * max_width)) + need_right_hwpipe = true; + else + out_of_bounds = true; + } + + if (out_of_bounds) { struct drm_rect src = drm_plane_state_src(state); DBG("Invalid source size "DRM_RECT_FP_FMT, DRM_RECT_FP_ARG(&src)); @@ -364,6 +386,15 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) new_hwpipe = true; + /* + * (re)allocate hw pipe if we're either requesting 2 hw pipes + * or we're switching from 2 hw pipes to 1 hw pipe because the + * new src_w can be supported by 1 hw pipe itself. + */ + if ((need_right_hwpipe && !mdp5_state->r_hwpipe) || + (!need_right_hwpipe && mdp5_state->r_hwpipe)) + new_hwpipe = true; + if (mdp5_kms->smp) { const struct mdp_format *format = to_mdp_format(msm_framebuffer_format(state->fb)); @@ -382,13 +413,36 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, * it available for other planes?
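A quick numeric check of the new bounds logic above, ignoring the 16.16 fixed-point representation the driver actually compares in: with a hypothetical LM limit of 2560 pixels, a 4096-wide source is acceptable only when MDP_CAP_SRC_SPLIT is present, in which case a second ("right") hwpipe must be staged. A compilable sketch:

#include <stdbool.h>
#include <stdio.h>

/* illustrative limit; the driver reads config->hw->lm.max_width */
#define MAX_LM_WIDTH 2560u

static bool src_w_fits(unsigned src_w, bool has_src_split, bool *need_right)
{
	*need_right = false;
	if (src_w <= MAX_LM_WIDTH)
		return true;                      /* one pipe is enough */
	if (has_src_split && src_w <= 2 * MAX_LM_WIDTH) {
		*need_right = true;               /* stage a pipe on the right LM too */
		return true;
	}
	return false;                             /* out of bounds */
}

int main(void)
{
	bool right;
	bool ok = src_w_fits(4096, true, &right);
	printf("with split:    ok=%d right=%d\n", ok, right);   /* ok=1 right=1 */
	ok = src_w_fits(4096, false, &right);
	printf("without split: ok=%d right=%d\n", ok, right);   /* ok=0 right=0 */
	return 0;
}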
*/ struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe; + struct mdp5_hw_pipe *old_right_hwpipe = + mdp5_state->r_hwpipe; + mdp5_state->hwpipe = mdp5_pipe_assign(state->state, plane, caps, blkcfg); if (IS_ERR(mdp5_state->hwpipe)) { DBG("%s: failed to assign hwpipe!", plane->name); return PTR_ERR(mdp5_state->hwpipe); } + + if (need_right_hwpipe) { + mdp5_state->r_hwpipe = + mdp5_pipe_assign(state->state, plane, + caps, blkcfg); + if (IS_ERR(mdp5_state->r_hwpipe)) { + DBG("%s: failed to assign right hwpipe", + plane->name); + return PTR_ERR(mdp5_state->r_hwpipe); + } + } else { + /* + * set it to NULL so that the driver knows we + * don't have a right hwpipe when committing a + * new state + */ + mdp5_state->r_hwpipe = NULL; + } + mdp5_pipe_release(state->state, old_hwpipe); + mdp5_pipe_release(state->state, old_right_hwpipe); } } @@ -437,13 +491,10 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = { .atomic_update = mdp5_plane_atomic_update, }; -static void set_scanout_locked(struct drm_plane *plane, - struct drm_framebuffer *fb) +static void set_scanout_locked(struct mdp5_kms *mdp5_kms, + enum mdp5_pipe pipe, + struct drm_framebuffer *fb) { - struct mdp5_kms *mdp5_kms = get_kms(plane); - struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe; - enum mdp5_pipe pipe = hwpipe->pipe; - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1])); @@ -460,8 +511,6 @@ static void set_scanout_locked(struct drm_plane *plane, msm_framebuffer_iova(fb, mdp5_kms->id, 2)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), msm_framebuffer_iova(fb, mdp5_kms->id, 3)); - - plane->fb = fb; } /* Note: mdp5_plane->pipe_lock must be locked */ @@ -714,21 +763,129 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, } } +struct pixel_ext { + int left[COMP_MAX]; + int right[COMP_MAX]; + int top[COMP_MAX]; + int bottom[COMP_MAX]; +}; + +struct phase_step { + u32 x[COMP_MAX]; + u32 y[COMP_MAX]; +}; + +static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms, + struct mdp5_hw_pipe *hwpipe, + struct drm_framebuffer *fb, + struct phase_step *step, + struct pixel_ext *pe, + u32 scale_config, u32 hdecm, u32 vdecm, + bool hflip, bool vflip, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + u32 src_img_w, u32 src_img_h, + u32 src_x, u32 src_y, + u32 src_w, u32 src_h) +{ + enum mdp5_pipe pipe = hwpipe->pipe; + bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; + const struct mdp_format *format = + to_mdp_format(msm_framebuffer_format(fb)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), + MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) | + MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), + MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | + MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), + MDP5_PIPE_SRC_XY_X(src_x) | + MDP5_PIPE_SRC_XY_Y(src_y)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), + MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | + MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), + MDP5_PIPE_OUT_XY_X(crtc_x) | + MDP5_PIPE_OUT_XY_Y(crtc_y)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), + MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | + MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | + MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | + MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | + COND(format->alpha_enable, 
MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | + MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | + MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | + COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | + MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | + MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), + MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | + MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | + MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | + MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); + + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), + (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | + (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | + COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) | + MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); + + /* not using secure mode: */ + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); + + if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) + mdp5_write_pixel_ext(mdp5_kms, pipe, format, + src_w, pe->left, pe->right, + src_h, pe->top, pe->bottom); + + if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), + step->x[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), + step->y[COMP_0]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), + step->x[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), + step->y[COMP_1_2]); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), + MDP5_PIPE_DECIMATION_VERT(vdecm) | + MDP5_PIPE_DECIMATION_HORZ(hdecm)); + mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), + scale_config); + } + + if (hwpipe->caps & MDP_PIPE_CAP_CSC) { + if (MDP_FORMAT_IS_YUV(format)) + csc_enable(mdp5_kms, pipe, + mdp_get_default_csc_cfg(CSC_YUV2RGB)); + else + csc_disable(mdp5_kms, pipe); + } + + set_scanout_locked(mdp5_kms, pipe, fb); +} static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_rect *src, struct drm_rect *dest) { + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); struct drm_plane_state *pstate = plane->state; struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; struct mdp5_kms *mdp5_kms = get_kms(plane); enum mdp5_pipe pipe = hwpipe->pipe; + struct mdp5_hw_pipe *right_hwpipe; const struct mdp_format *format; uint32_t nplanes, config = 0; - uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,}; - bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT; - int pe_left[COMP_MAX], pe_right[COMP_MAX]; - int pe_top[COMP_MAX], pe_bottom[COMP_MAX]; + struct phase_step step = { 0 }; + struct pixel_ext pe = { 0 }; uint32_t hdecm = 0, vdecm = 0; uint32_t pix_format; unsigned int rotation; @@ -737,6 +894,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, unsigned int crtc_w, crtc_h; uint32_t src_x, src_y; uint32_t src_w, src_h; + uint32_t src_img_w, src_img_h; + uint32_t src_x_r; + int crtc_x_r; unsigned long flags; int ret; @@ -765,23 +925,41 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, src_w = src_w >> 16; src_h = src_h >> 16; + src_img_w = min(fb->width, src_w); + src_img_h = min(fb->height, src_h); + DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name, fb->base.id, src_x, src_y, src_w, src_h, crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); - ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step); + right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe; + if (right_hwpipe) { + /* + * if 
the plane comprises 2 hw pipes, assume that the width + * is split equally across them. The only parameters that vary + * between the 2 pipes are src_x and crtc_x + */ + crtc_w /= 2; + src_w /= 2; + src_img_w /= 2; + + crtc_x_r = crtc_x + crtc_w; + src_x_r = src_x + src_w; + } + + ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x); if (ret) return ret; - ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step); + ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y); if (ret) return ret; if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) { - calc_pixel_ext(format, src_w, crtc_w, phasex_step, - pe_left, pe_right, true); - calc_pixel_ext(format, src_h, crtc_h, phasey_step, - pe_top, pe_bottom, false); + calc_pixel_ext(format, src_w, crtc_w, step.x, + pe.left, pe.right, true); + calc_pixel_ext(format, src_h, crtc_h, step.y, + pe.top, pe.bottom, false); } /* TODO calc hdecm, vdecm */ @@ -798,86 +976,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, hflip = !!(rotation & DRM_REFLECT_X); vflip = !!(rotation & DRM_REFLECT_Y); - spin_lock_irqsave(&hwpipe->pipe_lock, flags); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), - MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) | - MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h))); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe), - MDP5_PIPE_SRC_SIZE_WIDTH(src_w) | - MDP5_PIPE_SRC_SIZE_HEIGHT(src_h)); + spin_lock_irqsave(&mdp5_plane->pipe_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe), - MDP5_PIPE_SRC_XY_X(src_x) | - MDP5_PIPE_SRC_XY_Y(src_y)); + mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe, + config, hdecm, vdecm, hflip, vflip, + crtc_x, crtc_y, crtc_w, crtc_h, + src_img_w, src_img_h, + src_x, src_y, src_w, src_h); + if (right_hwpipe) + mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe, + config, hdecm, vdecm, hflip, vflip, + crtc_x_r, crtc_y, crtc_w, crtc_h, + src_img_w, src_img_h, + src_x_r, src_y, src_w, src_h); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe), - MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) | - MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h)); + spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe), - MDP5_PIPE_OUT_XY_X(crtc_x) | - MDP5_PIPE_OUT_XY_Y(crtc_y)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), - MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) | - MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) | - MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) | - MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) | - COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) | - MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) | - MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) | - COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) | - MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) | - MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample)); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe), - MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) | - MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) | - MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) | - MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3])); - - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe), - (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) | - (vflip ?
MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) | - COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) | - MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS)); - - /* not using secure mode: */ - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0); - - if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) - mdp5_write_pixel_ext(mdp5_kms, pipe, format, - src_w, pe_left, pe_right, - src_h, pe_top, pe_bottom); - - if (hwpipe->caps & MDP_PIPE_CAP_SCALE) { - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe), - phasex_step[COMP_0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe), - phasey_step[COMP_0]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe), - phasex_step[COMP_1_2]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe), - phasey_step[COMP_1_2]); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe), - MDP5_PIPE_DECIMATION_VERT(vdecm) | - MDP5_PIPE_DECIMATION_HORZ(hdecm)); - mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config); - } - - if (hwpipe->caps & MDP_PIPE_CAP_CSC) { - if (MDP_FORMAT_IS_YUV(format)) - csc_enable(mdp5_kms, pipe, - mdp_get_default_csc_cfg(CSC_YUV2RGB)); - else - csc_disable(mdp5_kms, pipe); - } - - set_scanout_locked(plane, fb); - - spin_unlock_irqrestore(&hwpipe->pipe_lock, flags); + plane->fb = fb; return ret; } @@ -934,6 +1049,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, if (new_plane_state->visible) { struct mdp5_ctl *ctl; + struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc); ret = mdp5_plane_mode_set(plane, crtc, fb, &new_plane_state->src, @@ -942,7 +1058,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, ctl = mdp5_crtc_get_ctl(crtc); - mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane)); + mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); } *to_mdp5_plane_state(plane_state) = @@ -959,6 +1075,10 @@ slow: src_x, src_y, src_w, src_h, ctx); } +/* + * Use this func and the one below only after the atomic state has been + * successfully swapped + */ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) { struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); @@ -969,14 +1089,30 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) return pstate->hwpipe->pipe; } +enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane) +{ + struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); + + if (!pstate->r_hwpipe) + return SSPP_NONE; + + return pstate->r_hwpipe->pipe; +} + uint32_t mdp5_plane_get_flush(struct drm_plane *plane) { struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); + u32 mask; if (WARN_ON(!pstate->hwpipe)) return 0; - return pstate->hwpipe->flush_mask; + mask = pstate->hwpipe->flush_mask; + + if (pstate->r_hwpipe) + mask |= pstate->r_hwpipe->flush_mask; + + return mask; } /* initialize plane */ @@ -998,6 +1134,8 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, ARRAY_SIZE(mdp5_plane->formats), false); + spin_lock_init(&mdp5_plane->pipe_lock); + if (type == DRM_PLANE_TYPE_CURSOR) ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_cursor_plane_funcs, diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index 7574cdfef418..1185487e7e5e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -104,6 +104,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); #define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */ #define MDP_CAP_DSC 
BIT(1) /* VESA Display Stream Compression */ #define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */ +#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */ /* MDP pipe capabilities */ #define MDP_PIPE_CAP_HFLIP BIT(0) @@ -114,6 +115,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); #define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) #define MDP_PIPE_CAP_CURSOR BIT(6) +/* MDP layer mixer caps */ +#define MDP_LM_CAP_DISPLAY BIT(0) +#define MDP_LM_CAP_WB BIT(1) +#define MDP_LM_CAP_PAIR BIT(2) + static inline bool pipe_supports_yuv(uint32_t pipe_caps) { return (pipe_caps & MDP_PIPE_CAP_SCALE) && diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 4f35d4eb85d0..1855182c76ce 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -28,7 +28,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m) if (gpu) { seq_printf(m, "%s Status:\n", gpu->name); + pm_runtime_get_sync(&gpu->pdev->dev); gpu->funcs->show(gpu, m); + pm_runtime_put_sync(&gpu->pdev->dev); } return 0; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9208e67be453..87b5695d4034 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -55,14 +55,13 @@ int msm_register_address_space(struct drm_device *dev, struct msm_gem_address_space *aspace) { struct msm_drm_private *priv = dev->dev_private; - int idx = priv->num_aspaces++; - if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace))) + if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace))) return -EINVAL; - priv->aspace[idx] = aspace; + priv->aspace[priv->num_aspaces] = aspace; - return idx; + return priv->num_aspaces++; } #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING @@ -265,6 +264,8 @@ static int msm_drm_uninit(struct device *dev) if (gpu) { mutex_lock(&ddev->struct_mutex); + // XXX what do we do here? 
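The msm_drv.c hunk above fixes a subtle accounting bug: the old msm_register_address_space() incremented num_aspaces before the bounds check, so a failed registration still consumed a slot. The corrected check-then-commit pattern, reduced to a standalone sketch:

#include <stdio.h>

#define MAX_ASPACES 4

static void *aspaces[MAX_ASPACES];
static int num_aspaces;

static int register_aspace(void *aspace)
{
	/* check first, commit the counter only on success */
	if (num_aspaces >= MAX_ASPACES)
		return -1;                  /* stands in for -EINVAL */
	aspaces[num_aspaces] = aspace;
	return num_aspaces++;               /* returned index is the slot just filled */
}

int main(void)
{
	int dummy;
	for (int i = 0; i < 6; i++)
		printf("slot %d\n", register_aspace(&dummy));
	/* prints slots 0..3, then -1 twice, without corrupting the counter */
	return 0;
}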
+ //pm_runtime_enable(&pdev->dev); gpu->funcs->pm_suspend(gpu); mutex_unlock(&ddev->struct_mutex); gpu->funcs->destroy(gpu); @@ -539,7 +540,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file) return 0; } -static void msm_preclose(struct drm_device *dev, struct drm_file *file) +static void msm_postclose(struct drm_device *dev, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; struct msm_file_private *ctx = file->driver_priv; @@ -812,7 +813,7 @@ static struct drm_driver msm_driver = { DRIVER_ATOMIC | DRIVER_MODESET, .open = msm_open, - .preclose = msm_preclose, + .postclose = msm_postclose, .lastclose = msm_lastclose, .irq_handler = msm_irq, .irq_preinstall = msm_irq_preinstall, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index b885c3d5ae4d..28b6f9ba5066 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -191,7 +191,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, int msm_gem_map_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt, int npages); -void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace); +void msm_gem_address_space_put(struct msm_gem_address_space *aspace); + struct msm_gem_address_space * msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 59811f29607d..68e509b3b9e4 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, size = PAGE_ALIGN(size); + /* Disallow zero sized objects as they make the underlying + * infrastructure grumpy + */ + if (size == 0) + return ERR_PTR(-EINVAL); + ret = msm_gem_new_impl(dev, size, flags, NULL, &obj); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 7d529516b332..1b4cf20043ea 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -18,6 +18,7 @@ #ifndef __MSM_GEM_H__ #define __MSM_GEM_H__ +#include <linux/kref.h> #include <linux/reservation.h> #include "msm_drv.h" @@ -31,6 +32,7 @@ struct msm_gem_address_space { */ struct drm_mm mm; struct msm_mmu *mmu; + struct kref kref; }; struct msm_gem_vma { diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 1172fe7a9252..1c545ebe6a5a 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -404,6 +404,24 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) return -EINVAL; + if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { + in_fence = sync_file_get_fence(args->fence_fd); + + if (!in_fence) + return -EINVAL; + + /* TODO if we get an array-fence due to userspace merging multiple + * fences, we need a way to determine if all the backing fences + * are from our own context.. 
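The msm_gem_submit.c movement above is about lock hold time: dma_fence_wait() on a foreign-context fence can block for a long while, and the old placement performed that wait with dev->struct_mutex already held, stalling every other submitter. The shape of the fix, sketched with pthreads standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stands in for dma_fence_wait(); may sleep for many milliseconds */
static void wait_in_fence(void) { }

static void queue_submit(void) { printf("submit queued\n"); }

void submit_sketch(void)
{
	wait_in_fence();                     /* 1: block with no locks held */
	pthread_mutex_lock(&struct_mutex);   /* 2: hold the lock only for the */
	queue_submit();                      /*    short bookkeeping section  */
	pthread_mutex_unlock(&struct_mutex);
}

int main(void)
{
	submit_sketch();
	return 0;
}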
+ */ + + if (in_fence->context != gpu->fctx->context) { + ret = dma_fence_wait(in_fence, true); + if (ret) + return ret; + } + } + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; @@ -431,27 +449,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { - in_fence = sync_file_get_fence(args->fence_fd); - - if (!in_fence) { - ret = -EINVAL; - goto out; - } - - /* TODO if we get an array-fence due to userspace merging multiple - * fences, we need a way to determine if all the backing fences - * are from our own context.. - */ - - if (in_fence->context != gpu->fctx->context) { - ret = dma_fence_wait(in_fence, true); - if (ret) - goto out; - } - - } - if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit); if (ret) diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index b654eca7636a..f285d7e210db 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -19,6 +19,25 @@ #include "msm_gem.h" #include "msm_mmu.h" +static void +msm_gem_address_space_destroy(struct kref *kref) +{ + struct msm_gem_address_space *aspace = container_of(kref, + struct msm_gem_address_space, kref); + + drm_mm_takedown(&aspace->mm); + if (aspace->mmu) + aspace->mmu->funcs->destroy(aspace->mmu); + kfree(aspace); +} + + +void msm_gem_address_space_put(struct msm_gem_address_space *aspace) +{ + if (aspace) + kref_put(&aspace->kref, msm_gem_address_space_destroy); +} + void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt) @@ -34,6 +53,8 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace, drm_mm_remove_node(&vma->node); vma->iova = 0; + + msm_gem_address_space_put(aspace); } int @@ -57,16 +78,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, size, IOMMU_READ | IOMMU_WRITE); } - return ret; -} + /* Get a reference to the aspace to keep it around */ + kref_get(&aspace->kref); -void -msm_gem_address_space_destroy(struct msm_gem_address_space *aspace) -{ - drm_mm_takedown(&aspace->mm); - if (aspace->mmu) - aspace->mmu->funcs->destroy(aspace->mmu); - kfree(aspace); + return ret; } struct msm_gem_address_space * @@ -85,5 +100,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); + kref_init(&aspace->kref); + return aspace; } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 99e05aacbee1..97b9c38c6b3f 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -93,18 +93,18 @@ static int enable_clk(struct msm_gpu *gpu) { int i; - if (gpu->grp_clks[0] && gpu->fast_rate) - clk_set_rate(gpu->grp_clks[0], gpu->fast_rate); + if (gpu->core_clk && gpu->fast_rate) + clk_set_rate(gpu->core_clk, gpu->fast_rate); /* Set the RBBM timer rate to 19.2Mhz */ - if (gpu->grp_clks[2]) - clk_set_rate(gpu->grp_clks[2], 19200000); + if (gpu->rbbmtimer_clk) + clk_set_rate(gpu->rbbmtimer_clk, 19200000); - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + for (i = gpu->nr_clocks - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_prepare(gpu->grp_clks[i]); - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + for (i = gpu->nr_clocks - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_enable(gpu->grp_clks[i]); @@ -115,19 +115,24 @@ static int disable_clk(struct msm_gpu *gpu) { int i; - for (i = 
ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + for (i = gpu->nr_clocks - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_disable(gpu->grp_clks[i]); - for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--) + for (i = gpu->nr_clocks - 1; i >= 0; i--) if (gpu->grp_clks[i]) clk_unprepare(gpu->grp_clks[i]); - if (gpu->grp_clks[0] && gpu->slow_rate) - clk_set_rate(gpu->grp_clks[0], gpu->slow_rate); + /* + * Set the clock to a deliberately low rate. On older targets the clock + * speed had to be non zero to avoid problems. On newer targets this + * will be rounded down to zero anyway so it all works out. + */ + if (gpu->core_clk) + clk_set_rate(gpu->core_clk, 27000000); - if (gpu->grp_clks[2]) - clk_set_rate(gpu->grp_clks[2], 0); + if (gpu->rbbmtimer_clk) + clk_set_rate(gpu->rbbmtimer_clk, 0); return 0; } @@ -152,18 +157,9 @@ static int disable_axi(struct msm_gpu *gpu) int msm_gpu_pm_resume(struct msm_gpu *gpu) { - struct drm_device *dev = gpu->dev; int ret; - DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt); - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (gpu->active_cnt++ > 0) - return 0; - - if (WARN_ON(gpu->active_cnt <= 0)) - return -EINVAL; + DBG("%s", gpu->name); ret = enable_pwrrail(gpu); if (ret) @@ -177,23 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) if (ret) return ret; + gpu->needs_hw_init = true; + return 0; } int msm_gpu_pm_suspend(struct msm_gpu *gpu) { - struct drm_device *dev = gpu->dev; int ret; - DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt); - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - if (--gpu->active_cnt > 0) - return 0; - - if (WARN_ON(gpu->active_cnt < 0)) - return -EINVAL; + DBG("%s", gpu->name); ret = disable_axi(gpu); if (ret) @@ -210,53 +199,20 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) return 0; } -/* - * Inactivity detection (for suspend): - */ - -static void inactive_worker(struct work_struct *work) +int msm_gpu_hw_init(struct msm_gpu *gpu) { - struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work); - struct drm_device *dev = gpu->dev; - - if (gpu->inactive) - return; - - DBG("%s: inactive!\n", gpu->name); - mutex_lock(&dev->struct_mutex); - if (!(msm_gpu_active(gpu) || gpu->inactive)) { - disable_axi(gpu); - disable_clk(gpu); - gpu->inactive = true; - } - mutex_unlock(&dev->struct_mutex); -} + int ret; -static void inactive_handler(unsigned long data) -{ - struct msm_gpu *gpu = (struct msm_gpu *)data; - struct msm_drm_private *priv = gpu->dev->dev_private; + if (!gpu->needs_hw_init) + return 0; - queue_work(priv->wq, &gpu->inactive_work); -} + disable_irq(gpu->irq); + ret = gpu->funcs->hw_init(gpu); + if (!ret) + gpu->needs_hw_init = false; + enable_irq(gpu->irq); -/* cancel inactive timer and make sure we are awake: */ -static void inactive_cancel(struct msm_gpu *gpu) -{ - DBG("%s", gpu->name); - del_timer(&gpu->inactive_timer); - if (gpu->inactive) { - enable_clk(gpu); - enable_axi(gpu); - gpu->inactive = false; - } -} - -static void inactive_start(struct msm_gpu *gpu) -{ - DBG("%s", gpu->name); - mod_timer(&gpu->inactive_timer, - round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES)); + return ret; } /* @@ -296,8 +252,9 @@ static void recover_worker(struct work_struct *work) /* retire completed submits, plus the one that hung: */ retire_submits(gpu); - inactive_cancel(gpu); + pm_runtime_get_sync(&gpu->pdev->dev); gpu->funcs->recover(gpu); + pm_runtime_put_sync(&gpu->pdev->dev); /* replay the remaining submits after the one that hung: */ list_for_each_entry(submit, &gpu->submit_list, node) { @@ -400,6 +357,8 @@ 
void msm_gpu_perfcntr_start(struct msm_gpu *gpu) { unsigned long flags; + pm_runtime_get_sync(&gpu->pdev->dev); + spin_lock_irqsave(&gpu->perf_lock, flags); /* we could dynamically enable/disable perfcntr registers too.. */ gpu->last_sample.active = msm_gpu_active(gpu); @@ -413,6 +372,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu) void msm_gpu_perfcntr_stop(struct msm_gpu *gpu) { gpu->perfcntr_active = false; + pm_runtime_put_sync(&gpu->pdev->dev); } /* returns -errno or # of cntrs sampled */ @@ -458,6 +418,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) drm_gem_object_unreference(&msm_obj->base); } + pm_runtime_mark_last_busy(&gpu->pdev->dev); + pm_runtime_put_autosuspend(&gpu->pdev->dev); msm_gem_submit_free(submit); } @@ -492,9 +454,6 @@ static void retire_worker(struct work_struct *work) mutex_lock(&dev->struct_mutex); retire_submits(gpu); mutex_unlock(&dev->struct_mutex); - - if (!msm_gpu_active(gpu)) - inactive_start(gpu); } /* call from irq handler to schedule work to retire bo's */ @@ -515,7 +474,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - inactive_cancel(gpu); + pm_runtime_get_sync(&gpu->pdev->dev); + + msm_gpu_hw_init(gpu); list_add_tail(&submit->node, &gpu->submit_list); @@ -559,16 +520,52 @@ static irqreturn_t irq_handler(int irq, void *data) return gpu->funcs->irq(gpu); } -static const char *clk_names[] = { - "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface", -}; +static struct clk *get_clock(struct device *dev, const char *name) +{ + struct clk *clk = devm_clk_get(dev, name); + + return IS_ERR(clk) ? NULL : clk; +} + +static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) +{ + struct device *dev = &pdev->dev; + struct property *prop; + const char *name; + int i = 0; + + gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names"); + if (gpu->nr_clocks < 1) { + gpu->nr_clocks = 0; + return 0; + } + + gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks, + GFP_KERNEL); + if (!gpu->grp_clks) + return -ENOMEM; + + of_property_for_each_string(dev->of_node, "clock-names", prop, name) { + gpu->grp_clks[i] = get_clock(dev, name); + + /* Remember the key clocks that we need to control later */ + if (!strcmp(name, "core")) + gpu->core_clk = gpu->grp_clks[i]; + else if (!strcmp(name, "rbbmtimer")) + gpu->rbbmtimer_clk = gpu->grp_clks[i]; + + ++i; + } + + return 0; +} int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, const char *ioname, const char *irqname, int ringsz) { struct iommu_domain *iommu; - int i, ret; + int ret; if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); @@ -576,7 +573,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->dev = drm; gpu->funcs = funcs; gpu->name = name; - gpu->inactive = true; gpu->fctx = msm_fence_context_alloc(drm, name); if (IS_ERR(gpu->fctx)) { ret = PTR_ERR(gpu->fctx); @@ -586,19 +582,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, INIT_LIST_HEAD(&gpu->active_list); INIT_WORK(&gpu->retire_work, retire_worker); - INIT_WORK(&gpu->inactive_work, inactive_worker); INIT_WORK(&gpu->recover_work, recover_worker); INIT_LIST_HEAD(&gpu->submit_list); - setup_timer(&gpu->inactive_timer, inactive_handler, - (unsigned long)gpu); setup_timer(&gpu->hangcheck_timer, 
hangcheck_handler, (unsigned long)gpu); spin_lock_init(&gpu->perf_lock); - BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks)); /* Map registers: */ gpu->mmio = msm_ioremap(pdev, ioname, name); @@ -622,13 +614,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; } - /* Acquire clocks: */ - for (i = 0; i < ARRAY_SIZE(clk_names); i++) { - gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]); - DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); - if (IS_ERR(gpu->grp_clks[i])) - gpu->grp_clks[i] = NULL; - } + ret = get_clocks(pdev, gpu); + if (ret) + goto fail; gpu->ebi1_clk = msm_clk_get(pdev, "bus"); DBG("ebi1_clk: %p", gpu->ebi1_clk); @@ -684,6 +672,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; } + gpu->pdev = pdev; + platform_set_drvdata(pdev, gpu); + bs_init(gpu); return 0; @@ -706,9 +697,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_ringbuffer_destroy(gpu->rb); } - if (gpu->aspace) - msm_gem_address_space_destroy(gpu->aspace); - if (gpu->fctx) msm_fence_context_free(gpu->fctx); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index c4c39d3272c7..aa3241000455 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -64,6 +64,7 @@ struct msm_gpu_funcs { struct msm_gpu { const char *name; struct drm_device *dev; + struct platform_device *pdev; const struct msm_gpu_funcs *funcs; /* performance counters (hw & sw): */ @@ -88,9 +89,8 @@ struct msm_gpu { /* fencing: */ struct msm_fence_context *fctx; - /* is gpu powered/active? */ - int active_cnt; - bool inactive; + /* does gpu need hw_init? */ + bool needs_hw_init; /* worker for handling active-list retiring: */ struct work_struct retire_work; @@ -103,8 +103,10 @@ struct msm_gpu { /* Power Control: */ struct regulator *gpu_reg, *gpu_cx; - struct clk *ebi1_clk, *grp_clks[6]; - uint32_t fast_rate, slow_rate, bus_freq; + struct clk **grp_clks; + int nr_clocks; + struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; + uint32_t fast_rate, bus_freq; #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; @@ -114,9 +116,7 @@ struct msm_gpu { /* Hang and Inactivity Detection: */ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ -#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD) - struct timer_list inactive_timer; - struct work_struct inactive_work; + #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) struct timer_list hangcheck_timer; @@ -196,6 +196,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val) int msm_gpu_pm_suspend(struct msm_gpu *gpu); int msm_gpu_pm_resume(struct msm_gpu *gpu); +int msm_gpu_hw_init(struct msm_gpu *gpu); + void msm_gpu_perfcntr_start(struct msm_gpu *gpu); void msm_gpu_perfcntr_stop(struct msm_gpu *gpu); int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime, diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 7f5779daf5c8..b23d33622f37 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -38,78 +38,47 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); - return iommu_attach_device(iommu->domain, mmu->dev); + int ret; + + pm_runtime_get_sync(mmu->dev); + ret = iommu_attach_device(iommu->domain, mmu->dev); + pm_runtime_put_sync(mmu->dev); + + return ret; 
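msm_iommu_attach() above (and detach, just below) now wakes the device around the IOMMU call, since the SMMU registers are only accessible while the GPU is powered. The get/put bracketing idiom, reduced to a compilable sketch with stubs standing in for pm_runtime_get_sync()/pm_runtime_put_sync():

#include <stdio.h>

static int power_refs;                       /* runtime-PM usage count stand-in */
static void pm_get(void) { power_refs++; }
static void pm_put(void) { power_refs--; }

static int program_smmu(void)
{
	/* must only run while the device is powered */
	return power_refs > 0 ? 0 : -1;
}

int attach_sketch(void)
{
	int ret;

	pm_get();                            /* power up before touching registers */
	ret = program_smmu();
	pm_put();                            /* drop the reference on every path */

	return ret;
}

int main(void)
{
	printf("attach -> %d\n", attach_sketch());   /* 0: programmed while powered */
	return 0;
}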
} static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, int cnt) { struct msm_iommu *iommu = to_msm_iommu(mmu); + + pm_runtime_get_sync(mmu->dev); iommu_detach_device(iommu->domain, mmu->dev); + pm_runtime_put_sync(mmu->dev); } static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len, int prot) { struct msm_iommu *iommu = to_msm_iommu(mmu); - struct iommu_domain *domain = iommu->domain; - struct scatterlist *sg; - unsigned long da = iova; - unsigned int i, j; - int ret; - - if (!domain || !sgt) - return -EINVAL; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - dma_addr_t pa = sg_phys(sg) - sg->offset; - size_t bytes = sg->length + sg->offset; - - VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes); + size_t ret; - ret = iommu_map(domain, da, pa, bytes, prot); - if (ret) - goto fail; +// pm_runtime_get_sync(mmu->dev); + ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot); +// pm_runtime_put_sync(mmu->dev); + WARN_ON(ret < 0); - da += bytes; - } - - return 0; - -fail: - da = iova; - - for_each_sg(sgt->sgl, sg, i, j) { - size_t bytes = sg->length + sg->offset; - iommu_unmap(domain, da, bytes); - da += bytes; - } - return ret; + return (ret == len) ? 0 : -EINVAL; } static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, unsigned len) { struct msm_iommu *iommu = to_msm_iommu(mmu); - struct iommu_domain *domain = iommu->domain; - struct scatterlist *sg; - unsigned long da = iova; - int i; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) { - size_t bytes = sg->length + sg->offset; - size_t unmapped; - - unmapped = iommu_unmap(domain, da, bytes); - if (unmapped < bytes) - return unmapped; - - VERB("unmap[%d]: %08lx(%zx)", i, da, bytes); - - BUG_ON(!PAGE_ALIGNED(bytes)); - da += bytes; - } + pm_runtime_get_sync(mmu->dev); + iommu_unmap(iommu->domain, iova, len); + pm_runtime_put_sync(mmu->dev); return 0; } diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 3df7322fd74e..0e81faab2c50 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -322,7 +322,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) } for (i = 0; i < submit->nr_cmds; i++) { - uint32_t iova = submit->cmd[i].iova; + uint64_t iova = submit->cmd[i].iova; uint32_t szd = submit->cmd[i].size; /* in dwords */ /* snapshot cmdstream bo's (if we haven't already): */ @@ -341,7 +341,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: case MSM_SUBMIT_CMD_BUF: rd_write_section(rd, RD_CMDSTREAM_ADDR, - (uint32_t[2]){ iova, szd }, 8); + (uint32_t[3]){ iova, szd, iova >> 32 }, 12); break; } } diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h index fac0824197f1..bf3e532665fb 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h @@ -22,17 +22,14 @@ #ifndef __NVKM_CORE_MSGQUEUE_H #define __NVKM_CORE_MSGQUEUE_H - -#include <core/os.h> - -struct nvkm_falcon; +#include <subdev/secboot.h> struct nvkm_msgqueue; -enum nvkm_secboot_falcon; /* Hopefully we will never have firmware arguments larger than that... 
*/ #define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100 -int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **); +int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *, + struct nvkm_msgqueue **); void nvkm_msgqueue_del(struct nvkm_msgqueue **); void nvkm_msgqueue_recv(struct nvkm_msgqueue *); int nvkm_msgqueue_reinit(struct nvkm_msgqueue *); @@ -41,7 +38,6 @@ int nvkm_msgqueue_reinit(struct nvkm_msgqueue *); void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *); /* interface to ACR unit running on falcon (NVIDIA signed firmware) */ -int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *, - enum nvkm_secboot_falcon); +int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h index e5c9b6268dcc..7c7d91cad09a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h @@ -42,6 +42,10 @@ struct nvkm_device_tegra_func { * Whether the chip requires a reference clock */ bool require_ref_clk; + /* + * Whether the chip requires the VDD regulator + */ + bool require_vdd; }; int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *, diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index 24efa900d8ca..f00527b36acc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -68,4 +68,5 @@ int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); +int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 0a636833e0eb..c7944b19bed8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -44,4 +44,6 @@ int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 891497a0fe3b..28d513fbf44c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -97,6 +97,7 @@ int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **); #include <subdev/bios.h> #include <subdev/bios/ramcfg.h> diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h index c4ecf255ff39..6e2b70bd2f41 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h @@ -7,4 +7,5 @@ int 
gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); +int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h index e68ba636741b..58f10890c3b6 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h @@ -29,4 +29,5 @@ int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); +int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h index d6a4bdb6573b..59f3ba551681 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h @@ -55,10 +55,11 @@ struct nvkm_secboot { #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon); -int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon); +int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long); int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); +int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 548f36d33924..e427f80344c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1574,6 +1574,7 @@ struct ttm_bo_driver nouveau_bo_driver = { .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; struct nvkm_vma * diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6104f61b00fc..21b10f9840c9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -111,7 +111,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, }; struct nouveau_display *disp = nouveau_display(crtc->dev); struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)]; - int ret, retry = 1; + int ret, retry = 20; do { ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 4c4cc2260257..1ada186fab77 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -53,13 +53,21 @@ static int nouveau_platform_remove(struct platform_device *pdev) #if IS_ENABLED(CONFIG_OF) static const struct nvkm_device_tegra_func gk20a_platform_data = { .iommu_bit = 34, + .require_vdd = true, }; static const struct nvkm_device_tegra_func gm20b_platform_data = { .iommu_bit = 34, + .require_vdd = true, .require_ref_clk = true, }; +static const 
struct nvkm_device_tegra_func gp10b_platform_data = { + .iommu_bit = 36, + /* power provided by generic PM domains */ + .require_vdd = false, +}; + static const struct of_device_id nouveau_platform_match[] = { { .compatible = "nvidia,gk20a", @@ -69,6 +77,10 @@ static const struct of_device_id nouveau_platform_match[] = { .compatible = "nvidia,gm20b", .data = &gm20b_platform_data, }, + { + .compatible = "nvidia,gp10b", + .data = &gp10b_platform_data, + }, { } }; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index bf504788bce6..0e58537352fe 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1002,7 +1002,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane, { struct nv50_wndw_atom *asyw = nv50_wndw_atom(state); __drm_atomic_helper_plane_destroy_state(&asyw->state); - dma_fence_put(asyw->state.fence); kfree(asyw); } @@ -1014,7 +1013,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane) if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL))) return NULL; __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state); - asyw->state.fence = NULL; asyw->interval = 1; asyw->sema = armw->sema; asyw->ntfy = armw->ntfy; @@ -2043,6 +2041,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace; u32 hfrontp = mode->hsync_start - mode->hdisplay; u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace; + u32 blankus; struct nv50_head_mode *m = &asyh->mode; m->h.active = mode->htotal; @@ -2056,9 +2055,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) m->v.blanks = m->v.active - vfrontp - 1; /*XXX: Safe underestimate, even "0" works */ - m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; - m->v.blankus *= 1000; - m->v.blankus /= mode->clock; + blankus = (m->v.active - mode->vdisplay - 2) * m->h.active; + blankus *= 1000; + blankus /= mode->clock; + m->v.blankus = blankus; if (mode->flags & DRM_MODE_FLAG_INTERLACE) { m->v.blank2e = m->v.active + m->v.synce + vbackp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 1076949b802a..b690bc12a5b7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -714,7 +714,7 @@ nv4a_chipset = { .i2c = nv04_i2c_new, .imem = nv40_instmem_new, .mc = nv44_mc_new, - .mmu = nv44_mmu_new, + .mmu = nv04_mmu_new, .pci = nv40_pci_new, .therm = nv40_therm_new, .timer = nv41_timer_new, @@ -2201,8 +2201,6 @@ nv132_chipset = { .mc = gp100_mc_new, .mmu = gf100_mmu_new, .secboot = gp102_secboot_new, - .sec2 = gp102_sec2_new, - .nvdec = gp102_nvdec_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2215,6 +2213,8 @@ nv132_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp102_gr_new, + .nvdec = gp102_nvdec_new, + .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2235,8 +2235,6 @@ nv134_chipset = { .mc = gp100_mc_new, .mmu = gf100_mmu_new, .secboot = gp102_secboot_new, - .sec2 = gp102_sec2_new, - .nvdec = gp102_nvdec_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2249,6 +2247,8 @@ nv134_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp102_gr_new, + .nvdec = gp102_nvdec_new, + .sec2 = gp102_sec2_new, .sw = gf100_sw_new, }; @@ -2269,8 +2269,6 @@ nv136_chipset = { .mc = gp100_mc_new, .mmu = gf100_mmu_new, 
.secboot = gp102_secboot_new, - .sec2 = gp102_sec2_new, - .nvdec = gp102_nvdec_new, .pci = gp100_pci_new, .pmu = gp102_pmu_new, .timer = gk20a_timer_new, @@ -2283,6 +2281,65 @@ nv136_chipset = { .dma = gf119_dma_new, .fifo = gp100_fifo_new, .gr = gp102_gr_new, + .nvdec = gp102_nvdec_new, + .sec2 = gp102_sec2_new, + .sw = gf100_sw_new, +}; + +static const struct nvkm_device_chip +nv137_chipset = { + .name = "GP107", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp102_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .secboot = gp102_secboot_new, + .pci = gp100_pci_new, + .pmu = gp102_pmu_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp102_ce_new, + .ce[1] = gp102_ce_new, + .ce[2] = gp102_ce_new, + .ce[3] = gp102_ce_new, + .disp = gp102_disp_new, + .dma = gf119_dma_new, + .fifo = gp100_fifo_new, + .gr = gp107_gr_new, + .nvdec = gp102_nvdec_new, + .sec2 = gp102_sec2_new, + .sw = gf100_sw_new, +}; + +static const struct nvkm_device_chip +nv13b_chipset = { + .name = "GP10B", + .bar = gk20a_bar_new, + .bus = gf100_bus_new, + .fb = gp10b_fb_new, + .fuse = gm107_fuse_new, + .ibus = gp10b_ibus_new, + .imem = gk20a_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp10b_mc_new, + .mmu = gf100_mmu_new, + .secboot = gp10b_secboot_new, + .pmu = gm20b_pmu_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[2] = gp102_ce_new, + .dma = gf119_dma_new, + .fifo = gp10b_fifo_new, + .gr = gp10b_gr_new, .sw = gf100_sw_new, }; @@ -2724,6 +2781,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x132: device->chip = &nv132_chipset; break; case 0x134: device->chip = &nv134_chipset; break; case 0x136: device->chip = &nv136_chipset; break; + case 0x137: device->chip = &nv137_chipset; break; + case 0x13b: device->chip = &nv13b_chipset; break; default: nvdev_error(device, "unknown chipset (%08x)\n", boot0); goto done; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index f2bc0b7d9b93..6474bd2a6d07 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -28,9 +28,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) { int ret; - ret = regulator_enable(tdev->vdd); - if (ret) - goto err_power; + if (tdev->vdd) { + ret = regulator_enable(tdev->vdd); + if (ret) + goto err_power; + } ret = clk_prepare_enable(tdev->clk); if (ret) @@ -67,7 +69,8 @@ err_clk_pwr: err_clk_ref: clk_disable_unprepare(tdev->clk); err_clk: - regulator_disable(tdev->vdd); + if (tdev->vdd) + regulator_disable(tdev->vdd); err_power: return ret; } @@ -75,6 +78,8 @@ err_power: static int nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev) { + int ret; + reset_control_assert(tdev->rst); udelay(10); @@ -84,7 +89,13 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev) clk_disable_unprepare(tdev->clk); udelay(10); - return regulator_disable(tdev->vdd); + if (tdev->vdd) { + ret = regulator_disable(tdev->vdd); + if (ret) + return ret; + } + + return 0; } static void @@ -264,10 +275,12 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, tdev->func = func; tdev->pdev = pdev; - tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); - if (IS_ERR(tdev->vdd)) { - ret = PTR_ERR(tdev->vdd); - goto free; + if (func->require_vdd) 
{ + tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(tdev->vdd)) { + ret = PTR_ERR(tdev->vdd); + goto free; + } } tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 98651a43bc12..64e51838edf8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -14,6 +14,7 @@ nvkm-y += nvkm/engine/fifo/gm107.o nvkm-y += nvkm/engine/fifo/gm200.o nvkm-y += nvkm/engine/fifo/gm20b.o nvkm-y += nvkm/engine/fifo/gp100.o +nvkm-y += nvkm/engine/fifo/gp10b.o nvkm-y += nvkm/engine/fifo/chan.o nvkm-y += nvkm/engine/fifo/channv50.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c index 0ec179fc40a1..5f722c6e8a2f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c @@ -44,6 +44,8 @@ nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx) *ctx = 0x38; return true; case NVKM_ENGINE_MPEG: + if (engine->subdev.device->chipset < 0x44) + return false; *reg = 0x00330c; *ctx = 0x54; return true; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h index 679f3ec311e9..44bff98d6725 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h @@ -83,4 +83,5 @@ extern const struct nvkm_enum gk104_fifo_fault_hubclient[]; extern const struct nvkm_enum gk104_fifo_fault_gpcclient[]; extern const struct nvkm_enum gm107_fifo_fault_engine[]; +extern const struct nvkm_enum gp100_fifo_fault_engine[]; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c index b2635aea9f6e..41f16cf5a918 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c @@ -24,7 +24,7 @@ #include "gk104.h" #include "changk104.h" -static const struct nvkm_enum +const struct nvkm_enum gp100_fifo_fault_engine[] = { { 0x01, "DISPLAY" }, { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c new file mode 100644 index 000000000000..4af96c3e69ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "gk104.h" +#include "changk104.h" + +static const struct gk104_fifo_func +gp10b_fifo = { + .fault.engine = gp100_fifo_fault_engine, + .fault.reason = gk104_fifo_fault_reason, + .fault.hubclient = gk104_fifo_fault_hubclient, + .fault.gpcclient = gk104_fifo_fault_gpcclient, + .chan = { + &gp100_fifo_gpfifo_oclass, + NULL + }, +}; + +int +gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +{ + return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 2938ad5aca40..8a22558b7b52 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -33,6 +33,8 @@ nvkm-y += nvkm/engine/gr/gm200.o nvkm-y += nvkm/engine/gr/gm20b.o nvkm-y += nvkm/engine/gr/gp100.o nvkm-y += nvkm/engine/gr/gp102.o +nvkm-y += nvkm/engine/gr/gp107.o +nvkm-y += nvkm/engine/gr/gp10b.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o @@ -52,3 +54,4 @@ nvkm-y += nvkm/engine/gr/ctxgm200.o nvkm-y += nvkm/engine/gr/ctxgm20b.o nvkm-y += nvkm/engine/gr/ctxgp100.o nvkm-y += nvkm/engine/gr/ctxgp102.o +nvkm-y += nvkm/engine/gr/ctxgp107.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 0ae032fa2909..017180d147cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -106,6 +106,9 @@ void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *); void gp100_grctx_generate_pagepool(struct gf100_grctx *); extern const struct gf100_grctx_func gp102_grctx; +void gp102_grctx_generate_attrib(struct gf100_grctx *); + +extern const struct gf100_grctx_func gp107_grctx; /* context init value lists */ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c index ee26d64af73a..80b7ab0bee3a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c @@ -29,7 +29,7 @@ * PGRAPH context implementation ******************************************************************************/ -static void +void gp102_grctx_generate_attrib(struct gf100_grctx *info) { struct gf100_gr *gr = info->gr; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c new file mode 100644 index 000000000000..8da91a0b3bd2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c @@ -0,0 +1,47 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "ctxgf100.h" + +#include <subdev/fb.h> + +/******************************************************************************* + * PGRAPH context implementation + ******************************************************************************/ + +const struct gf100_grctx_func +gp107_grctx = { + .main = gp100_grctx_generate_main, + .unkn = gk104_grctx_generate_unkn, + .bundle = gm107_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x300, + .pagepool = gp100_grctx_generate_pagepool, + .pagepool_size = 0x20000, + .attrib = gp102_grctx_generate_attrib, + .attrib_nr_max = 0x15de, + .attrib_nr = 0x540, + .alpha_nr_max = 0xc00, + .alpha_nr = 0x800, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index a4410ef19db5..99689f4de502 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1463,25 +1463,27 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr) struct nvkm_subdev *subdev = &gr->base.engine.subdev; struct nvkm_device *device = subdev->device; struct nvkm_secboot *sb = device->secboot; - int ret = 0; + u32 secboot_mask = 0; /* load fuc microcode */ nvkm_mc_unk260(device, 0); /* securely-managed falcons must be reset using secure boot */ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) - ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); + secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS); else gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d); - if (ret) - return ret; if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) - ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); + secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS); else gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad); - if (ret) - return ret; + + if (secboot_mask != 0) { + int ret = nvkm_secboot_reset(sb, secboot_mask); + if (ret) + return ret; + } nvkm_mc_unk260(device, 1); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 1d2101af2a87..a36e45a4a635 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -125,6 +125,7 @@ struct gf100_gr_func { void (*init_rop_active_fbps)(struct gf100_gr *); void (*init_ppc_exceptions)(struct gf100_gr *); void (*init_swdx_pes_mask)(struct gf100_gr *); + void (*init_num_active_ltcs)(struct gf100_gr *); void (*set_hww_esr_report_mask)(struct gf100_gr *); const struct gf100_gr_pack *mmio; struct { @@ -301,4 +302,8 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; void gm107_gr_init_bios(struct gf100_gr *); void gm200_gr_init_gpc_mmu(struct gf100_gr *); + +void gp100_gr_init_num_active_ltcs(struct gf100_gr *gr); + +void gp102_gr_init_swdx_pes_mask(struct gf100_gr *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c index 
94ed7debb714..867a5f7cc5bc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c @@ -40,6 +40,15 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr) nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */ } +void +gp100_gr_init_num_active_ltcs(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); + nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); +} + int gp100_gr_init(struct gf100_gr *gr) { @@ -81,8 +90,7 @@ gp100_gr_init(struct gf100_gr *gr) } nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); - nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); - nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); + gr->func->init_num_active_ltcs(gr); gr->func->init_rop_active_fbps(gr); if (gr->func->init_swdx_pes_mask) @@ -154,6 +162,7 @@ gp100_gr = { .init_gpc_mmu = gm200_gr_init_gpc_mmu, .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .init_num_active_ltcs = gp100_gr_init_num_active_ltcs, .rops = gm200_gr_rops, .ppc_nr = 2, .grctx = &gp100_grctx, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c index 1d5117a16299..61e3a0b08559 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c @@ -26,7 +26,7 @@ #include <nvif/class.h> -static void +void gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; @@ -47,6 +47,7 @@ gp102_gr = { .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, + .init_num_active_ltcs = gp100_gr_init_num_active_ltcs, .rops = gm200_gr_rops, .ppc_nr = 3, .grctx = &gp102_grctx, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c new file mode 100644 index 000000000000..f7272323f694 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c @@ -0,0 +1,53 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ctxgf100.h" + +#include <nvif/class.h> + +static const struct gf100_gr_func +gp107_gr = { + .init = gp100_gr_init, + .init_gpc_mmu = gm200_gr_init_gpc_mmu, + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask, + .init_num_active_ltcs = gp100_gr_init_num_active_ltcs, + .rops = gm200_gr_rops, + .ppc_nr = 1, + .grctx = &gp107_grctx, + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, PASCAL_B, &gf100_fermi }, + { -1, -1, PASCAL_COMPUTE_B }, + {} + } +}; + +int +gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gm200_gr_new_(&gp107_gr, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c new file mode 100644 index 000000000000..5f3d161a0842 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gf100.h" +#include "ctxgf100.h" + +#include <nvif/class.h> + +static void +gp10b_gr_init_num_active_ltcs(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); +} + +static const struct gf100_gr_func +gp10b_gr = { + .init = gp100_gr_init, + .init_gpc_mmu = gm200_gr_init_gpc_mmu, + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .init_num_active_ltcs = gp10b_gr_init_num_active_ltcs, + .rops = gm200_gr_rops, + .ppc_nr = 1, + .grctx = &gp102_grctx, + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, PASCAL_A, &gf100_fermi }, + { -1, -1, PASCAL_COMPUTE_A }, + {} + } +}; + +int +gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gm200_gr_new_(&gp10b_gr, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c index 003ac915eaad..8a8895246d26 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c @@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine) } if (type == 0x00000010) { - if (!nv31_mpeg_mthd(mpeg, mthd, data)) + if (nv31_mpeg_mthd(mpeg, mthd, data)) show &= ~0x01000000; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c index e536f37e24b0..c3cf02ed468e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c @@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine) } if (type == 0x00000010) { - if (!nv44_mpeg_mthd(subdev->device, mthd, data)) + if (nv44_mpeg_mthd(subdev->device, mthd, data)) show &= ~0x01000000; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c index 982efedb4b13..d45d7947a964 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c @@ -463,26 +463,49 @@ nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf) } int -nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon) +nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue, + unsigned long falcon_mask) { - if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon) + unsigned long falcon; + + if (!queue || !queue->func->acr_func) return -ENODEV; - return queue->func->acr_func->boot_falcon(queue, falcon); + /* Does the firmware support booting multiple falcons? 
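+ * If so, a single command can cover the whole mask; otherwise the
+ * code below falls back to booting each requested falcon one by one.
+ * Callers build the mask with BIT(); a sketch of the expected usage,
+ * with the mask-building mirroring gf100_gr_init_ctxctl_ext()
+ * elsewhere in this merge:
+ *
+ *   unsigned long mask = BIT(NVKM_SECBOOT_FALCON_FECS) |
+ *                        BIT(NVKM_SECBOOT_FALCON_GPCCS);
+ *   int ret = nvkm_msgqueue_acr_boot_falcons(queue, mask);
+ *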
*/ + if (queue->func->acr_func->boot_multiple_falcons) + return queue->func->acr_func->boot_multiple_falcons(queue, + falcon_mask); + + /* Else boot all requested falcons individually */ + if (!queue->func->acr_func->boot_falcon) + return -ENODEV; + + for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { + int ret = queue->func->acr_func->boot_falcon(queue, falcon); + + if (ret) + return ret; + } + + return 0; } int -nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue) +nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, + const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue) { const struct nvkm_subdev *subdev = falcon->owner; int ret = -EINVAL; switch (version) { case 0x0137c63d: - ret = msgqueue_0137c63d_new(falcon, queue); + ret = msgqueue_0137c63d_new(falcon, sb, queue); + break; + case 0x0137bca5: + ret = msgqueue_0137bca5_new(falcon, sb, queue); break; case 0x0148cdec: - ret = msgqueue_0148cdec_new(falcon, queue); + ret = msgqueue_0148cdec_new(falcon, sb, queue); break; default: nvkm_error(subdev, "unhandled firmware version 0x%08x\n", diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h index f37afe963d3e..13b54f8d8e04 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h @@ -101,9 +101,11 @@ struct nvkm_msgqueue_init_func { * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR * * @boot_falcon: build and send the command to reset a given falcon + * @boot_multiple_falcons: build and send the command to reset several falcons */ struct nvkm_msgqueue_acr_func { int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon); + int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long); }; struct nvkm_msgqueue_func { @@ -201,7 +203,11 @@ int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority, void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *, struct nvkm_msgqueue_queue *); -int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **); -int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **); +int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *, + struct nvkm_msgqueue **); +int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *, + struct nvkm_msgqueue **); +int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *, + struct nvkm_msgqueue **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c index bba91207fb18..fec0273158f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c @@ -43,6 +43,15 @@ struct msgqueue_0137c63d { #define msgqueue_0137c63d(q) \ container_of(q, struct msgqueue_0137c63d, base) +struct msgqueue_0137bca5 { + struct msgqueue_0137c63d base; + + u64 wpr_addr; +}; +#define msgqueue_0137bca5(q) \ + container_of(container_of(q, struct msgqueue_0137c63d, base), \ + struct msgqueue_0137bca5, base); + static struct nvkm_msgqueue_queue * msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue, enum msgqueue_msg_priority priority) @@ -180,6 +189,7 @@ msgqueue_0137c63d_init_func = { enum { ACR_CMD_INIT_WPR_REGION = 0x00, ACR_CMD_BOOTSTRAP_FALCON = 0x01, + ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03, }; static void @@ -286,11 +296,81 @@ acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon 
falcon) return 0; } +static void +acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv, + struct nvkm_msgqueue_hdr *hdr) +{ + struct acr_bootstrap_falcon_msg { + struct nvkm_msgqueue_msg base; + + u32 falcon_mask; + } *msg = (void *)hdr; + const struct nvkm_subdev *subdev = priv->falcon->owner; + unsigned long falcon_mask = msg->falcon_mask; + u32 falcon_id, falcon_treated = 0; + + for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) { + nvkm_debug(subdev, "%s booted\n", + nvkm_secboot_falcon_name[falcon_id]); + falcon_treated |= BIT(falcon_id); + } + + if (falcon_treated != msg->falcon_mask) { + nvkm_error(subdev, "in bootstrap falcon callback:\n"); + nvkm_error(subdev, "invalid falcon mask 0x%x\n", + msg->falcon_mask); + return; + } +} + +static int +acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask) +{ + DECLARE_COMPLETION_ONSTACK(completed); + /* + * flags - Flag specifying RESET or no RESET. + * falcon id - Falcon id specifying falcon to bootstrap. + */ + struct { + struct nvkm_msgqueue_hdr hdr; + u8 cmd_type; + u32 flags; + u32 falcon_mask; + u32 use_va_mask; + u32 wpr_lo; + u32 wpr_hi; + } cmd; + struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv); + + memset(&cmd, 0, sizeof(cmd)); + + cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR; + cmd.hdr.size = sizeof(cmd); + cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS; + cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES; + cmd.falcon_mask = falcon_mask; + cmd.wpr_lo = lower_32_bits(queue->wpr_addr); + cmd.wpr_hi = upper_32_bits(queue->wpr_addr); + nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr, + acr_boot_multiple_falcons_callback, &completed, true); + + if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000))) + return -ETIMEDOUT; + + return 0; +} + static const struct nvkm_msgqueue_acr_func msgqueue_0137c63d_acr_func = { .boot_falcon = acr_boot_falcon, }; +static const struct nvkm_msgqueue_acr_func +msgqueue_0137bca5_acr_func = { + .boot_falcon = acr_boot_falcon, + .boot_multiple_falcons = acr_boot_multiple_falcons, +}; + static void msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue) { @@ -307,7 +387,8 @@ msgqueue_0137c63d_func = { }; int -msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue) +msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, + struct nvkm_msgqueue **queue) { struct msgqueue_0137c63d *ret; @@ -321,3 +402,35 @@ msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue) return 0; } + +static const struct nvkm_msgqueue_func +msgqueue_0137bca5_func = { + .init_func = &msgqueue_0137c63d_init_func, + .acr_func = &msgqueue_0137bca5_acr_func, + .cmd_queue = msgqueue_0137c63d_cmd_queue, + .recv = msgqueue_0137c63d_process_msgs, + .dtor = msgqueue_0137c63d_dtor, +}; + +int +msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, + struct nvkm_msgqueue **queue) +{ + struct msgqueue_0137bca5 *ret; + + ret = kzalloc(sizeof(*ret), GFP_KERNEL); + if (!ret) + return -ENOMEM; + + *queue = &ret->base.base; + + /* + * FIXME this must be set to the address of a *GPU* mapping within the + * ACR address space! 
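+ * acr_boot_multiple_falcons() above consumes this value by splitting
+ * it into the command's wpr_lo/wpr_hi fields:
+ *
+ *   cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
+ *   cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
+ *
+ * sb->wpr_addr, as read from the memory-controller carveout, is not
+ * such a mapping, so the assignment below stays commented out.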
+ */ + /* ret->wpr_addr = sb->wpr_addr; */ + + nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base); + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c index ed5d0da4f4e9..9424803b9ef4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c @@ -247,7 +247,8 @@ msgqueue_0148cdec_func = { }; int -msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue) +msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb, + struct nvkm_msgqueue **queue) { struct msgqueue_0148cdec *ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c index eaf74eb72983..8ab896dd4e92 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c @@ -33,7 +33,7 @@ nvbios_boostTe(struct nvkm_bios *bios, u32 boost = 0; if (!bit_entry(bios, 'P', &bit_P)) { - if (bit_P.version == 2) + if (bit_P.version == 2 && bit_P.length >= 0x34) boost = nvbios_rd32(bios, bit_P.offset + 0x30); if (boost) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c index 5063382d8a6c..7c8c36054f71 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c @@ -33,7 +33,7 @@ nvbios_cstepTe(struct nvkm_bios *bios, u32 cstep = 0; if (!bit_entry(bios, 'P', &bit_P)) { - if (bit_P.version == 2) + if (bit_P.version == 2 && bit_P.length >= 0x38) cstep = nvbios_rd32(bios, bit_P.offset + 0x34); if (cstep) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c index 456f9ea920dc..0dfb15a27e4e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c @@ -32,7 +32,7 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) u32 fan = 0; if (!bit_entry(bios, 'P', &bit_P)) { - if (bit_P.version == 2 && bit_P.length >= 0x5a) + if (bit_P.version == 2 && bit_P.length >= 0x5c) fan = nvbios_rd32(bios, bit_P.offset + 0x58); if (fan) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c index 617bfffce4ad..03d2f970a29f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c @@ -33,7 +33,7 @@ nvbios_power_budget_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u32 power_budget; if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 || - bit_P.length < 0x2c) + bit_P.length < 0x30) return 0; power_budget = nvbios_rd32(bios, bit_P.offset + 0x2c); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c index f199270163d2..20b6fc8243e0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c @@ -31,7 +31,7 @@ nvbios_vpstate_offset(struct nvkm_bios *b) struct bit_entry bit_P; if (!bit_entry(b, 'P', &bit_P)) { - if (bit_P.version == 2) + if (bit_P.version == 2 && bit_P.length >= 0x3c) return nvbios_rd32(b, bit_P.offset + 0x38); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index 1c5e5ba487a8..2571530e82f1 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -28,6 +28,7 @@ nvkm-y += nvkm/subdev/fb/gm200.o nvkm-y += nvkm/subdev/fb/gm20b.o nvkm-y += nvkm/subdev/fb/gp100.o nvkm-y += nvkm/subdev/fb/gp102.o +nvkm-y += nvkm/subdev/fb/gp10b.o nvkm-y += nvkm/subdev/fb/ram.o nvkm-y += nvkm/subdev/fb/ramnv04.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c new file mode 100644 index 000000000000..f2b1fbf428d5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include "gf100.h" + +static const struct nvkm_fb_func +gp10b_fb = { + .dtor = gf100_fb_dtor, + .oneinit = gf100_fb_oneinit, + .init = gm200_fb_init, + .init_page = gm200_fb_init_page, + .intr = gf100_fb_intr, + .memtype_valid = gf100_fb_memtype_valid, +}; + +int +gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +{ + return gf100_fb_new_(&gp10b_fb, device, index, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c index 53c32fc694e9..c63975907c90 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c @@ -589,7 +589,7 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb, nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n", fbp, size, ltcs); lcomm = min(lcomm, (u64)(size / ltcs) << 20); - total += size << 20; + total += (u64) size << 20; ltcn += ltcs; } else { nvkm_debug(subdev, "FBP %d: disabled\n", fbp); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c index 77c649723ad7..4a57defc99b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c @@ -164,7 +164,7 @@ static int nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_gpio *gpio = nvkm_gpio(subdev); - u32 mask = (1 << gpio->func->lines) - 1; + u32 mask = (1ULL << gpio->func->lines) - 1; gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0); gpio->func->intr_stat(gpio, &mask, &mask); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild index ad572d3b5466..7f5883b78efa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild @@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ibus/gf117.o nvkm-y += nvkm/subdev/ibus/gk104.o nvkm-y += nvkm/subdev/ibus/gk20a.o nvkm-y += nvkm/subdev/ibus/gm200.o +nvkm-y += nvkm/subdev/ibus/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c new file mode 100644 index 000000000000..39db90aa2c80 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include <subdev/ibus.h> + +#include "priv.h" + +static int +gp10b_ibus_init(struct nvkm_subdev *ibus) +{ + struct nvkm_device *device = ibus->device; + + nvkm_wr32(device, 0x1200a8, 0x0); + + /* init ring */ + nvkm_wr32(device, 0x12004c, 0x4); + nvkm_wr32(device, 0x122204, 0x2); + nvkm_rd32(device, 0x122204); + + /* timeout configuration */ + nvkm_wr32(device, 0x009080, 0x800186a0); + + return 0; +} + +static const struct nvkm_subdev_func +gp10b_ibus = { + .init = gp10b_ibus_init, + .intr = gk104_ibus_intr, +}; + +int +gp10b_ibus_new(struct nvkm_device *device, int index, + struct nvkm_subdev **pibus) +{ + struct nvkm_subdev *ibus; + if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL))) + return -ENOMEM; + nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index 9dec58ec3d9f..cd5adbec5e57 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -94,7 +94,7 @@ struct gk20a_instmem { struct nvkm_instmem base; /* protects vaddr_* and gk20a_instobj::vaddr* */ - spinlock_t lock; + struct mutex lock; /* CPU mappings LRU */ unsigned int vaddr_use; @@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory) struct gk20a_instmem *imem = node->base.imem; struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; const u64 size = nvkm_memory_size(memory); - unsigned long flags; nvkm_ltc_flush(ltc); - spin_lock_irqsave(&imem->lock, flags); + mutex_lock(&imem->lock); if (node->base.vaddr) { if (!node->use_cpt) { @@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory) out: node->use_cpt++; - spin_unlock_irqrestore(&imem->lock, flags); + mutex_unlock(&imem->lock); return node->base.vaddr; } @@ -239,9 +238,8 @@ 
gk20a_instobj_release_iommu(struct nvkm_memory *memory) struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); struct gk20a_instmem *imem = node->base.imem; struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; - unsigned long flags; - spin_lock_irqsave(&imem->lock, flags); + mutex_lock(&imem->lock); /* we should at least have one user to release... */ if (WARN_ON(node->use_cpt == 0)) @@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory) list_add_tail(&node->vaddr_node, &imem->vaddr_lru); out: - spin_unlock_irqrestore(&imem->lock, flags); + mutex_unlock(&imem->lock); wmb(); nvkm_ltc_invalidate(ltc); @@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory) struct gk20a_instmem *imem = node->base.imem; struct device *dev = imem->base.subdev.device->dev; struct nvkm_mm_node *r = node->base.mem.mem; - unsigned long flags; int i; if (unlikely(!r)) goto out; - spin_lock_irqsave(&imem->lock, flags); + mutex_lock(&imem->lock); /* vaddr has already been recycled */ if (node->base.vaddr) gk20a_instobj_iommu_recycle_vaddr(node); - spin_unlock_irqrestore(&imem->lock, flags); + mutex_unlock(&imem->lock); /* clear IOMMU bit to unmap pages */ r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift); @@ -571,7 +568,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index, if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) return -ENOMEM; nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base); - spin_lock_init(&imem->lock); + mutex_init(&imem->lock); *pimem = &imem->base; /* do not allow more than 1MB of CPU-mapped instmem */ diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild index 12943f92c206..2befbe36dc28 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild @@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/mc/gf100.o nvkm-y += nvkm/subdev/mc/gk104.o nvkm-y += nvkm/subdev/mc/gk20a.o nvkm-y += nvkm/subdev/mc/gp100.o +nvkm-y += nvkm/subdev/mc/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c index 4d22f4abd6de..7321ad3758c3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c @@ -42,7 +42,7 @@ gp100_mc_intr_update(struct gp100_mc *mc) } } -static void +void gp100_mc_intr_unarm(struct nvkm_mc *base) { struct gp100_mc *mc = gp100_mc(base); @@ -53,7 +53,7 @@ gp100_mc_intr_unarm(struct nvkm_mc *base) spin_unlock_irqrestore(&mc->lock, flags); } -static void +void gp100_mc_intr_rearm(struct nvkm_mc *base) { struct gp100_mc *mc = gp100_mc(base); @@ -64,7 +64,7 @@ gp100_mc_intr_rearm(struct nvkm_mc *base) spin_unlock_irqrestore(&mc->lock, flags); } -static void +void gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr) { struct gp100_mc *mc = gp100_mc(base); @@ -87,13 +87,14 @@ gp100_mc = { }; int -gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, + int index, struct nvkm_mc **pmc) { struct gp100_mc *mc; if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL))) return -ENOMEM; - nvkm_mc_ctor(&gp100_mc, device, index, &mc->base); + nvkm_mc_ctor(func, device, index, &mc->base); *pmc = &mc->base; spin_lock_init(&mc->lock); @@ -101,3 +102,9 @@ gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) mc->mask = 0x7fffffff; return 0; } + +int +gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +{ + 
return gp100_mc_new_(&gp100_mc, device, index, pmc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c new file mode 100644 index 000000000000..2283e3b74277 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "priv.h" + +void +gp10b_mc_init(struct nvkm_mc *mc) +{ + struct nvkm_device *device = mc->subdev.device; + nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */ + nvkm_wr32(device, 0x00020c, 0xffffffff); /* everything out of ELPG */ +} + +static const struct nvkm_mc_func +gp10b_mc = { + .init = gp10b_mc_init, + .intr = gk104_mc_intr, + .intr_unarm = gp100_mc_intr_unarm, + .intr_rearm = gp100_mc_intr_rearm, + .intr_mask = gp100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, + .reset = gk104_mc_reset, +}; + +int +gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +{ + return gp100_mc_new_(&gp10b_mc, device, index, pmc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h index 4f0576a06d24..3be4126441e4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h @@ -41,12 +41,18 @@ extern const struct nvkm_mc_map nv17_mc_reset[]; void nv44_mc_init(struct nvkm_mc *); void nv50_mc_init(struct nvkm_mc *); +void gk104_mc_init(struct nvkm_mc *); void gf100_mc_intr_unarm(struct nvkm_mc *); void gf100_mc_intr_rearm(struct nvkm_mc *); void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32); u32 gf100_mc_intr_stat(struct nvkm_mc *); void gf100_mc_unk260(struct nvkm_mc *, u32); +void gp100_mc_intr_unarm(struct nvkm_mc *); +void gp100_mc_intr_rearm(struct nvkm_mc *); +void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32); +int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int, + struct nvkm_mc **); extern const struct nvkm_mc_map gk104_mc_intr[]; extern const struct nvkm_mc_map gk104_mc_reset[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild index ac7f50ae53c6..e698f4836521 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild @@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/secboot/acr_r375.o nvkm-y += nvkm/subdev/secboot/gm200.o nvkm-y += 
nvkm/subdev/secboot/gm20b.o nvkm-y += nvkm/subdev/secboot/gp102.o +nvkm-y += nvkm/subdev/secboot/gp10b.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h index 93d804652d44..b615fc81aca4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h @@ -39,8 +39,7 @@ struct nvkm_acr_func { int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool); int (*load)(struct nvkm_acr *, struct nvkm_falcon *, struct nvkm_gpuobj *, u64); - int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, - enum nvkm_secboot_falcon); + int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long); }; /** diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c index 993a38eb3ed5..a721354249ce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c @@ -26,8 +26,6 @@ #include <core/gpuobj.h> #include <core/firmware.h> #include <engine/falcon.h> -#include <subdev/mc.h> -#include <subdev/timer.h> #include <subdev/pmu.h> #include <core/msgqueue.h> #include <engine/sec2.h> @@ -241,6 +239,7 @@ struct ls_ucode_img_r352 { */ struct ls_ucode_img * acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, + const struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon_id) { const struct nvkm_subdev *subdev = acr->base.subdev; @@ -253,7 +252,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr, img->base.falcon_id = falcon_id; - ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base); + ret = acr->func->ls_func[falcon_id]->load(sb, &img->base); if (ret) { kfree(img->base.ucode_data); @@ -462,12 +461,14 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs, * will be copied into the WPR region by the HS firmware. 
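 * With the signature change below, the WPR parameters are no longer
 * passed in explicitly but derived from the secboot instance, roughly:
 *
 *   u64 wpr_addr = sb->wpr_addr;
 *   u32 wpr_size = sb->wpr_size;
 *
 * which also lets the per-falcon load() hooks receive the secboot
 * object they now take as their first argument.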
*/ static int -acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size) +acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb) { const struct nvkm_subdev *subdev = acr->base.subdev; struct list_head imgs; struct ls_ucode_img *img, *t; unsigned long managed_falcons = acr->base.managed_falcons; + u64 wpr_addr = sb->wpr_addr; + u32 wpr_size = sb->wpr_size; int managed_count = 0; u32 image_wpr_size, ls_blob_size; int falcon_id; @@ -479,7 +480,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size) for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) { struct ls_ucode_img *img; - img = acr->func->ls_ucode_img_load(acr, falcon_id); + img = acr->func->ls_ucode_img_load(acr, sb, falcon_id); if (IS_ERR(img)) { if (acr->base.optional_falcons & BIT(falcon_id)) { managed_falcons &= ~BIT(falcon_id); @@ -704,7 +705,7 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb) return 0; /* Load and prepare the managed falcon's firmwares */ - ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size); + ret = acr_r352_prepare_ls_blob(acr, sb); if (ret) return ret; @@ -882,7 +883,6 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) { const struct nvkm_subdev *subdev = &sb->subdev; unsigned long managed_falcons = acr->base.managed_falcons; - u32 reg; int falcon_id; int ret; @@ -917,54 +917,13 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id]; - if (func->post_run) - func->post_run(&acr->base, sb); - } - - /* Re-start ourselves if we are managed */ - if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon)) - return 0; - - /* Enable interrupts */ - nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff); - nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true); - - /* Start LS firmware on boot falcon */ - nvkm_falcon_start(sb->boot_falcon); - - /* - * There is a bug where the LS firmware sometimes require to be started - * twice (this happens only on SEC). Detect and workaround that - * condition. - * - * Once started, the falcon will end up in STOPPED condition (bit 5) - * if successful, or in HALT condition (bit 4) if not. - */ - nvkm_msec(subdev->device, 1, - if ((reg = nvkm_rd32(subdev->device, - sb->boot_falcon->addr + 0x100) - & 0x30) != 0) - break; - ); - if (reg & BIT(4)) { - nvkm_debug(subdev, "applying workaround for start bug..."); - nvkm_falcon_start(sb->boot_falcon); - nvkm_msec(subdev->device, 1, - if ((reg = nvkm_rd32(subdev->device, - sb->boot_falcon->addr + 0x100) - & 0x30) != 0) - break; - ); - if (reg & BIT(4)) { - nvkm_error(subdev, "%s failed to start\n", - nvkm_secboot_falcon_name[acr->base.boot_falcon]); - return -EINVAL; + if (func->post_run) { + ret = func->post_run(&acr->base, sb); + if (ret) + return ret; } } - nvkm_debug(subdev, "%s started\n", - nvkm_secboot_falcon_name[acr->base.boot_falcon]); - return 0; } @@ -976,15 +935,16 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb) */ static int acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon) + unsigned long falcon_mask) { + int falcon; int ret; /* * Perform secure boot each time we are called on FECS. Since only FECS * and GPCCS are managed and started together, this ought to be safe. 
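 * The check below therefore tests for FECS membership in the mask
 * rather than equality, so a combined request such as
 *
 *   BIT(NVKM_SECBOOT_FALCON_FECS) | BIT(NVKM_SECBOOT_FALCON_GPCCS)
 *
 * still takes the full shutdown/bootstrap path.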
*/ - if (falcon != NVKM_SECBOOT_FALCON_FECS) + if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS))) goto end; ret = acr_r352_shutdown(acr, sb); @@ -996,7 +956,9 @@ acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb, return ret; end: - acr->falcon_state[falcon] = RESET; + for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) { + acr->falcon_state[falcon] = RESET; + } return 0; } @@ -1009,11 +971,11 @@ end: */ static int acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, - enum nvkm_secboot_falcon falcon) + unsigned long falcon_mask) { struct acr_r352 *acr = acr_r352(_acr); struct nvkm_msgqueue *queue; - const char *fname = nvkm_secboot_falcon_name[falcon]; + int falcon; bool wpr_already_set = sb->wpr_set; int ret; @@ -1026,7 +988,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) { /* Redo secure boot entirely if it was already done */ if (wpr_already_set) - return acr_r352_reset_nopmu(acr, sb, falcon); + return acr_r352_reset_nopmu(acr, sb, falcon_mask); /* Else return the result of the initial invokation */ else return ret; @@ -1044,13 +1006,15 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb, } /* Otherwise just ask the LS firmware to reset the falcon */ - nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname); - ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon); + for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) + nvkm_debug(&sb->subdev, "resetting %s falcon\n", + nvkm_secboot_falcon_name[falcon]); + ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask); if (ret) { - nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname); + nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret); return ret; } - nvkm_debug(&sb->subdev, "falcon %s reset\n", fname); + nvkm_debug(&sb->subdev, "falcon reset done\n"); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h index 6e88520566c9..3d58ab871563 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h @@ -57,11 +57,11 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app) * @lhdr_flags: LS flags */ struct acr_r352_ls_func { - int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *); + int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *); void (*generate_bl_desc)(const struct nvkm_acr *, const struct ls_ucode_img *, u64, void *); u32 bl_desc_size; - void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *); + int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *); u32 lhdr_flags; }; @@ -82,6 +82,7 @@ struct acr_r352_func { bool shadow_blob; struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *, + const struct nvkm_secboot *, enum nvkm_secboot_falcon); int (*ls_fill_headers)(struct acr_r352 *, struct list_head *); int (*ls_write_wpr)(struct acr_r352 *, struct list_head *, @@ -145,6 +146,7 @@ struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *, enum nvkm_secboot_falcon, unsigned long); struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *, + const struct nvkm_secboot *, enum nvkm_secboot_falcon); int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *); int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c 
index f860713642f1..866877b88797 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c @@ -107,6 +107,7 @@ struct ls_ucode_img_r367 { struct ls_ucode_img * acr_r367_ls_ucode_img_load(const struct acr_r352 *acr, + const struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon_id) { const struct nvkm_subdev *subdev = acr->base.subdev; @@ -119,7 +120,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr, img->base.falcon_id = falcon_id; - ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base); + ret = acr->func->ls_func[falcon_id]->load(sb, &img->base); if (ret) { kfree(img->base.ucode_data); kfree(img->base.sig); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h index ec6a71ca36be..8bdfb3e5cd1c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h @@ -28,6 +28,7 @@ void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *); struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *, + const struct nvkm_secboot *, enum nvkm_secboot_falcon); int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *); int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c index 5c11e8c50964..ee29c6c11afd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c @@ -102,15 +102,15 @@ nvkm_secboot_falcon_name[] = { * nvkm_secboot_reset() - reset specified falcon */ int -nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) +nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask) { /* Unmanaged falcon? 
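 *
 * The new check accepts falcon_mask only when it is a subset of the
 * managed-falcon set: OR-ing the mask into managed_falcons must leave that
 * set unchanged. For example, with only FECS and GPCCS managed, a mask
 * that also carries the PMU bit changes the result of the OR and the
 * reset is refused with -EINVAL.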
*/ - if (!(BIT(falcon) & sb->acr->managed_falcons)) { + if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) { nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n"); return -EINVAL; } - return sb->acr->func->reset(sb->acr, sb, falcon); + return sb->acr->func->reset(sb->acr, sb, falcon_mask); } /** diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h index 6dc9fc384f24..c8ab3d76bdef 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h @@ -41,4 +41,7 @@ void *gm200_secboot_dtor(struct nvkm_secboot *); int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *, struct nvkm_falcon *); +/* Tegra-only */ +int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32); + #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c index 29e6f73dfd7e..30491d132d59 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c @@ -23,31 +23,32 @@ #include "acr.h" #include "gm200.h" +#define TEGRA210_MC_BASE 0x70019000 + #ifdef CONFIG_ARCH_TEGRA -#define TEGRA_MC_BASE 0x70019000 #define MC_SECURITY_CARVEOUT2_CFG0 0xc58 #define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c #define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60 #define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64 #define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1) /** - * sb_tegra_read_wpr() - read the WPR registers on Tegra + * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra * * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region * is reserved from system memory by the bootloader and irreversibly locked. * This function reads the address and size of the pre-configured WPR region. */ -static int -gm20b_tegra_read_wpr(struct gm200_secboot *gsb) +int +gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) { struct nvkm_secboot *sb = &gsb->base; void __iomem *mc; u32 cfg; - mc = ioremap(TEGRA_MC_BASE, 0xd00); + mc = ioremap(mc_base, 0xd00); if (!mc) { nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n"); - return PTR_ERR(mc); + return -ENOMEM; } sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) | ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32); @@ -70,8 +71,8 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb) return 0; } #else -static int -gm20b_tegra_read_wpr(struct gm200_secboot *gsb) +int +gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) { nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n"); return -EINVAL; @@ -84,7 +85,7 @@ gm20b_secboot_oneinit(struct nvkm_secboot *sb) struct gm200_secboot *gsb = gm200_secboot(sb); int ret; - ret = gm20b_tegra_read_wpr(gsb); + ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c new file mode 100644 index 000000000000..632e9545e292 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "acr.h" +#include "gm200.h" + +#define TEGRA186_MC_BASE 0x02c10000 + +static int +gp10b_secboot_oneinit(struct nvkm_secboot *sb) +{ + struct gm200_secboot *gsb = gm200_secboot(sb); + int ret; + + ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE); + if (ret) + return ret; + + return gm200_secboot_oneinit(sb); +} + +static const struct nvkm_secboot_func +gp10b_secboot = { + .dtor = gm200_secboot_dtor, + .oneinit = gp10b_secboot_oneinit, + .fini = gm200_secboot_fini, + .run_blob = gm200_secboot_run_blob, +}; + +int +gp10b_secboot_new(struct nvkm_device *device, int index, + struct nvkm_secboot **psb) +{ + int ret; + struct gm200_secboot *gsb; + struct nvkm_acr *acr; + + acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) | + BIT(NVKM_SECBOOT_FALCON_GPCCS) | + BIT(NVKM_SECBOOT_FALCON_PMU)); + if (IS_ERR(acr)) + return PTR_ERR(acr); + + gsb = kzalloc(sizeof(*gsb), GFP_KERNEL); + if (!gsb) { + psb = NULL; + return -ENOMEM; + } + *psb = &gsb->base; + + ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base); + if (ret) + return ret; + + return 0; +} + +MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); +MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h index 4ff9138a2a83..9b7c402594e8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h @@ -147,11 +147,11 @@ struct fw_bl_desc { u32 data_size; }; -int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *); -int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img 
*); -int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *); -void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); -int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *); -void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); +int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *); +int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *); +int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *); +int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); +int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *); +int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c index 40a6df77bb8a..d1cf02d22db1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c @@ -144,15 +144,13 @@ error: } int -acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev, - struct ls_ucode_img *img) +acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img) { - return ls_ucode_img_load_gr(subdev, img, "fecs"); + return ls_ucode_img_load_gr(&sb->subdev, img, "fecs"); } int -acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev, - struct ls_ucode_img *img) +acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img) { - return ls_ucode_img_load_gr(subdev, img, "gpccs"); + return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs"); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c index ef0b298b70d7..ee989210725e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c @@ -28,6 +28,8 @@ #include <core/msgqueue.h> #include <subdev/pmu.h> #include <engine/sec2.h> +#include <subdev/mc.h> +#include <subdev/timer.h> /** * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw @@ -73,10 +75,11 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name, return 0; } -static void +static int acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue, struct nvkm_falcon *falcon, u32 addr_args) { + struct nvkm_device *device = falcon->owner->device; u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE; u8 buf[cmdline_size]; @@ -85,65 +88,118 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue, nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0); /* rearm the queue so it will wait for the init message */ nvkm_msgqueue_reinit(queue); + + /* Enable interrupts */ + nvkm_falcon_wr32(falcon, 0x10, 0xff); + nvkm_mc_intr_mask(device, falcon->owner->index, true); + + /* Start LS firmware on boot falcon */ + nvkm_falcon_start(falcon); + + return 0; } int -acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev, - struct ls_ucode_img *img) +acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img) { - struct nvkm_pmu *pmu = subdev->device->pmu; + struct nvkm_pmu *pmu = sb->subdev.device->pmu; int ret; - ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img); + ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img); if (ret) return ret; /* Allocate the PMU queue corresponding to the FW version */ ret = 
nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon, - &pmu->queue); + sb, &pmu->queue); if (ret) return ret; return 0; } -void +int acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) { struct nvkm_device *device = sb->subdev.device; struct nvkm_pmu *pmu = device->pmu; u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE; + int ret; + + ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args); + if (ret) + return ret; - acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args); + nvkm_debug(&sb->subdev, "%s started\n", + nvkm_secboot_falcon_name[acr->boot_falcon]); + + return 0; } int -acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev, - struct ls_ucode_img *img) +acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img) { - struct nvkm_sec2 *sec = subdev->device->sec2; + struct nvkm_sec2 *sec = sb->subdev.device->sec2; int ret; - ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img); + ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img); if (ret) return ret; /* Allocate the SEC2 queue corresponding to the FW version */ ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon, - &sec->queue); + sb, &sec->queue); if (ret) return ret; return 0; } -void +int acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) { - struct nvkm_device *device = sb->subdev.device; + const struct nvkm_subdev *subdev = &sb->subdev; + struct nvkm_device *device = subdev->device; struct nvkm_sec2 *sec = device->sec2; /* on SEC arguments are always at the beginning of EMEM */ - u32 addr_args = 0x01000000; + const u32 addr_args = 0x01000000; + u32 reg; + int ret; - acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args); + ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args); + if (ret) + return ret; + + /* + * There is a bug where the LS firmware sometimes needs to be started + * twice (this happens only on SEC). Detect and work around that + * condition. + * + * Once started, the falcon will end up in STOPPED condition (bit 5) + * if successful, or in HALT condition (bit 4) if not.
+ */ + nvkm_msec(device, 1, + if ((reg = nvkm_falcon_rd32(sb->boot_falcon, 0x100) & 0x30) != 0) + break; + ); + if (reg & BIT(4)) { + nvkm_debug(subdev, "applying workaround for start bug..."); + nvkm_falcon_start(sb->boot_falcon); + nvkm_msec(subdev->device, 1, + if ((reg = nvkm_rd32(subdev->device, + sb->boot_falcon->addr + 0x100) + & 0x30) != 0) + break; + ); + if (reg & BIT(4)) { + nvkm_error(subdev, "%s failed to start\n", + nvkm_secboot_falcon_name[acr->boot_falcon]); + return -EINVAL; + } + } + + nvkm_debug(&sb->subdev, "%s started\n", + nvkm_secboot_falcon_name[acr->boot_falcon]); + + return 0; } diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index ee5883f59be5..0dbe0306953d 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -160,10 +160,10 @@ static struct dma_buf_ops omap_dmabuf_ops = { .release = omap_gem_dmabuf_release, .begin_cpu_access = omap_gem_dmabuf_begin_cpu_access, .end_cpu_access = omap_gem_dmabuf_end_cpu_access, - .kmap_atomic = omap_gem_dmabuf_kmap_atomic, - .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic, - .kmap = omap_gem_dmabuf_kmap, - .kunmap = omap_gem_dmabuf_kunmap, + .map_atomic = omap_gem_dmabuf_kmap_atomic, + .unmap_atomic = omap_gem_dmabuf_kunmap_atomic, + .map = omap_gem_dmabuf_kmap, + .unmap = omap_gem_dmabuf_kunmap, .mmap = omap_gem_dmabuf_mmap, }; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 62aba976e744..3e29a9903303 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -7,6 +7,16 @@ config DRM_PANEL menu "Display Panels" depends on DRM && DRM_PANEL +config DRM_PANEL_LVDS + tristate "Generic LVDS panel driver" + depends on OF + depends on BACKLIGHT_CLASS_DEVICE + select VIDEOMODE_HELPERS + help + This driver supports LVDS panels that don't require device-specific + handling of power supplies or control signals. It implements automatic + backlight handling if the panel is attached to a backlight controller. 
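For reference, a device tree node that the generic panel-lvds driver added further down could bind to might look as follows. This is a minimal sketch assembled from the properties the driver parses in panel_lvds_parse_dt(); the "panel-lvds" compatible string and the property names are taken from the driver code below, while the phandles (&panel_backlight, &gpio1) and all timing, size and GPIO values are placeholders for illustration only:

	panel {
		compatible = "panel-lvds";

		width-mm = <210>;
		height-mm = <158>;
		data-mapping = "jeida-24";

		backlight = <&panel_backlight>;
		enable-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;

		panel-timing {
			/* Standard display-timing subnode properties */
			clock-frequency = <71100000>;
			hactive = <1280>;
			vactive = <800>;
			hfront-porch = <20>;
			hback-porch = <70>;
			hsync-len = <70>;
			vfront-porch = <3>;
			vback-porch = <15>;
			vsync-len = <5>;
		};
	};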
+ config DRM_PANEL_SIMPLE tristate "support for simple panels" depends on OF @@ -52,6 +62,12 @@ config DRM_PANEL_PANASONIC_VVX10F034N00 WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some Xperia Z2 tablets +config DRM_PANEL_SAMSUNG_S6E3HA2 + tristate "Samsung S6E3HA2 DSI video mode panel" + depends on OF + depends on DRM_MIPI_DSI + select VIDEOMODE_HELPERS + config DRM_PANEL_SAMSUNG_S6E8AA0 tristate "Samsung S6E8AA0 DSI video mode panel" depends on OF @@ -81,4 +97,11 @@ config DRM_PANEL_SHARP_LS043T1LE01 Say Y here if you want to enable support for Sharp LS043T1LE01 qHD (540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard +config DRM_PANEL_SITRONIX_ST7789V + tristate "Sitronix ST7789V panel" + depends on OF && SPI + help + Say Y here if you want to enable support for the Sitronix + ST7789V controller for 240x320 LCD panels + endmenu diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index a5c7ec0236e0..292b3c77aede 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -1,8 +1,11 @@ +obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o +obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o +obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c new file mode 100644 index 000000000000..3216aa9a88d6 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -0,0 +1,286 @@ +/* + * panel-lvds.c -- Generic LVDS panel driver + * + * Copyright (C) 2016 Laurent Pinchart + * Copyright (C) 2016 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ */ + +#include <linux/backlight.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_panel.h> + +#include <video/display_timing.h> +#include <video/of_display_timing.h> +#include <video/videomode.h> + +struct panel_lvds { + struct drm_panel panel; + struct device *dev; + + const char *label; + unsigned int width; + unsigned int height; + struct videomode video_mode; + unsigned int bus_format; + bool data_mirror; + + struct backlight_device *backlight; + + struct gpio_desc *enable_gpio; + struct gpio_desc *reset_gpio; +}; + +static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel) +{ + return container_of(panel, struct panel_lvds, panel); +} + +static int panel_lvds_disable(struct drm_panel *panel) +{ + struct panel_lvds *lvds = to_panel_lvds(panel); + + if (lvds->backlight) { + lvds->backlight->props.power = FB_BLANK_POWERDOWN; + lvds->backlight->props.state |= BL_CORE_FBBLANK; + backlight_update_status(lvds->backlight); + } + + return 0; +} + +static int panel_lvds_unprepare(struct drm_panel *panel) +{ + struct panel_lvds *lvds = to_panel_lvds(panel); + + if (lvds->enable_gpio) + gpiod_set_value_cansleep(lvds->enable_gpio, 0); + + return 0; +} + +static int panel_lvds_prepare(struct drm_panel *panel) +{ + struct panel_lvds *lvds = to_panel_lvds(panel); + + if (lvds->enable_gpio) + gpiod_set_value_cansleep(lvds->enable_gpio, 1); + + return 0; +} + +static int panel_lvds_enable(struct drm_panel *panel) +{ + struct panel_lvds *lvds = to_panel_lvds(panel); + + if (lvds->backlight) { + lvds->backlight->props.state &= ~BL_CORE_FBBLANK; + lvds->backlight->props.power = FB_BLANK_UNBLANK; + backlight_update_status(lvds->backlight); + } + + return 0; +} + +static int panel_lvds_get_modes(struct drm_panel *panel) +{ + struct panel_lvds *lvds = to_panel_lvds(panel); + struct drm_connector *connector = lvds->panel.connector; + struct drm_display_mode *mode; + + mode = drm_mode_create(lvds->panel.drm); + if (!mode) + return 0; + + drm_display_mode_from_videomode(&lvds->video_mode, mode); + mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + connector->display_info.width_mm = lvds->width; + connector->display_info.height_mm = lvds->height; + drm_display_info_set_bus_formats(&connector->display_info, + &lvds->bus_format, 1); + connector->display_info.bus_flags = lvds->data_mirror + ? 
DRM_BUS_FLAG_DATA_LSB_TO_MSB + : DRM_BUS_FLAG_DATA_MSB_TO_LSB; + + return 1; +} + +static const struct drm_panel_funcs panel_lvds_funcs = { + .disable = panel_lvds_disable, + .unprepare = panel_lvds_unprepare, + .prepare = panel_lvds_prepare, + .enable = panel_lvds_enable, + .get_modes = panel_lvds_get_modes, +}; + +static int panel_lvds_parse_dt(struct panel_lvds *lvds) +{ + struct device_node *np = lvds->dev->of_node; + struct display_timing timing; + const char *mapping; + int ret; + + ret = of_get_display_timing(np, "panel-timing", &timing); + if (ret < 0) + return ret; + + videomode_from_timing(&timing, &lvds->video_mode); + + ret = of_property_read_u32(np, "width-mm", &lvds->width); + if (ret < 0) { + dev_err(lvds->dev, "%s: invalid or missing %s DT property\n", + of_node_full_name(np), "width-mm"); + return -ENODEV; + } + ret = of_property_read_u32(np, "height-mm", &lvds->height); + if (ret < 0) { + dev_err(lvds->dev, "%s: invalid or missing %s DT property\n", + of_node_full_name(np), "height-mm"); + return -ENODEV; + } + + of_property_read_string(np, "label", &lvds->label); + + ret = of_property_read_string(np, "data-mapping", &mapping); + if (ret < 0) { + dev_err(lvds->dev, "%s: invalid or missing %s DT property\n", + of_node_full_name(np), "data-mapping"); + return -ENODEV; + } + + if (!strcmp(mapping, "jeida-18")) { + lvds->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG; + } else if (!strcmp(mapping, "jeida-24")) { + lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA; + } else if (!strcmp(mapping, "vesa-24")) { + lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG; + } else { + dev_err(lvds->dev, "%s: invalid or missing %s DT property\n", + of_node_full_name(np), "data-mapping"); + return -EINVAL; + } + + lvds->data_mirror = of_property_read_bool(np, "data-mirror"); + + return 0; +} + +static int panel_lvds_probe(struct platform_device *pdev) +{ + struct panel_lvds *lvds; + struct device_node *np; + int ret; + + lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL); + if (!lvds) + return -ENOMEM; + + lvds->dev = &pdev->dev; + + ret = panel_lvds_parse_dt(lvds); + if (ret < 0) + return ret; + + /* Get GPIOs and backlight controller. */ + lvds->enable_gpio = devm_gpiod_get_optional(lvds->dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(lvds->enable_gpio)) { + ret = PTR_ERR(lvds->enable_gpio); + dev_err(lvds->dev, "failed to request %s GPIO: %d\n", + "enable", ret); + return ret; + } + + lvds->reset_gpio = devm_gpiod_get_optional(lvds->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(lvds->reset_gpio)) { + ret = PTR_ERR(lvds->reset_gpio); + dev_err(lvds->dev, "failed to request %s GPIO: %d\n", + "reset", ret); + return ret; + } + + np = of_parse_phandle(lvds->dev->of_node, "backlight", 0); + if (np) { + lvds->backlight = of_find_backlight_by_node(np); + of_node_put(np); + + if (!lvds->backlight) + return -EPROBE_DEFER; + } + + /* + * TODO: Handle all power supplies specified in the DT node in a generic + * way for panels that don't care about power supply ordering. LVDS + * panels that require a specific power sequence will need a dedicated + * driver. + */ + + /* Register the panel. 
*/ + drm_panel_init(&lvds->panel); + lvds->panel.dev = lvds->dev; + lvds->panel.funcs = &panel_lvds_funcs; + + ret = drm_panel_add(&lvds->panel); + if (ret < 0) + goto error; + + dev_set_drvdata(lvds->dev, lvds); + return 0; + +error: + put_device(&lvds->backlight->dev); + return ret; +} + +static int panel_lvds_remove(struct platform_device *pdev) +{ + struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev); + + drm_panel_detach(&lvds->panel); + drm_panel_remove(&lvds->panel); + + panel_lvds_disable(&lvds->panel); + + if (lvds->backlight) + put_device(&lvds->backlight->dev); + + return 0; +} + +static const struct of_device_id panel_lvds_of_table[] = { + { .compatible = "panel-lvds", }, + { /* Sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, panel_lvds_of_table); + +static struct platform_driver panel_lvds_driver = { + .probe = panel_lvds_probe, + .remove = panel_lvds_remove, + .driver = { + .name = "panel-lvds", + .of_match_table = panel_lvds_of_table, + }, +}; + +module_platform_driver(panel_lvds_driver); + +MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); +MODULE_DESCRIPTION("LVDS Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c new file mode 100644 index 000000000000..4cc08d7b3de4 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c @@ -0,0 +1,739 @@ +/* + * MIPI-DSI based s6e3ha2 AMOLED 5.7 inch panel driver. + * + * Copyright (c) 2016 Samsung Electronics Co., Ltd. + * Donghwa Lee <dh09.lee@samsung.com> + * Hyungwon Hwang <human.hwang@samsung.com> + * Hoegeun Kwon <hoegeun.kwon@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <drm/drmP.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_panel.h> +#include <linux/backlight.h> +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> + +#define S6E3HA2_MIN_BRIGHTNESS 0 +#define S6E3HA2_MAX_BRIGHTNESS 100 +#define S6E3HA2_DEFAULT_BRIGHTNESS 80 + +#define S6E3HA2_NUM_GAMMA_STEPS 46 +#define S6E3HA2_GAMMA_CMD_CNT 35 +#define S6E3HA2_VINT_STATUS_MAX 10 + +static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = { + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x82, 0x83, + 0x85, 0x88, 0x8b, 0x8b, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8c, + 0x94, 0x84, 0xb1, 0xaf, 0x8e, 0xcf, 0xad, 0xc9, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x84, 0x84, + 0x85, 0x87, 0x8b, 0x8a, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8a, + 0x93, 0x84, 0xb0, 0xae, 0x8e, 0xc9, 0xa8, 0xc5, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x8a, 0x8a, 0x84, 0x88, 0x81, 0x84, 0x8a, 0x88, 0x8a, + 0x91, 0x84, 0xb1, 0xae, 0x8b, 0xd5, 0xb2, 0xcc, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x8a, 0x8a, 0x84, 0x87, 0x81, 0x84, 0x8a, 0x87, 0x8a, + 0x91, 0x85, 0xae, 0xac, 0x8a, 0xc3, 0xa3, 0xc0, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x85, 0x85, + 0x86, 0x85, 0x88, 0x89, 0x84, 0x89, 0x82, 0x84, 0x87, 0x85, 0x8b, + 0x91, 0x88, 0xad, 0xab, 0x8a, 0xb7, 0x9b, 0xb6, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x89, 0x8a, 0x84, 0x89, 0x83, 0x83, 0x86, 0x84, 0x8b, + 0x90, 0x84, 0xb0, 0xae, 0x8b, 0xce, 0xad, 0xc8, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83, + 0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x89, + 0x8f, 0x84, 0xac, 0xaa, 0x89, 0xb1, 0x98, 0xaf, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x88, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8c, + 0x91, 0x86, 0xac, 0xaa, 0x89, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x88, + 0x8b, 0x82, 0xad, 0xaa, 0x8a, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8a, + 0x8e, 0x84, 0xae, 0xac, 0x89, 0xda, 0xb7, 0xd0, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x80, 0x83, 0x82, 0x8b, + 0x8e, 0x85, 0xac, 0xaa, 0x89, 0xc8, 0xaa, 0xc1, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x85, 0x86, 0x87, 0x89, 0x81, 0x85, 0x81, 0x84, 0x86, 0x84, 0x8c, + 0x8c, 0x84, 0xa9, 0xa8, 0x87, 0xa3, 0x92, 0xa1, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x85, 0x86, 0x87, 0x89, 0x84, 0x86, 0x83, 0x80, 0x83, 0x81, 0x8c, + 0x8d, 0x84, 0xaa, 0xaa, 0x89, 0xce, 0xaf, 0xc5, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x85, 0x86, 0x87, 0x89, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c, + 0x8c, 0x84, 0xa8, 0xa8, 0x88, 0xb5, 
0x9f, 0xb0, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c, + 0x8b, 0x84, 0xab, 0xa8, 0x86, 0xd4, 0xb4, 0xc9, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x84, 0x84, 0x85, 0x8b, + 0x8a, 0x83, 0xa6, 0xa5, 0x84, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84, + 0x86, 0x85, 0x86, 0x86, 0x82, 0x85, 0x81, 0x82, 0x83, 0x84, 0x8e, + 0x8b, 0x83, 0xa4, 0xa3, 0x8a, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8e, + 0x8b, 0x83, 0xa4, 0xa2, 0x86, 0xc1, 0xa9, 0xb7, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8d, + 0x89, 0x82, 0xa2, 0xa1, 0x84, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83, + 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x83, 0x83, 0x85, 0x8c, + 0x87, 0x7f, 0xa2, 0x9d, 0x88, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xbb, 0x00, 0xc5, 0x00, 0xb4, 0x87, 0x86, 0x86, 0x84, 0x83, + 0x86, 0x87, 0x87, 0x87, 0x80, 0x82, 0x7f, 0x86, 0x86, 0x88, 0x8a, + 0x84, 0x7e, 0x9d, 0x9c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xbd, 0x00, 0xc7, 0x00, 0xb7, 0x87, 0x85, 0x85, 0x84, 0x83, + 0x86, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x83, 0x84, 0x85, 0x8a, + 0x85, 0x7e, 0x9c, 0x9b, 0x85, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xc0, 0x00, 0xca, 0x00, 0xbb, 0x87, 0x86, 0x85, 0x83, 0x83, + 0x85, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x84, 0x85, 0x86, 0x89, + 0x83, 0x7d, 0x9c, 0x99, 0x87, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xc4, 0x00, 0xcd, 0x00, 0xbe, 0x87, 0x86, 0x85, 0x83, 0x83, + 0x86, 0x85, 0x85, 0x87, 0x81, 0x82, 0x80, 0x82, 0x82, 0x83, 0x8a, + 0x85, 0x7f, 0x9f, 0x9b, 0x86, 0xb4, 0xa1, 0xac, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xc7, 0x00, 0xd0, 0x00, 0xc2, 0x87, 0x85, 0x85, 0x83, 0x82, + 0x85, 0x85, 0x85, 0x86, 0x82, 0x83, 0x80, 0x82, 0x82, 0x84, 0x87, + 0x86, 0x80, 0x9e, 0x9a, 0x87, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xca, 0x00, 0xd2, 0x00, 0xc5, 0x87, 0x85, 0x84, 0x82, 0x82, + 0x84, 0x85, 0x85, 0x86, 0x81, 0x82, 0x7f, 0x82, 0x82, 0x84, 0x88, + 0x86, 0x81, 0x9d, 0x98, 0x86, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xce, 0x00, 0xd6, 0x00, 0xca, 0x86, 0x85, 0x84, 0x83, 0x83, + 0x85, 0x84, 0x84, 0x85, 0x81, 0x82, 0x80, 0x81, 0x81, 0x82, 0x89, + 0x86, 0x81, 0x9c, 0x97, 0x86, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xd1, 0x00, 0xd9, 0x00, 0xce, 0x86, 0x84, 0x83, 0x83, 0x82, + 0x85, 0x85, 0x85, 0x86, 0x81, 0x83, 0x81, 0x82, 0x82, 0x83, 0x86, + 0x83, 0x7f, 0x99, 0x95, 0x86, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xd4, 0x00, 0xdb, 0x00, 0xd1, 0x86, 0x85, 0x83, 0x83, 0x82, + 0x85, 0x84, 0x84, 0x85, 0x80, 0x83, 0x82, 0x80, 0x80, 0x81, 0x87, + 0x84, 0x81, 0x98, 0x93, 0x85, 0xae, 0x9c, 0xa8, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xd8, 0x00, 0xde, 0x00, 0xd6, 0x86, 0x84, 0x83, 0x81, 0x81, + 0x83, 0x85, 0x85, 0x85, 0x82, 0x83, 0x81, 0x81, 0x81, 0x83, 0x86, + 0x84, 0x80, 0x98, 0x91, 0x85, 0x7b, 
0x7b, 0x7c, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xdc, 0x00, 0xe2, 0x00, 0xda, 0x85, 0x84, 0x83, 0x82, 0x82, + 0x84, 0x84, 0x84, 0x85, 0x81, 0x82, 0x82, 0x80, 0x80, 0x81, 0x83, + 0x82, 0x7f, 0x99, 0x93, 0x86, 0x94, 0x8b, 0x92, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xdf, 0x00, 0xe5, 0x00, 0xde, 0x85, 0x84, 0x82, 0x82, 0x82, + 0x84, 0x83, 0x83, 0x84, 0x81, 0x81, 0x80, 0x83, 0x82, 0x84, 0x82, + 0x81, 0x7f, 0x99, 0x92, 0x86, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81, + 0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x83, 0x83, 0x84, 0x80, + 0x81, 0x7c, 0x99, 0x92, 0x87, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x85, 0x84, 0x83, 0x81, 0x81, + 0x82, 0x82, 0x82, 0x83, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83, + 0x82, 0x80, 0x91, 0x8d, 0x83, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81, + 0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83, + 0x81, 0x7f, 0x91, 0x8c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81, + 0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x82, + 0x82, 0x7f, 0x94, 0x89, 0x84, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81, + 0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x83, + 0x82, 0x7f, 0x91, 0x85, 0x81, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81, + 0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x83, 0x82, 0x84, 0x83, + 0x82, 0x7f, 0x90, 0x84, 0x81, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x80, 0x80, + 0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x80, 0x80, 0x81, 0x81, + 0x82, 0x83, 0x7e, 0x80, 0x7c, 0xa4, 0x97, 0x9f, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xe9, 0x00, 0xec, 0x00, 0xe8, 0x84, 0x83, 0x82, 0x81, 0x81, + 0x82, 0x82, 0x82, 0x83, 0x7f, 0x7f, 0x7f, 0x81, 0x80, 0x82, 0x83, + 0x83, 0x84, 0x79, 0x7c, 0x79, 0xb1, 0xa0, 0xaa, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xed, 0x00, 0xf0, 0x00, 0xec, 0x83, 0x83, 0x82, 0x80, 0x80, + 0x81, 0x82, 0x82, 0x82, 0x7f, 0x7f, 0x7e, 0x81, 0x81, 0x82, 0x80, + 0x81, 0x81, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xf1, 0x00, 0xf4, 0x00, 0xf1, 0x83, 0x82, 0x82, 0x80, 0x80, + 0x81, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x7d, + 0x7e, 0x7f, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xf6, 0x00, 0xf7, 0x00, 0xf5, 0x82, 0x82, 0x81, 0x80, 0x80, + 0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x82, + 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x00, 0xfa, 0x00, 0xfb, 0x00, 0xfa, 0x81, 0x81, 0x81, 0x80, 0x80, + 0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 }, + { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 
0x80, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00 } +}; + +unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = { + 0x18, 0x19, 0x1a, 0x1b, 0x1c, + 0x1d, 0x1e, 0x1f, 0x20, 0x21 +}; + +struct s6e3ha2 { + struct device *dev; + struct drm_panel panel; + struct backlight_device *bl_dev; + + struct regulator_bulk_data supplies[2]; + struct gpio_desc *reset_gpio; + struct gpio_desc *enable_gpio; +}; + +static int s6e3ha2_dcs_write(struct s6e3ha2 *ctx, const void *data, size_t len) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + + return mipi_dsi_dcs_write_buffer(dsi, data, len); +} + +#define s6e3ha2_dcs_write_seq_static(ctx, seq...) do { \ + static const u8 d[] = { seq }; \ + int ret; \ + ret = s6e3ha2_dcs_write(ctx, d, ARRAY_SIZE(d)); \ + if (ret < 0) \ + return ret; \ +} while (0) + +#define s6e3ha2_call_write_func(ret, func) do { \ + ret = (func); \ + if (ret < 0) \ + return ret; \ +} while (0) + +static int s6e3ha2_test_key_on_f0(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a); + return 0; +} + +static int s6e3ha2_test_key_off_f0(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0xa5, 0xa5); + return 0; +} + +static int s6e3ha2_test_key_on_fc(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a); + return 0; +} + +static int s6e3ha2_test_key_off_fc(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0xa5, 0xa5); + return 0; +} + +static int s6e3ha2_single_dsi_set(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67); + s6e3ha2_dcs_write_seq_static(ctx, 0xf9, 0x09); + return 0; +} + +static int s6e3ha2_freq_calibration(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xfd, 0x1c); + s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20, 0x39); + s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0xa0); + s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20); + s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62, 0x40, + 0x80, 0xc0, 0x28, 0x28, 0x28, 0x28, 0x39, 0xc5); + return 0; +} + +static int s6e3ha2_aor_control(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb2, 0x03, 0x10); + return 0; +} + +static int s6e3ha2_caps_elvss_set(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb6, 0x9c, 0x0a); + return 0; +} + +static int s6e3ha2_acl_off(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0x55, 0x00); + return 0; +} + +static int s6e3ha2_acl_off_opr(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb5, 0x40); + return 0; +} + +static int s6e3ha2_test_global(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x07); + return 0; +} + +static int s6e3ha2_test(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb8, 0x19); + return 0; +} + +static int s6e3ha2_touch_hsync_on1(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xbd, 0x33, 0x11, 0x02, + 0x16, 0x02, 0x16); + return 0; +} + +static int s6e3ha2_pentile_control(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xc0, 0x00, 0x00, 0xd8, 0xd8); + return 0; +} + +static int s6e3ha2_poc_global(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x20); + return 0; +} + +static int s6e3ha2_poc_setting(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x08); + return 0; +} + +static int s6e3ha2_pcd_set_off(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xcc, 0x40, 0x51); + return 0; +} + +static int s6e3ha2_err_fg_set(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xed, 0x44); + return 0; +} + 
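/*
 * How the tables above are indexed (see s6e3ha2_set_vint() and
 * s6e3ha2_get_brightness_index() below): a brightness value in the
 * [0..S6E3HA2_MAX_BRIGHTNESS] range is scaled linearly onto a table,
 * index = brightness * (number of entries - 1) / S6E3HA2_MAX_BRIGHTNESS.
 * For example, the default brightness of 80 selects
 * vint_table[80 * 9 / 100] = vint_table[7] = 0x1f, and gamma step
 * 80 * 45 / 100 = 36 of the 46 entries in gamma_tbl.
 */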
+static int s6e3ha2_hbm_off(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0x53, 0x00); + return 0; +} + +static int s6e3ha2_te_start_setting(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xb9, 0x10, 0x09, 0xff, 0x00, 0x09); + return 0; +} + +static int s6e3ha2_gamma_update(struct s6e3ha2 *ctx) +{ + s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x03); + ndelay(100); /* need for 100ns delay */ + s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x00); + return 0; +} + +static int s6e3ha2_get_brightness(struct backlight_device *bl_dev) +{ + return bl_dev->props.brightness; +} + +static int s6e3ha2_set_vint(struct s6e3ha2 *ctx) +{ + struct backlight_device *bl_dev = ctx->bl_dev; + unsigned int brightness = bl_dev->props.brightness; + unsigned char data[] = { 0xf4, 0x8b, + vint_table[brightness * (S6E3HA2_VINT_STATUS_MAX - 1) / + S6E3HA2_MAX_BRIGHTNESS] }; + + return s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data)); +} + +static unsigned int s6e3ha2_get_brightness_index(unsigned int brightness) +{ + return (brightness * (S6E3HA2_NUM_GAMMA_STEPS - 1)) / + S6E3HA2_MAX_BRIGHTNESS; +} + +static int s6e3ha2_update_gamma(struct s6e3ha2 *ctx, unsigned int brightness) +{ + struct backlight_device *bl_dev = ctx->bl_dev; + unsigned int index = s6e3ha2_get_brightness_index(brightness); + u8 data[S6E3HA2_GAMMA_CMD_CNT + 1] = { 0xca, }; + int ret; + + memcpy(data + 1, gamma_tbl + index, S6E3HA2_GAMMA_CMD_CNT); + s6e3ha2_call_write_func(ret, + s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data))); + + s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx)); + bl_dev->props.brightness = brightness; + + return 0; +} + +static int s6e3ha2_set_brightness(struct backlight_device *bl_dev) +{ + struct s6e3ha2 *ctx = bl_get_data(bl_dev); + unsigned int brightness = bl_dev->props.brightness; + int ret; + + if (brightness < S6E3HA2_MIN_BRIGHTNESS || + brightness > bl_dev->props.max_brightness) { + dev_err(ctx->dev, "Invalid brightness: %u\n", brightness); + return -EINVAL; + } + + if (bl_dev->props.power > FB_BLANK_NORMAL) + return -EPERM; + + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_update_gamma(ctx, brightness)); + s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_set_vint(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx)); + + return 0; +} + +static const struct backlight_ops s6e3ha2_bl_ops = { + .get_brightness = s6e3ha2_get_brightness, + .update_status = s6e3ha2_set_brightness, +}; + +static int s6e3ha2_panel_init(struct s6e3ha2 *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + s6e3ha2_call_write_func(ret, mipi_dsi_dcs_exit_sleep_mode(dsi)); + usleep_range(5000, 6000); + + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_single_dsi_set(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_freq_calibration(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx)); + + return 0; +} + +static int s6e3ha2_power_off(struct s6e3ha2 *ctx) +{ + return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); +} + +static int s6e3ha2_disable(struct drm_panel *panel) +{ + struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + s6e3ha2_call_write_func(ret, mipi_dsi_dcs_enter_sleep_mode(dsi)); + 
s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi)); + + msleep(40); + ctx->bl_dev->props.power = FB_BLANK_NORMAL; + + return 0; +} + +static int s6e3ha2_unprepare(struct drm_panel *panel) +{ + struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel); + + return s6e3ha2_power_off(ctx); +} + +static int s6e3ha2_power_on(struct s6e3ha2 *ctx) +{ + int ret; + + ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); + if (ret < 0) + return ret; + + msleep(120); + + gpiod_set_value(ctx->enable_gpio, 0); + usleep_range(5000, 6000); + gpiod_set_value(ctx->enable_gpio, 1); + + gpiod_set_value(ctx->reset_gpio, 1); + usleep_range(5000, 6000); + gpiod_set_value(ctx->reset_gpio, 0); + usleep_range(5000, 6000); + + return 0; +} +static int s6e3ha2_prepare(struct drm_panel *panel) +{ + struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel); + int ret; + + ret = s6e3ha2_power_on(ctx); + if (ret < 0) + return ret; + + ret = s6e3ha2_panel_init(ctx); + if (ret < 0) + goto err; + + ctx->bl_dev->props.power = FB_BLANK_NORMAL; + + return 0; + +err: + s6e3ha2_power_off(ctx); + return ret; +} + +static int s6e3ha2_enable(struct drm_panel *panel) +{ + struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + /* common setting */ + s6e3ha2_call_write_func(ret, + mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK)); + + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_touch_hsync_on1(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_pentile_control(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_poc_global(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_poc_setting(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx)); + + /* pcd setting off for TB */ + s6e3ha2_call_write_func(ret, s6e3ha2_pcd_set_off(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_err_fg_set(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_te_start_setting(ctx)); + + /* brightness setting */ + s6e3ha2_call_write_func(ret, s6e3ha2_set_brightness(ctx->bl_dev)); + s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_caps_elvss_set(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_acl_off(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_acl_off_opr(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_hbm_off(ctx)); + + /* elvss temp compensation */ + s6e3ha2_call_write_func(ret, s6e3ha2_test_global(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test(ctx)); + s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx)); + + s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi)); + ctx->bl_dev->props.power = FB_BLANK_UNBLANK; + + return 0; +} + +static const struct drm_display_mode default_mode = { + .clock = 222372, + .hdisplay = 1440, + .hsync_start = 1440 + 1, + .hsync_end = 1440 + 1 + 1, + .htotal = 1440 + 1 + 1 + 1, + .vdisplay = 2560, + .vsync_start = 2560 + 1, + .vsync_end = 2560 + 1 + 1, + .vtotal = 2560 + 1 + 1 + 15, + .vrefresh = 60, + .flags = 0, +}; + +static int s6e3ha2_get_modes(struct drm_panel *panel) +{ + struct drm_connector *connector = panel->connector; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(panel->drm, &default_mode); + if (!mode) { + DRM_ERROR("failed to add mode %ux%ux@%u\n", + default_mode.hdisplay, default_mode.vdisplay, + default_mode.vrefresh); + return -ENOMEM; + } + 
+ drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + connector->display_info.width_mm = 71; + connector->display_info.height_mm = 125; + + return 1; +} + +static const struct drm_panel_funcs s6e3ha2_drm_funcs = { + .disable = s6e3ha2_disable, + .unprepare = s6e3ha2_unprepare, + .prepare = s6e3ha2_prepare, + .enable = s6e3ha2_enable, + .get_modes = s6e3ha2_get_modes, +}; + +static int s6e3ha2_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct s6e3ha2 *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + mipi_dsi_set_drvdata(dsi, ctx); + + ctx->dev = dev; + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS; + + ctx->supplies[0].supply = "vdd3"; + ctx->supplies[1].supply = "vci"; + + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), + ctx->supplies); + if (ret < 0) { + dev_err(dev, "failed to get regulators: %d\n", ret); + return ret; + } + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset_gpio)) { + dev_err(dev, "cannot get reset-gpios %ld\n", + PTR_ERR(ctx->reset_gpio)); + return PTR_ERR(ctx->reset_gpio); + } + + ctx->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->enable_gpio)) { + dev_err(dev, "cannot get enable-gpios %ld\n", + PTR_ERR(ctx->enable_gpio)); + return PTR_ERR(ctx->enable_gpio); + } + + ctx->bl_dev = backlight_device_register("s6e3ha2", dev, ctx, + &s6e3ha2_bl_ops, NULL); + if (IS_ERR(ctx->bl_dev)) { + dev_err(dev, "failed to register backlight device\n"); + return PTR_ERR(ctx->bl_dev); + } + + ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS; + ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS; + ctx->bl_dev->props.power = FB_BLANK_POWERDOWN; + + drm_panel_init(&ctx->panel); + ctx->panel.dev = dev; + ctx->panel.funcs = &s6e3ha2_drm_funcs; + + ret = drm_panel_add(&ctx->panel); + if (ret < 0) + goto unregister_backlight; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) + goto remove_panel; + + return ret; + +remove_panel: + drm_panel_remove(&ctx->panel); + +unregister_backlight: + backlight_device_unregister(ctx->bl_dev); + + return ret; +} + +static int s6e3ha2_remove(struct mipi_dsi_device *dsi) +{ + struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi); + + mipi_dsi_detach(dsi); + drm_panel_remove(&ctx->panel); + backlight_device_unregister(ctx->bl_dev); + + return 0; +} + +static const struct of_device_id s6e3ha2_of_match[] = { + { .compatible = "samsung,s6e3ha2" }, + { } +}; +MODULE_DEVICE_TABLE(of, s6e3ha2_of_match); + +static struct mipi_dsi_driver s6e3ha2_driver = { + .probe = s6e3ha2_probe, + .remove = s6e3ha2_remove, + .driver = { + .name = "panel-samsung-s6e3ha2", + .of_match_table = s6e3ha2_of_match, + }, +}; +module_mipi_dsi_driver(s6e3ha2_driver); + +MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>"); +MODULE_AUTHOR("Hyungwon Hwang <human.hwang@samsung.com>"); +MODULE_AUTHOR("Hoegeun Kwon <hoegeun.kwon@samsung.com>"); +MODULE_DESCRIPTION("MIPI-DSI based s6e3ha2 AMOLED Panel Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 89eb0422821c..c4566ce8fda7 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -386,6 +386,31 @@ static void panel_simple_shutdown(struct device *dev) panel_simple_disable(&panel->base); } +static const struct 
drm_display_mode ampire_am_480272h3tmqw_t01h_mode = { + .clock = 9000, + .hdisplay = 480, + .hsync_start = 480 + 2, + .hsync_end = 480 + 2 + 41, + .htotal = 480 + 2 + 41 + 2, + .vdisplay = 272, + .vsync_start = 272 + 2, + .vsync_end = 272 + 2 + 10, + .vtotal = 272 + 2 + 10 + 2, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static const struct panel_desc ampire_am_480272h3tmqw_t01h = { + .modes = &ampire_am_480272h3tmqw_t01h_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 105, + .height = 67, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, +}; + static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = { .clock = 33333, .hdisplay = 800, @@ -1806,8 +1831,36 @@ static const struct panel_desc urt_umsh_8596md_parallel = { .bus_format = MEDIA_BUS_FMT_RGB666_1X18, }; +static const struct drm_display_mode winstar_wf35ltiacd_mode = { + .clock = 6410, + .hdisplay = 320, + .hsync_start = 320 + 20, + .hsync_end = 320 + 20 + 30, + .htotal = 320 + 20 + 30 + 38, + .vdisplay = 240, + .vsync_start = 240 + 4, + .vsync_end = 240 + 4 + 3, + .vtotal = 240 + 4 + 3 + 15, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc winstar_wf35ltiacd = { + .modes = &winstar_wf35ltiacd_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 70, + .height = 53, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, +}; + static const struct of_device_id platform_of_match[] = { { + .compatible = "ampire,am-480272h3tmqw-t01h", + .data = &ampire_am_480272h3tmqw_t01h, + }, { .compatible = "ampire,am800480r3tmqwa1h", .data = &ampire_am800480r3tmqwa1h, }, { @@ -1994,6 +2047,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "urt,umsh-8596md-20t", .data = &urt_umsh_8596md_parallel, }, { + .compatible = "winstar,wf35ltiacd", + .data = &winstar_wf35ltiacd, + }, { /* sentinel */ } }; diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c new file mode 100644 index 000000000000..358c64ef1922 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -0,0 +1,449 @@ +/* + * Copyright (C) 2017 Free Electrons + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation.
+ */ + +#include <linux/gpio/consumer.h> +#include <linux/regulator/consumer.h> +#include <linux/spi/spi.h> + +#include <drm/drmP.h> +#include <drm/drm_panel.h> + +#include <video/mipi_display.h> + +#define ST7789V_COLMOD_RGB_FMT_18BITS (6 << 4) +#define ST7789V_COLMOD_CTRL_FMT_18BITS (6 << 0) + +#define ST7789V_RAMCTRL_CMD 0xb0 +#define ST7789V_RAMCTRL_RM_RGB BIT(4) +#define ST7789V_RAMCTRL_DM_RGB BIT(0) +#define ST7789V_RAMCTRL_MAGIC (3 << 6) +#define ST7789V_RAMCTRL_EPF(n) (((n) & 3) << 4) + +#define ST7789V_RGBCTRL_CMD 0xb1 +#define ST7789V_RGBCTRL_WO BIT(7) +#define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5) +#define ST7789V_RGBCTRL_VSYNC_HIGH BIT(3) +#define ST7789V_RGBCTRL_HSYNC_HIGH BIT(2) +#define ST7789V_RGBCTRL_PCLK_HIGH BIT(1) +#define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f) +#define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f) + +#define ST7789V_PORCTRL_CMD 0xb2 +#define ST7789V_PORCTRL_IDLE_BP(n) (((n) & 0xf) << 4) +#define ST7789V_PORCTRL_IDLE_FP(n) ((n) & 0xf) +#define ST7789V_PORCTRL_PARTIAL_BP(n) (((n) & 0xf) << 4) +#define ST7789V_PORCTRL_PARTIAL_FP(n) ((n) & 0xf) + +#define ST7789V_GCTRL_CMD 0xb7 +#define ST7789V_GCTRL_VGHS(n) (((n) & 7) << 4) +#define ST7789V_GCTRL_VGLS(n) ((n) & 7) + +#define ST7789V_VCOMS_CMD 0xbb + +#define ST7789V_LCMCTRL_CMD 0xc0 +#define ST7789V_LCMCTRL_XBGR BIT(5) +#define ST7789V_LCMCTRL_XMX BIT(3) +#define ST7789V_LCMCTRL_XMH BIT(2) + +#define ST7789V_VDVVRHEN_CMD 0xc2 +#define ST7789V_VDVVRHEN_CMDEN BIT(0) + +#define ST7789V_VRHS_CMD 0xc3 + +#define ST7789V_VDVS_CMD 0xc4 + +#define ST7789V_FRCTRL2_CMD 0xc6 + +#define ST7789V_PWCTRL1_CMD 0xd0 +#define ST7789V_PWCTRL1_MAGIC 0xa4 +#define ST7789V_PWCTRL1_AVDD(n) (((n) & 3) << 6) +#define ST7789V_PWCTRL1_AVCL(n) (((n) & 3) << 4) +#define ST7789V_PWCTRL1_VDS(n) ((n) & 3) + +#define ST7789V_PVGAMCTRL_CMD 0xe0 +#define ST7789V_PVGAMCTRL_JP0(n) (((n) & 3) << 4) +#define ST7789V_PVGAMCTRL_JP1(n) (((n) & 3) << 4) +#define ST7789V_PVGAMCTRL_VP0(n) ((n) & 0xf) +#define ST7789V_PVGAMCTRL_VP1(n) ((n) & 0x3f) +#define ST7789V_PVGAMCTRL_VP2(n) ((n) & 0x3f) +#define ST7789V_PVGAMCTRL_VP4(n) ((n) & 0x1f) +#define ST7789V_PVGAMCTRL_VP6(n) ((n) & 0x1f) +#define ST7789V_PVGAMCTRL_VP13(n) ((n) & 0xf) +#define ST7789V_PVGAMCTRL_VP20(n) ((n) & 0x7f) +#define ST7789V_PVGAMCTRL_VP27(n) ((n) & 7) +#define ST7789V_PVGAMCTRL_VP36(n) (((n) & 7) << 4) +#define ST7789V_PVGAMCTRL_VP43(n) ((n) & 0x7f) +#define ST7789V_PVGAMCTRL_VP50(n) ((n) & 0xf) +#define ST7789V_PVGAMCTRL_VP57(n) ((n) & 0x1f) +#define ST7789V_PVGAMCTRL_VP59(n) ((n) & 0x1f) +#define ST7789V_PVGAMCTRL_VP61(n) ((n) & 0x3f) +#define ST7789V_PVGAMCTRL_VP62(n) ((n) & 0x3f) +#define ST7789V_PVGAMCTRL_VP63(n) (((n) & 0xf) << 4) + +#define ST7789V_NVGAMCTRL_CMD 0xe1 +#define ST7789V_NVGAMCTRL_JN0(n) (((n) & 3) << 4) +#define ST7789V_NVGAMCTRL_JN1(n) (((n) & 3) << 4) +#define ST7789V_NVGAMCTRL_VN0(n) ((n) & 0xf) +#define ST7789V_NVGAMCTRL_VN1(n) ((n) & 0x3f) +#define ST7789V_NVGAMCTRL_VN2(n) ((n) & 0x3f) +#define ST7789V_NVGAMCTRL_VN4(n) ((n) & 0x1f) +#define ST7789V_NVGAMCTRL_VN6(n) ((n) & 0x1f) +#define ST7789V_NVGAMCTRL_VN13(n) ((n) & 0xf) +#define ST7789V_NVGAMCTRL_VN20(n) ((n) & 0x7f) +#define ST7789V_NVGAMCTRL_VN27(n) ((n) & 7) +#define ST7789V_NVGAMCTRL_VN36(n) (((n) & 7) << 4) +#define ST7789V_NVGAMCTRL_VN43(n) ((n) & 0x7f) +#define ST7789V_NVGAMCTRL_VN50(n) ((n) & 0xf) +#define ST7789V_NVGAMCTRL_VN57(n) ((n) & 0x1f) +#define ST7789V_NVGAMCTRL_VN59(n) ((n) & 0x1f) +#define ST7789V_NVGAMCTRL_VN61(n) ((n) & 0x3f) +#define ST7789V_NVGAMCTRL_VN62(n) ((n) & 0x3f) +#define 
ST7789V_NVGAMCTRL_VN63(n) (((n) & 0xf) << 4) + +#define ST7789V_TEST(val, func) \ + do { \ + if ((val = (func))) \ + return val; \ + } while (0) + +struct st7789v { + struct drm_panel panel; + struct spi_device *spi; + struct gpio_desc *reset; + struct backlight_device *backlight; + struct regulator *power; +}; + +enum st7789v_prefix { + ST7789V_COMMAND = 0, + ST7789V_DATA = 1, +}; + +static inline struct st7789v *panel_to_st7789v(struct drm_panel *panel) +{ + return container_of(panel, struct st7789v, panel); +} + +static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix, + u8 data) +{ + struct spi_transfer xfer = { }; + struct spi_message msg; + u16 txbuf = ((prefix & 1) << 8) | data; + + spi_message_init(&msg); + + xfer.tx_buf = &txbuf; + xfer.bits_per_word = 9; + xfer.len = sizeof(txbuf); + + spi_message_add_tail(&xfer, &msg); + return spi_sync(ctx->spi, &msg); +} + +static int st7789v_write_command(struct st7789v *ctx, u8 cmd) +{ + return st7789v_spi_write(ctx, ST7789V_COMMAND, cmd); +} + +static int st7789v_write_data(struct st7789v *ctx, u8 cmd) +{ + return st7789v_spi_write(ctx, ST7789V_DATA, cmd); +} + +static const struct drm_display_mode default_mode = { + .clock = 7000, + .hdisplay = 240, + .hsync_start = 240 + 38, + .hsync_end = 240 + 38 + 10, + .htotal = 240 + 38 + 10 + 10, + .vdisplay = 320, + .vsync_start = 320 + 8, + .vsync_end = 320 + 8 + 4, + .vtotal = 320 + 8 + 4 + 4, + .vrefresh = 60, +}; + +static int st7789v_get_modes(struct drm_panel *panel) +{ + struct drm_connector *connector = panel->connector; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(panel->drm, &default_mode); + if (!mode) { + dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n", + default_mode.hdisplay, default_mode.vdisplay, + default_mode.vrefresh); + return -ENOMEM; + } + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + + panel->connector->display_info.width_mm = 61; + panel->connector->display_info.height_mm = 103; + + return 1; +} + +static int st7789v_prepare(struct drm_panel *panel) +{ + struct st7789v *ctx = panel_to_st7789v(panel); + int ret; + + ret = regulator_enable(ctx->power); + if (ret) + return ret; + + gpiod_set_value(ctx->reset, 1); + msleep(30); + gpiod_set_value(ctx->reset, 0); + msleep(120); + + ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE)); + + /* We need to wait 120ms after a sleep out command */ + msleep(120); + + ST7789V_TEST(ret, st7789v_write_command(ctx, + MIPI_DCS_SET_ADDRESS_MODE)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, + MIPI_DCS_SET_PIXEL_FORMAT)); + ST7789V_TEST(ret, st7789v_write_data(ctx, + (MIPI_DCS_PIXEL_FMT_18BIT << 4) | + (MIPI_DCS_PIXEL_FMT_18BIT))); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PORCTRL_IDLE_BP(3) | + ST7789V_PORCTRL_IDLE_FP(3))); + ST7789V_TEST(ret, st7789v_write_data(ctx, + ST7789V_PORCTRL_PARTIAL_BP(3) | + ST7789V_PORCTRL_PARTIAL_FP(3))); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_GCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_GCTRL_VGLS(5) | + ST7789V_GCTRL_VGHS(3))); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VCOMS_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 
0x2b)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_LCMCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_LCMCTRL_XMH | + ST7789V_LCMCTRL_XMX | + ST7789V_LCMCTRL_XBGR)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVVRHEN_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_VDVVRHEN_CMDEN)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VRHS_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVS_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0x20)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_FRCTRL2_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PWCTRL1_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_MAGIC)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_AVDD(2) | + ST7789V_PWCTRL1_AVCL(2) | + ST7789V_PWCTRL1_VDS(1))); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PVGAMCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP63(0xd))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP1(0xca))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP2(0xe))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP4(8))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP6(9))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP13(7))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP20(0x2d))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP27(0xb) | + ST7789V_PVGAMCTRL_VP36(3))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP43(0x3d))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_JP1(3) | + ST7789V_PVGAMCTRL_VP50(4))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP57(0xa))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP59(0xa))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP61(0x1b))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP62(0x28))); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_NVGAMCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN63(0xd))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN1(0xca))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN2(0xf))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN4(8))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN6(8))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN13(7))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN20(0x2e))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN27(0xc) | + ST7789V_NVGAMCTRL_VN36(5))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN43(0x40))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_JN1(3) | + ST7789V_NVGAMCTRL_VN50(4))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN57(9))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN59(0xb))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28))); + + ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_INVERT_MODE)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB | + 
ST7789V_RAMCTRL_RM_RGB)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_EPF(3) | + ST7789V_RAMCTRL_MAGIC)); + + ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO | + ST7789V_RGBCTRL_RCM(2) | + ST7789V_RGBCTRL_VSYNC_HIGH | + ST7789V_RGBCTRL_HSYNC_HIGH | + ST7789V_RGBCTRL_PCLK_HIGH)); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8))); + ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20))); + + return 0; +} + +static int st7789v_enable(struct drm_panel *panel) +{ + struct st7789v *ctx = panel_to_st7789v(panel); + + if (ctx->backlight) { + ctx->backlight->props.state &= ~BL_CORE_FBBLANK; + ctx->backlight->props.power = FB_BLANK_UNBLANK; + backlight_update_status(ctx->backlight); + } + + return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON); +} + +static int st7789v_disable(struct drm_panel *panel) +{ + struct st7789v *ctx = panel_to_st7789v(panel); + int ret; + + ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF)); + + if (ctx->backlight) { + ctx->backlight->props.power = FB_BLANK_POWERDOWN; + ctx->backlight->props.state |= BL_CORE_FBBLANK; + backlight_update_status(ctx->backlight); + } + + return 0; +} + +static int st7789v_unprepare(struct drm_panel *panel) +{ + struct st7789v *ctx = panel_to_st7789v(panel); + int ret; + + ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_SLEEP_MODE)); + + regulator_disable(ctx->power); + + return 0; +} + +static const struct drm_panel_funcs st7789v_drm_funcs = { + .disable = st7789v_disable, + .enable = st7789v_enable, + .get_modes = st7789v_get_modes, + .prepare = st7789v_prepare, + .unprepare = st7789v_unprepare, +}; + +static int st7789v_probe(struct spi_device *spi) +{ + struct device_node *backlight; + struct st7789v *ctx; + int ret; + + ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + spi_set_drvdata(spi, ctx); + ctx->spi = spi; + + ctx->panel.dev = &spi->dev; + ctx->panel.funcs = &st7789v_drm_funcs; + + ctx->power = devm_regulator_get(&spi->dev, "power"); + if (IS_ERR(ctx->power)) + return PTR_ERR(ctx->power); + + ctx->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ctx->reset)) { + dev_err(&spi->dev, "Couldn't get our reset line\n"); + return PTR_ERR(ctx->reset); + } + + backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0); + if (backlight) { + ctx->backlight = of_find_backlight_by_node(backlight); + of_node_put(backlight); + + if (!ctx->backlight) + return -EPROBE_DEFER; + } + + ret = drm_panel_add(&ctx->panel); + if (ret < 0) + goto err_free_backlight; + + return 0; + +err_free_backlight: + if (ctx->backlight) + put_device(&ctx->backlight->dev); + + return ret; +} + +static int st7789v_remove(struct spi_device *spi) +{ + struct st7789v *ctx = spi_get_drvdata(spi); + + drm_panel_detach(&ctx->panel); + drm_panel_remove(&ctx->panel); + + if (ctx->backlight) + put_device(&ctx->backlight->dev); + + return 0; +} + +static const struct of_device_id st7789v_of_match[] = { + { .compatible = "sitronix,st7789v" }, + { } +}; +MODULE_DEVICE_TABLE(of, st7789v_of_match); + +static struct spi_driver st7789v_driver = { + .probe = st7789v_probe, + .remove = st7789v_remove, + .driver = { + .name = "st7789v", + .of_match_table = st7789v_of_match, + }, +}; +module_spi_driver(st7789v_driver); + +MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); +MODULE_DESCRIPTION("Sitronix st7789v LCD Driver"); +MODULE_LICENSE("GPL 
v2"); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 7d1cab57c89e..0fdedee4509d 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -393,6 +393,7 @@ static struct ttm_bo_driver qxl_bo_driver = { .verify_access = &qxl_verify_access, .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, .move_notify = &qxl_bo_move_notify, }; diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index aefca0b03f38..c31e660e35db 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3295,7 +3295,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) mem_trp = ((temp >> 8) & 0x7) + 1; mem_tras = ((temp >> 11) & 0xf) + 4; } else if (rdev->family == CHIP_RV350 || - rdev->family <= CHIP_RV380) { + rdev->family == CHIP_RV380) { /* rv3x0 */ mem_trcd = (temp & 0x7) + 3; mem_trp = ((temp >> 8) & 0x7) + 3; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2e400dc414e3..c1c8e2208a21 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -499,6 +499,7 @@ struct radeon_bo { u32 tiling_flags; u32 pitch; int surface_reg; + unsigned prime_shared_count; /* list of all virtual address to which this bo * is associated to */ diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a8442f7196d6..df6b58c08544 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -164,6 +164,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].allowed_domains = domain; } + /* Objects shared as dma-bufs cannot be moved to VRAM */ + if (p->relocs[i].robj->prime_shared_count) { + p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM; + if (!p->relocs[i].allowed_domains) { + DRM_ERROR("BO associated with dma-buf cannot " + "be moved to VRAM\n"); + return -EINVAL; + } + } + p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].tv.shared = !r->write_domain; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 981385eb5389..17d3dafc8319 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1355,6 +1355,12 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); } + /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ + if (obj->import_attach) { + DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + return ERR_PTR(-EINVAL); + } + radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); if (radeon_fb == NULL) { drm_gem_object_unreference_unlocked(obj); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 9b0b123ce079..dddb372de2b9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -120,6 +120,10 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj, return r; } } + if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) { + /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */ + return -EINVAL; + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 74b276060c20..bec2ec056de4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -352,6 +352,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 
max_offset, return 0; } + if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) { + /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */ + return -EINVAL; + } + radeon_ttm_placement_from_domain(bo, domain); for (i = 0; i < bo->placement.num_placement; i++) { /* force to pin into visible video ram */ diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index f3609c97496b..7110d403322c 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -77,6 +77,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, list_add_tail(&bo->list, &rdev->gem.objects); mutex_unlock(&rdev->gem.mutex); + bo->prime_shared_count = 1; return &bo->gem_base; } @@ -91,6 +92,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj) /* pin buffer into GTT */ ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); + if (likely(ret == 0)) + bo->prime_shared_count++; + radeon_bo_unreserve(bo); return ret; } @@ -105,6 +109,8 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj) return; radeon_bo_unpin(bo); + if (bo->prime_shared_count) + bo->prime_shared_count--; radeon_bo_unreserve(bo); } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index aaa3e80fecb4..8b7623b5a624 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -873,6 +873,7 @@ static struct ttm_bo_driver radeon_bo_driver = { .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, .io_mem_free = &radeon_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; int radeon_ttm_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 4c2fd056dd6d..8a50dab19e5c 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -11,15 +11,17 @@ config DRM_RCAR_DU Choose this option if you have an R-Car chipset. If M is selected the module will be called rcar-du-drm. -config DRM_RCAR_HDMI - bool "R-Car DU HDMI Encoder Support" - depends on DRM_RCAR_DU +config DRM_RCAR_DW_HDMI + tristate "R-Car DU Gen3 HDMI Encoder Support" + depends on DRM && OF + select DRM_DW_HDMI help - Enable support for external HDMI encoders. + Enable support for R-Car Gen3 internal HDMI encoder. config DRM_RCAR_LVDS bool "R-Car DU LVDS Encoder Support" depends on DRM_RCAR_DU + select DRM_PANEL help Enable support for the R-Car Display Unit embedded LVDS encoders. 
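A note on the radeon hunks above: they all enforce a single invariant, namely that a buffer object shared through dma-buf (tracked by the new prime_shared_count field) must never be placed in VRAM, where the driver could move it behind the importer's back. The sketch below shows that accounting pattern in isolation; the type and helper names are hypothetical, not radeon's actual API.

#include <errno.h>

#define DOMAIN_GTT  (1u << 0)
#define DOMAIN_VRAM (1u << 1)

/* Hypothetical stand-in for struct radeon_bo. */
struct shared_bo {
	unsigned int prime_shared_count; /* > 0 while exported or imported via dma-buf */
	unsigned int allowed_domains;    /* bitmask of placements still permitted */
};

/* Importing a dma-buf, or pinning on behalf of an exporter, takes a shared reference. */
static void shared_bo_prime_pin(struct shared_bo *bo)
{
	bo->prime_shared_count++;
}

/* Unpinning drops the reference, guarding against underflow as the patch does. */
static void shared_bo_prime_unpin(struct shared_bo *bo)
{
	if (bo->prime_shared_count)
		bo->prime_shared_count--;
}

/*
 * Validation: mask VRAM out of the allowed placements of a shared BO and
 * fail if nothing remains, mirroring the -EINVAL paths added to
 * radeon_cs.c, radeon_gem.c and radeon_object.c.
 */
static int shared_bo_check_domains(struct shared_bo *bo)
{
	if (bo->prime_shared_count) {
		bo->allowed_domains &= ~DOMAIN_VRAM;
		if (!bo->allowed_domains)
			return -EINVAL;
	}
	return 0;
}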
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index d3b44651061a..2131e722de3b 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -4,13 +4,11 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_group.o \ rcar_du_kms.o \ rcar_du_lvdscon.o \ - rcar_du_plane.o \ - rcar_du_vgacon.o - -rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o + rcar_du_plane.o rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o +obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index edcbe2e3625d..4ed6f2340af0 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -106,9 +106,62 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc) * Hardware Setup */ +struct dpll_info { + unsigned int output; + unsigned int fdpll; + unsigned int n; + unsigned int m; +}; + +static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc, + struct dpll_info *dpll, + unsigned long input, + unsigned long target) +{ + unsigned long best_diff = (unsigned long)-1; + unsigned long diff; + unsigned int fdpll; + unsigned int m; + unsigned int n; + + for (n = 39; n < 120; n++) { + for (m = 0; m < 4; m++) { + for (fdpll = 1; fdpll < 32; fdpll++) { + unsigned long output; + + /* 1/2 (FRQSEL=1) for duty rate 50% */ + output = input * (n + 1) / (m + 1) + / (fdpll + 1) / 2; + + if (output >= 400000000) + continue; + + diff = abs((long)output - (long)target); + if (best_diff > diff) { + best_diff = diff; + dpll->n = n; + dpll->m = m; + dpll->fdpll = fdpll; + dpll->output = output; + } + + if (diff == 0) + goto done; + } + } + } + +done: + dev_dbg(rcrtc->group->dev->dev, + "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n", + dpll->output, dpll->fdpll, dpll->n, dpll->m, + best_diff); +} + static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; + struct rcar_du_device *rcdu = rcrtc->group->dev; unsigned long mode_clock = mode->clock * 1000; unsigned long clk; u32 value; @@ -124,12 +177,18 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) escr = div | ESCR_DCLKSEL_CLKS; if (rcrtc->extclock) { + struct dpll_info dpll = { 0 }; unsigned long extclk; unsigned long extrate; unsigned long rate; u32 extdiv; extclk = clk_get_rate(rcrtc->extclock); + if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { + rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock); + extclk = dpll.output; + } + extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock); extdiv = clamp(extdiv, 1U, 64U) - 1; @@ -140,7 +199,27 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) abs((long)rate - (long)mode_clock)) { dev_dbg(rcrtc->group->dev->dev, "crtc%u: using external clock\n", rcrtc->index); - escr = extdiv | ESCR_DCLKSEL_DCLKIN; + + if (rcdu->info->dpll_ch & (1 << rcrtc->index)) { + u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE + | DPLLCR_FDPLL(dpll.fdpll) + | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m) + | DPLLCR_STBY; + + if (rcrtc->index == 1) + dpllcr |= DPLLCR_PLCS1 + | DPLLCR_INCS_DOTCLKIN1; + else + dpllcr |= DPLLCR_PLCS0 + | DPLLCR_INCS_DOTCLKIN0; + + rcar_du_group_write(rcrtc->group, DPLLCR, + dpllcr); + + escr = ESCR_DCLKSEL_DCLKIN | 1; + } else { + escr = ESCR_DCLKSEL_DCLKIN | extdiv; + } } } @@ -488,22 +567,29 @@ static void rcar_du_crtc_disable(struct drm_crtc 
*crtc) rcar_du_crtc_stop(rcrtc); rcar_du_crtc_put(rcrtc); + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + rcrtc->outputs = 0; } static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { - struct drm_pending_vblank_event *event = crtc->state->event; struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct drm_device *dev = rcrtc->crtc.dev; unsigned long flags; - if (event) { + if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc) != 0); spin_lock_irqsave(&dev->event_lock, flags); - rcrtc->event = event; + rcrtc->event = crtc->state->event; + crtc->state->event = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index a7194812997e..15871fae7445 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -1,7 +1,7 @@ /* * rcar_du_crtc.h -- R-Car Display Unit CRTCs * - * Copyright (C) 2013-2014 Renesas Electronics Corporation + * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) * @@ -61,6 +61,8 @@ enum rcar_du_output { RCAR_DU_OUTPUT_DPAD1, RCAR_DU_OUTPUT_LVDS0, RCAR_DU_OUTPUT_LVDS1, + RCAR_DU_OUTPUT_HDMI0, + RCAR_DU_OUTPUT_HDMI1, RCAR_DU_OUTPUT_TCON, RCAR_DU_OUTPUT_MAX, }; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 62a3b3e32153..d6a0255181cc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -44,12 +44,10 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = { */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, [RCAR_DU_OUTPUT_DPAD1] = { .possible_crtcs = BIT(1) | BIT(0), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 1, }, }, @@ -68,17 +66,14 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = { */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(2) | BIT(1) | BIT(0), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, [RCAR_DU_OUTPUT_LVDS0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_LVDS, .port = 1, }, [RCAR_DU_OUTPUT_LVDS1] = { .possible_crtcs = BIT(2) | BIT(1), - .encoder_type = DRM_MODE_ENCODER_LVDS, .port = 2, }, }, @@ -97,12 +92,10 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = { */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(1) | BIT(0), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, [RCAR_DU_OUTPUT_LVDS0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_LVDS, .port = 1, }, }, @@ -118,12 +111,10 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = { /* R8A7792 has two RGB outputs. 
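Each output is fixed to a single CRTC, DPAD0 to CRTC 0 and DPAD1 to CRTC 1, as the possible_crtcs masks below encode.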
*/ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, [RCAR_DU_OUTPUT_DPAD1] = { .possible_crtcs = BIT(1), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 1, }, }, @@ -141,12 +132,10 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = { */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, [RCAR_DU_OUTPUT_DPAD1] = { .possible_crtcs = BIT(1), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 1, }, }, @@ -160,21 +149,28 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = { | RCAR_DU_FEATURE_VSP1_SOURCE, .num_crtcs = 4, .routes = { - /* R8A7795 has one RGB output, one LVDS output and two - * (currently unsupported) HDMI outputs. + /* R8A7795 has one RGB output, two HDMI outputs and one + * LVDS output. */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(3), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, + [RCAR_DU_OUTPUT_HDMI0] = { + .possible_crtcs = BIT(1), + .port = 1, + }, + [RCAR_DU_OUTPUT_HDMI1] = { + .possible_crtcs = BIT(2), + .port = 2, + }, [RCAR_DU_OUTPUT_LVDS0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_LVDS, .port = 3, }, }, .num_lvds = 1, + .dpll_ch = BIT(1) | BIT(2), }; static const struct rcar_du_device_info rcar_du_r8a7796_info = { @@ -189,12 +185,10 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = { */ [RCAR_DU_OUTPUT_DPAD0] = { .possible_crtcs = BIT(2), - .encoder_type = DRM_MODE_ENCODER_NONE, .port = 0, }, [RCAR_DU_OUTPUT_LVDS0] = { .possible_crtcs = BIT(0), - .encoder_type = DRM_MODE_ENCODER_LVDS, .port = 2, }, }, @@ -318,10 +312,8 @@ static int rcar_du_probe(struct platform_device *pdev) if (rcdu == NULL) return -ENOMEM; - init_waitqueue_head(&rcdu->commit.wait); - rcdu->dev = &pdev->dev; - rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data; + rcdu->info = of_device_get_match_data(rcdu->dev); platform_set_drvdata(pdev, rcdu); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index c843c3134498..f8cd79488ece 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -38,7 +38,6 @@ struct rcar_du_lvdsenc; /* * struct rcar_du_output_routing - Output routing specification * @possible_crtcs: bitmask of possible CRTCs for the output - * @encoder_type: DRM type of the internal encoder associated with the output * @port: device tree port number corresponding to this output route * * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). 
Output routing data @@ -47,7 +46,6 @@ struct rcar_du_lvdsenc; */ struct rcar_du_output_routing { unsigned int possible_crtcs; - unsigned int encoder_type; unsigned int port; }; @@ -67,6 +65,7 @@ struct rcar_du_device_info { unsigned int num_crtcs; struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX]; unsigned int num_lvds; + unsigned int dpll_ch; }; #define RCAR_DU_MAX_CRTCS 4 @@ -98,11 +97,6 @@ struct rcar_du_device { unsigned int vspd1_sink; struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS]; - - struct { - wait_queue_head_t wait; - u32 pending; - } commit; }; static inline bool rcar_du_has(struct rcar_du_device *rcdu, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index ab8645c57e2d..3e048dd98b64 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -16,14 +16,13 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> #include "rcar_du_drv.h" #include "rcar_du_encoder.h" -#include "rcar_du_hdmienc.h" #include "rcar_du_kms.h" #include "rcar_du_lvdscon.h" #include "rcar_du_lvdsenc.h" -#include "rcar_du_vgacon.h" /* ----------------------------------------------------------------------------- * Encoder @@ -33,6 +32,11 @@ static void rcar_du_encoder_disable(struct drm_encoder *encoder) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + if (renc->connector && renc->connector->panel) { + drm_panel_disable(renc->connector->panel); + drm_panel_unprepare(renc->connector->panel); + } + if (renc->lvds) rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false); } @@ -43,6 +47,11 @@ static void rcar_du_encoder_enable(struct drm_encoder *encoder) if (renc->lvds) rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true); + + if (renc->connector && renc->connector->panel) { + drm_panel_prepare(renc->connector->panel); + drm_panel_enable(renc->connector->panel); + } } static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder, @@ -52,30 +61,36 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder, struct rcar_du_encoder *renc = to_rcar_encoder(encoder); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; const struct drm_display_mode *mode = &crtc_state->mode; - const struct drm_display_mode *panel_mode; struct drm_connector *connector = conn_state->connector; struct drm_device *dev = encoder->dev; - /* DAC encoders have currently no restriction on the mode. */ - if (encoder->encoder_type == DRM_MODE_ENCODER_DAC) - return 0; - - if (list_empty(&connector->modes)) { - dev_dbg(dev->dev, "encoder: empty modes list\n"); - return -EINVAL; + /* + * Only panel-related encoder types require validation here, everything + * else is handled by the bridge drivers. + */ + if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + const struct drm_display_mode *panel_mode; + + if (list_empty(&connector->modes)) { + dev_dbg(dev->dev, "encoder: empty modes list\n"); + return -EINVAL; + } + + panel_mode = list_first_entry(&connector->modes, + struct drm_display_mode, head); + + /* We're not allowed to modify the resolution. */ + if (mode->hdisplay != panel_mode->hdisplay || + mode->vdisplay != panel_mode->vdisplay) + return -EINVAL; + + /* + * The flat panel mode is fixed, just copy it to the adjusted + * mode. + */ + drm_mode_copy(adjusted_mode, panel_mode); } - panel_mode = list_first_entry(&connector->modes, - struct drm_display_mode, head); - - /* We're not allowed to modify the resolution. 
*/ - if (mode->hdisplay != panel_mode->hdisplay || - mode->vdisplay != panel_mode->vdisplay) - return -EINVAL; - - /* The flat panel mode is fixed, just copy it to the adjusted mode. */ - drm_mode_copy(adjusted_mode, panel_mode); - if (renc->lvds) rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode); @@ -83,16 +98,54 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder, } static void rcar_du_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + struct drm_display_info *info = &conn_state->connector->display_info; + enum rcar_lvds_mode mode; + + rcar_du_crtc_route_output(crtc_state->crtc, renc->output); + + if (!renc->lvds) { + /* + * The DU driver creates connectors only for the outputs of the + * internal LVDS encoders. + */ + renc->connector = NULL; + return; + } + + renc->connector = to_rcar_connector(conn_state->connector); + + if (!info->num_bus_formats || !info->bus_formats) { + dev_err(encoder->dev->dev, "no LVDS bus format reported\n"); + return; + } + + switch (info->bus_formats[0]) { + case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: + case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: + mode = RCAR_LVDS_MODE_JEIDA; + break; + case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: + mode = RCAR_LVDS_MODE_VESA; + break; + default: + dev_err(encoder->dev->dev, + "unsupported LVDS bus format 0x%04x\n", + info->bus_formats[0]); + return; + } + + if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB) + mode |= RCAR_LVDS_MODE_MIRROR; - rcar_du_crtc_route_output(encoder->crtc, renc->output); + rcar_du_lvdsenc_set_mode(renc->lvds, mode); } static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .mode_set = rcar_du_encoder_mode_set, + .atomic_mode_set = rcar_du_encoder_mode_set, .disable = rcar_du_encoder_disable, .enable = rcar_du_encoder_enable, .atomic_check = rcar_du_encoder_atomic_check, @@ -103,14 +156,13 @@ static const struct drm_encoder_funcs encoder_funcs = { }; int rcar_du_encoder_init(struct rcar_du_device *rcdu, - enum rcar_du_encoder_type type, enum rcar_du_output output, struct device_node *enc_node, struct device_node *con_node) { struct rcar_du_encoder *renc; struct drm_encoder *encoder; - unsigned int encoder_type; + struct drm_bridge *bridge = NULL; int ret; renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); @@ -133,52 +185,51 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, break; } - switch (type) { - case RCAR_DU_ENCODER_VGA: - encoder_type = DRM_MODE_ENCODER_DAC; - break; - case RCAR_DU_ENCODER_LVDS: - encoder_type = DRM_MODE_ENCODER_LVDS; - break; - case RCAR_DU_ENCODER_HDMI: - encoder_type = DRM_MODE_ENCODER_TMDS; - break; - case RCAR_DU_ENCODER_NONE: - default: - /* No external encoder, use the internal encoder type. */ - encoder_type = rcdu->info->routes[output].encoder_type; - break; - } + if (enc_node) { + dev_dbg(rcdu->dev, "initializing encoder %s for output %u\n", + of_node_full_name(enc_node), output); - if (type == RCAR_DU_ENCODER_HDMI) { - ret = rcar_du_hdmienc_init(rcdu, renc, enc_node); - if (ret < 0) + /* Locate the DRM bridge from the encoder DT node. 
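of_drm_find_bridge() returns NULL until the bridge driver has registered itself, so a missing bridge is reported as -EPROBE_DEFER below rather than treated as a hard failure.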
*/ + bridge = of_drm_find_bridge(enc_node); + if (!bridge) { + ret = -EPROBE_DEFER; goto done; + } } else { - ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, - encoder_type, NULL); - if (ret < 0) - goto done; - - drm_encoder_helper_add(encoder, &encoder_helper_funcs); + dev_dbg(rcdu->dev, + "initializing internal encoder for output %u\n", + output); } - switch (encoder_type) { - case DRM_MODE_ENCODER_LVDS: - ret = rcar_du_lvds_connector_init(rcdu, renc, con_node); - break; - - case DRM_MODE_ENCODER_DAC: - ret = rcar_du_vga_connector_init(rcdu, renc); - break; - - case DRM_MODE_ENCODER_TMDS: - /* connector managed by the bridge driver */ - break; - - default: - ret = -EINVAL; - break; + ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret < 0) + goto done; + + drm_encoder_helper_add(encoder, &encoder_helper_funcs); + + if (bridge) { + /* + * Attach the bridge to the encoder. The bridge will create the + * connector. + */ + ret = drm_bridge_attach(encoder, bridge, NULL); + if (ret) { + drm_encoder_cleanup(encoder); + return ret; + } + } else { + /* There's no bridge, create the connector manually. */ + switch (output) { + case RCAR_DU_OUTPUT_LVDS0: + case RCAR_DU_OUTPUT_LVDS1: + ret = rcar_du_lvds_connector_init(rcdu, renc, con_node); + break; + + default: + ret = -EINVAL; + break; + } } done: diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index a050a3699857..5422fa4df272 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -17,22 +17,14 @@ #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> +struct drm_panel; struct rcar_du_device; -struct rcar_du_hdmienc; struct rcar_du_lvdsenc; -enum rcar_du_encoder_type { - RCAR_DU_ENCODER_UNUSED = 0, - RCAR_DU_ENCODER_NONE, - RCAR_DU_ENCODER_VGA, - RCAR_DU_ENCODER_LVDS, - RCAR_DU_ENCODER_HDMI, -}; - struct rcar_du_encoder { struct drm_encoder base; enum rcar_du_output output; - struct rcar_du_hdmienc *hdmi; + struct rcar_du_connector *connector; struct rcar_du_lvdsenc *lvds; }; @@ -44,13 +36,13 @@ struct rcar_du_encoder { struct rcar_du_connector { struct drm_connector connector; struct rcar_du_encoder *encoder; + struct drm_panel *panel; }; #define to_rcar_connector(c) \ container_of(c, struct rcar_du_connector, connector) int rcar_du_encoder_init(struct rcar_du_device *rcdu, - enum rcar_du_encoder_type type, enum rcar_du_output output, struct device_node *enc_node, struct device_node *con_node); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c deleted file mode 100644 index c4c5d1abcff8..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ /dev/null @@ -1,134 +0,0 @@ -/* - * R-Car Display Unit HDMI Encoder - * - * Copyright (C) 2014 Renesas Electronics Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include <linux/slab.h> - -#include <drm/drmP.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> - -#include "rcar_du_drv.h" -#include "rcar_du_encoder.h" -#include "rcar_du_hdmienc.h" -#include "rcar_du_lvdsenc.h" - -struct rcar_du_hdmienc { - struct rcar_du_encoder *renc; - bool enabled; -}; - -#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi) - -static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) -{ - struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - - if (hdmienc->renc->lvds) - rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, - false); - - hdmienc->enabled = false; -} - -static void rcar_du_hdmienc_enable(struct drm_encoder *encoder) -{ - struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - - if (hdmienc->renc->lvds) - rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, - true); - - hdmienc->enabled = true; -} - -static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; - - if (hdmienc->renc->lvds) - rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds, - adjusted_mode); - - return 0; -} - - -static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - - rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output); -} - -static const struct drm_encoder_helper_funcs encoder_helper_funcs = { - .mode_set = rcar_du_hdmienc_mode_set, - .disable = rcar_du_hdmienc_disable, - .enable = rcar_du_hdmienc_enable, - .atomic_check = rcar_du_hdmienc_atomic_check, -}; - -static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder) -{ - struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); - - if (hdmienc->enabled) - rcar_du_hdmienc_disable(encoder); - - drm_encoder_cleanup(encoder); -} - -static const struct drm_encoder_funcs encoder_funcs = { - .destroy = rcar_du_hdmienc_cleanup, -}; - -int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc, struct device_node *np) -{ - struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc); - struct drm_bridge *bridge; - struct rcar_du_hdmienc *hdmienc; - int ret; - - hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL); - if (hdmienc == NULL) - return -ENOMEM; - - /* Locate the DRM bridge from the HDMI encoder DT node. */ - bridge = of_drm_find_bridge(np); - if (!bridge) - return -EPROBE_DEFER; - - ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - if (ret < 0) - return ret; - - drm_encoder_helper_add(encoder, &encoder_helper_funcs); - - renc->hdmi = hdmienc; - hdmienc->renc = renc; - - /* Link the bridge to the encoder. 
*/ - ret = drm_bridge_attach(encoder, bridge, NULL); - if (ret) { - drm_encoder_cleanup(encoder); - return ret; - } - - return 0; -} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h deleted file mode 100644 index 2ff0128ac8e1..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * R-Car Display Unit HDMI Encoder - * - * Copyright (C) 2014 Renesas Electronics Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __RCAR_DU_HDMIENC_H__ -#define __RCAR_DU_HDMIENC_H__ - -#include <linux/module.h> - -struct device_node; -struct rcar_du_device; -struct rcar_du_encoder; - -#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI) -int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc, struct device_node *np); -#else -static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc, - struct device_node *np) -{ - return -ENOSYS; -} -#endif - -#endif /* __RCAR_DU_HDMIENC_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index ff61f6032f2c..f4125c8ca902 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -249,18 +249,9 @@ static int rcar_du_atomic_check(struct drm_device *dev, return rcar_du_atomic_check_planes(dev, state); } -struct rcar_du_commit { - struct work_struct work; - struct drm_device *dev; - struct drm_atomic_state *state; - u32 crtcs; -}; - -static void rcar_du_atomic_complete(struct rcar_du_commit *commit) +static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state) { - struct drm_device *dev = commit->dev; - struct rcar_du_device *rcdu = dev->dev_private; - struct drm_atomic_state *old_state = commit->state; + struct drm_device *dev = old_state->dev; /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); @@ -268,114 +259,31 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit) drm_atomic_helper_commit_planes(dev, old_state, DRM_PLANE_COMMIT_ACTIVE_ONLY); + drm_atomic_helper_commit_hw_done(old_state); drm_atomic_helper_wait_for_vblanks(dev, old_state); drm_atomic_helper_cleanup_planes(dev, old_state); - - drm_atomic_state_put(old_state); - - /* Complete the commit, wake up any waiter. */ - spin_lock(&rcdu->commit.wait.lock); - rcdu->commit.pending &= ~commit->crtcs; - wake_up_all_locked(&rcdu->commit.wait); - spin_unlock(&rcdu->commit.wait.lock); - - kfree(commit); -} - -static void rcar_du_atomic_work(struct work_struct *work) -{ - struct rcar_du_commit *commit = - container_of(work, struct rcar_du_commit, work); - - rcar_du_atomic_complete(commit); -} - -static int rcar_du_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) -{ - struct rcar_du_device *rcdu = dev->dev_private; - struct rcar_du_commit *commit; - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; - unsigned int i; - int ret; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) - return ret; - - /* Allocate the commit object. 
*/ - commit = kzalloc(sizeof(*commit), GFP_KERNEL); - if (commit == NULL) { - ret = -ENOMEM; - goto error; - } - - INIT_WORK(&commit->work, rcar_du_atomic_work); - commit->dev = dev; - commit->state = state; - - /* Wait until all affected CRTCs have completed previous commits and - * mark them as pending. - */ - for_each_crtc_in_state(state, crtc, crtc_state, i) - commit->crtcs |= drm_crtc_mask(crtc); - - spin_lock(&rcdu->commit.wait.lock); - ret = wait_event_interruptible_locked(rcdu->commit.wait, - !(rcdu->commit.pending & commit->crtcs)); - if (ret == 0) - rcdu->commit.pending |= commit->crtcs; - spin_unlock(&rcdu->commit.wait.lock); - - if (ret) { - kfree(commit); - goto error; - } - - /* Swap the state, this is the point of no return. */ - drm_atomic_helper_swap_state(state, true); - - drm_atomic_state_get(state); - if (nonblock) - schedule_work(&commit->work); - else - rcar_du_atomic_complete(commit); - - return 0; - -error: - drm_atomic_helper_cleanup_planes(dev, state); - return ret; } /* ----------------------------------------------------------------------------- * Initialization */ +static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = { + .atomic_commit_tail = rcar_du_atomic_commit_tail, +}; + static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = { .fb_create = rcar_du_fb_create, .output_poll_changed = rcar_du_output_poll_changed, .atomic_check = rcar_du_atomic_check, - .atomic_commit = rcar_du_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, enum rcar_du_output output, struct of_endpoint *ep) { - static const struct { - const char *compatible; - enum rcar_du_encoder_type type; - } encoders[] = { - { "adi,adv7123", RCAR_DU_ENCODER_VGA }, - { "adi,adv7511w", RCAR_DU_ENCODER_HDMI }, - { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS }, - }; - - enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE; struct device_node *connector = NULL; struct device_node *encoder = NULL; struct device_node *ep_node = NULL; @@ -394,6 +302,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, return -ENODEV; } + if (!of_device_is_available(entity)) { + dev_dbg(rcdu->dev, + "connected entity %s is disabled, skipping\n", + entity->full_name); + return -ENODEV; + } + entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0); for_each_endpoint_of_node(entity, ep_node) { @@ -422,30 +337,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, of_node_put(entity_ep_node); - if (encoder) { - /* - * If an encoder has been found, get its type based on its - * compatible string. - */ - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(encoders); ++i) { - if (of_device_is_compatible(encoder, - encoders[i].compatible)) { - enc_type = encoders[i].type; - break; - } - } - - if (i == ARRAY_SIZE(encoders)) { - dev_warn(rcdu->dev, - "unknown encoder type for %s, skipping\n", - encoder->full_name); - of_node_put(encoder); - of_node_put(connector); - return -EINVAL; - } - } else { + if (!encoder) { /* * If no encoder has been found the entity must be the * connector. 
@@ -453,7 +345,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu, connector = entity; } - ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector); + ret = rcar_du_encoder_init(rcdu, output, encoder, connector); if (ret && ret != -EPROBE_DEFER) dev_warn(rcdu->dev, "failed to initialize encoder %s on output %u (%d), skipping\n", @@ -561,6 +453,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) dev->mode_config.max_width = 4095; dev->mode_config.max_height = 2047; dev->mode_config.funcs = &rcar_du_mode_config_funcs; + dev->mode_config.helper_private = &rcar_du_mode_config_helper; rcdu->num_crtcs = rcdu->info->num_crtcs; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index 3bcfd161c53f..ee91481131ad 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> #include <video/display_timing.h> #include <video/of_display_timing.h> @@ -25,47 +26,30 @@ #include "rcar_du_kms.h" #include "rcar_du_lvdscon.h" -struct rcar_du_lvds_connector { - struct rcar_du_connector connector; - - struct { - unsigned int width_mm; /* Panel width in mm */ - unsigned int height_mm; /* Panel height in mm */ - struct videomode mode; - } panel; -}; - -#define to_rcar_lvds_connector(c) \ - container_of(c, struct rcar_du_lvds_connector, connector.connector) - static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) { - struct rcar_du_lvds_connector *lvdscon = - to_rcar_lvds_connector(connector); - struct drm_display_mode *mode; - - mode = drm_mode_create(connector->dev); - if (mode == NULL) - return 0; + struct rcar_du_connector *rcon = to_rcar_connector(connector); - mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; - - drm_display_mode_from_videomode(&lvdscon->panel.mode, mode); - - drm_mode_probed_add(connector, mode); - - return 1; + return drm_panel_get_modes(rcon->panel); } static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_lvds_connector_get_modes, }; +static void rcar_du_lvds_connector_destroy(struct drm_connector *connector) +{ + struct rcar_du_connector *rcon = to_rcar_connector(connector); + + drm_panel_detach(rcon->panel); + drm_connector_cleanup(connector); +} + static const struct drm_connector_funcs connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, + .destroy = rcar_du_lvds_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -75,27 +59,19 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, const struct device_node *np) { struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc); - struct rcar_du_lvds_connector *lvdscon; + struct rcar_du_connector *rcon; struct drm_connector *connector; - struct display_timing timing; int ret; - lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL); - if (lvdscon == NULL) + rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL); + if (rcon == NULL) return -ENOMEM; - ret = of_get_display_timing(np, "panel-timing", &timing); - if (ret < 0) - return ret; - - videomode_from_timing(&timing, &lvdscon->panel.mode); + connector = &rcon->connector; - 
of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm); - of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm); - - connector = &lvdscon->connector.connector; - connector->display_info.width_mm = lvdscon->panel.width_mm; - connector->display_info.height_mm = lvdscon->panel.height_mm; + rcon->panel = of_drm_find_panel(np); + if (!rcon->panel) + return -EPROBE_DEFER; ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, DRM_MODE_CONNECTOR_LVDS); @@ -112,7 +88,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - lvdscon->connector.encoder = renc; + ret = drm_panel_attach(rcon->panel, connector); + if (ret < 0) + return ret; + + rcon->encoder = renc; return 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c index e3a4985f6f3f..1661f6201210 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c @@ -31,6 +31,7 @@ struct rcar_du_lvdsenc { bool enabled; enum rcar_lvds_input input; + enum rcar_lvds_mode mode; }; static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data) @@ -61,7 +62,7 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds, /* Select the input, hardcode mode 0, enable LVDS operation and turn * bias circuitry on. */ - lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN; + lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN; if (rcrtc->index == 2) lvdcr0 |= LVDCR0_DUSEL; rcar_lvds_write(lvds, LVDCR0, lvdcr0); @@ -114,7 +115,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds, * Turn the PLL on, set it to LVDS normal mode, wait for the startup * delay and turn the output on. */ - lvdcr0 = LVDCR0_PLLON; + lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON; rcar_lvds_write(lvds, LVDCR0, lvdcr0); lvdcr0 |= LVDCR0_PWD; @@ -211,6 +212,12 @@ void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds, mode->clock = clamp(mode->clock, 25175, 148500); } +void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds, + enum rcar_lvds_mode mode) +{ + lvds->mode = mode; +} + static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds, struct platform_device *pdev) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h index dfdba746edf4..7218ac89333e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h @@ -26,8 +26,17 @@ enum rcar_lvds_input { RCAR_LVDS_INPUT_DU2, }; +/* Keep in sync with the LVDCR0.LVMD hardware register values. 
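The value is shifted into LVDCR0 by LVDCR0_LVMD_SHIFT in rcar_du_lvdsenc_start_gen2() and rcar_du_lvdsenc_start_gen3(), so renumbering these entries would silently program the wrong link format.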
*/ +enum rcar_lvds_mode { + RCAR_LVDS_MODE_JEIDA = 0, + RCAR_LVDS_MODE_MIRROR = 1, + RCAR_LVDS_MODE_VESA = 4, +}; + #if IS_ENABLED(CONFIG_DRM_RCAR_LVDS) int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu); +void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds, + enum rcar_lvds_mode mode); int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc, bool enable); void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds, @@ -37,6 +46,10 @@ static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu) { return 0; } +static inline void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds, + enum rcar_lvds_mode mode) +{ +} static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds, struct drm_crtc *crtc, bool enable) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h index fedb0161e234..d5bae99d3cfe 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h @@ -277,6 +277,29 @@ #define DEFR10_TSEL_H3_TCON1 (0 << 1) /* DEFR102 register only (DU2/DU3) */ #define DEFR10_DEFE10 (1 << 0) +#define DPLLCR 0x20044 +#define DPLLCR_CODE (0x95 << 24) +#define DPLLCR_PLCS1 (1 << 23) +/* + * PLCS0 is bit 21, but H3 ES1.x requires bit 20 to be set as well. As bit 20 + * isn't implemented by other SoC in the Gen3 family it can safely be set + * unconditionally. + */ +#define DPLLCR_PLCS0 (3 << 20) +#define DPLLCR_CLKE (1 << 18) +#define DPLLCR_FDPLL(n) ((n) << 12) +#define DPLLCR_N(n) ((n) << 5) +#define DPLLCR_M(n) ((n) << 3) +#define DPLLCR_STBY (1 << 2) +#define DPLLCR_INCS_DOTCLKIN0 (0 << 0) +#define DPLLCR_INCS_DOTCLKIN1 (1 << 1) + +#define DPLLC2R 0x20048 +#define DPLLC2R_CODE (0x95 << 24) +#define DPLLC2R_SELC (1 << 12) +#define DPLLC2R_M(n) ((n) << 8) +#define DPLLC2R_FDPLL(n) ((n) << 0) + /* ----------------------------------------------------------------------------- * Display Timing Generation Registers */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c deleted file mode 100644 index 8d6125c1c0f9..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector - * - * Copyright (C) 2013-2014 Renesas Electronics Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include <drm/drmP.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> - -#include "rcar_du_drv.h" -#include "rcar_du_encoder.h" -#include "rcar_du_kms.h" -#include "rcar_du_vgacon.h" - -static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) -{ - return 0; -} - -static const struct drm_connector_helper_funcs connector_helper_funcs = { - .get_modes = rcar_du_vga_connector_get_modes, -}; - -static enum drm_connector_status -rcar_du_vga_connector_detect(struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - -static const struct drm_connector_funcs connector_funcs = { - .dpms = drm_atomic_helper_connector_dpms, - .reset = drm_atomic_helper_connector_reset, - .detect = rcar_du_vga_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc) -{ - struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc); - struct rcar_du_connector *rcon; - struct drm_connector *connector; - int ret; - - rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL); - if (rcon == NULL) - return -ENOMEM; - - connector = &rcon->connector; - connector->display_info.width_mm = 0; - connector->display_info.height_mm = 0; - connector->interlace_allowed = true; - - ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs, - DRM_MODE_CONNECTOR_VGA); - if (ret < 0) - return ret; - - drm_connector_helper_add(connector, &connector_helper_funcs); - - connector->dpms = DRM_MODE_DPMS_OFF; - drm_object_property_set_value(&connector->base, - rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); - - ret = drm_mode_connector_attach_encoder(connector, encoder); - if (ret < 0) - return ret; - - return 0; -} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h deleted file mode 100644 index 112f50316e01..000000000000 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector - * - * Copyright (C) 2013-2014 Renesas Electronics Corporation - * - * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __RCAR_DU_VGACON_H__ -#define __RCAR_DU_VGACON_H__ - -struct rcar_du_device; -struct rcar_du_encoder; - -int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, - struct rcar_du_encoder *renc); - -#endif /* __RCAR_DU_VGACON_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h index 510dcc9c6816..f1d0f1824528 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h @@ -68,7 +68,7 @@ void rcar_du_vsp_disable(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc); #else -static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return 0; }; +static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; }; static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }; diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c new file mode 100644 index 000000000000..7539626b8ebd --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c @@ -0,0 +1,100 @@ +/* + * R-Car Gen3 HDMI PHY + * + * Copyright (C) 2016 Renesas Electronics Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <drm/bridge/dw_hdmi.h> + +#define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */ +#define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */ +#define RCAR_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */ + +struct rcar_hdmi_phy_params { + unsigned long mpixelclock; + u16 opmode_div; /* Mode of operation and PLL dividers */ + u16 curr_gmp; /* PLL current and Gmp (conductance) */ + u16 div; /* PLL dividers */ +}; + +static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = { + { 35500000, 0x0003, 0x0344, 0x0328 }, + { 44900000, 0x0003, 0x0285, 0x0128 }, + { 71000000, 0x0002, 0x1184, 0x0314 }, + { 90000000, 0x0002, 0x1144, 0x0114 }, + { 140250000, 0x0001, 0x20c4, 0x030a }, + { 182750000, 0x0001, 0x2084, 0x010a }, + { 281250000, 0x0000, 0x0084, 0x0305 }, + { 297000000, 0x0000, 0x0084, 0x0105 }, + { ~0UL, 0x0000, 0x0000, 0x0000 }, +}; + +static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi, + const struct dw_hdmi_plat_data *pdata, + unsigned long mpixelclock) +{ + const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params; + + for (; params && params->mpixelclock != ~0UL; ++params) { + if (mpixelclock <= params->mpixelclock) + break; + } + + if (params->mpixelclock == ~0UL) + return -EINVAL; + + dw_hdmi_phy_i2c_write(hdmi, params->opmode_div, + RCAR_HDMI_PHY_OPMODE_PLLCFG); + dw_hdmi_phy_i2c_write(hdmi, params->curr_gmp, + RCAR_HDMI_PHY_PLLCURRGMPCTRL); + dw_hdmi_phy_i2c_write(hdmi, params->div, RCAR_HDMI_PHY_PLLDIVCTRL); + + return 0; +} + +static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = { + .configure_phy = rcar_hdmi_phy_configure, +}; + +static int rcar_dw_hdmi_probe(struct platform_device *pdev) +{ + return dw_hdmi_probe(pdev, &rcar_dw_hdmi_plat_data); +} + +static int 
rcar_dw_hdmi_remove(struct platform_device *pdev) +{ + dw_hdmi_remove(pdev); + + return 0; +} + +static const struct of_device_id rcar_dw_hdmi_of_table[] = { + { .compatible = "renesas,rcar-gen3-hdmi" }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table); + +static struct platform_driver rcar_dw_hdmi_platform_driver = { + .probe = rcar_dw_hdmi_probe, + .remove = rcar_dw_hdmi_remove, + .driver = { + .name = "rcar-dw-hdmi", + .of_match_table = rcar_dw_hdmi_of_table, + }, +}; + +module_platform_driver(rcar_dw_hdmi_platform_driver); + +MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>"); +MODULE_DESCRIPTION("Renesas R-Car Gen3 HDMI Encoder Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 86279f5022c2..88f16cdf6a4b 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -66,7 +66,9 @@ static struct gdp_format_to_str { #define GAM_GDP_ALPHARANGE_255 BIT(5) #define GAM_GDP_AGC_FULL_RANGE 0x00808080 #define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) -#define GAM_GDP_SIZE_MAX 0x7FF + +#define GAM_GDP_SIZE_MAX_WIDTH 3840 +#define GAM_GDP_SIZE_MAX_HEIGHT 2160 #define GDP_NODE_NB_BANK 2 #define GDP_NODE_PER_FIELD 2 @@ -632,8 +634,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, /* src_x are in 16.16 format */ src_x = state->src_x >> 16; src_y = state->src_y >> 16; - src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); - src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); + src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); + src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); format = sti_gdp_fourcc2format(fb->format->format); if (format == -1) { @@ -741,8 +743,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, /* src_x are in 16.16 format */ src_x = state->src_x >> 16; src_y = state->src_y >> 16; - src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); - src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); + src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH); + src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT); list = sti_gdp_get_free_nodes(gdp); top_field = list->top_field; diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile index d625a82a6e5f..59b757350a1f 100644 --- a/drivers/gpu/drm/sun4i/Makefile +++ b/drivers/gpu/drm/sun4i/Makefile @@ -1,11 +1,11 @@ -sun4i-drm-y += sun4i_crtc.o sun4i-drm-y += sun4i_drv.o sun4i-drm-y += sun4i_framebuffer.o -sun4i-drm-y += sun4i_layer.o sun4i-tcon-y += sun4i_tcon.o sun4i-tcon-y += sun4i_rgb.o sun4i-tcon-y += sun4i_dotclock.o +sun4i-tcon-y += sun4i_crtc.o +sun4i-tcon-y += sun4i_layer.o obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 08ce15070f80..d660741ba475 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -24,7 +24,7 @@ #include "sun4i_backend.h" #include "sun4i_drv.h" -static u32 sunxi_rgb2yuv_coef[12] = { +static const u32 sunxi_rgb2yuv_coef[12] = { 0x00000107, 0x00000204, 0x00000064, 0x00000108, 0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808, 0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808 diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index a5d546a68e16..3c876c3a356a 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -19,6 
+19,7 @@ #include <linux/clk-provider.h> #include <linux/ioport.h> #include <linux/of_address.h> +#include <linux/of_graph.h> #include <linux/of_irq.h> #include <linux/regmap.h> @@ -27,6 +28,7 @@ #include "sun4i_backend.h" #include "sun4i_crtc.h" #include "sun4i_drv.h" +#include "sun4i_layer.h" #include "sun4i_tcon.h" static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc, @@ -50,12 +52,11 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); - struct sun4i_drv *drv = scrtc->drv; struct drm_pending_vblank_event *event = crtc->state->event; DRM_DEBUG_DRIVER("Committing plane changes\n"); - sun4i_backend_commit(drv->backend); + sun4i_backend_commit(scrtc->backend); if (event) { crtc->state->event = NULL; @@ -72,11 +73,10 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, static void sun4i_crtc_disable(struct drm_crtc *crtc) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); - struct sun4i_drv *drv = scrtc->drv; DRM_DEBUG_DRIVER("Disabling the CRTC\n"); - sun4i_tcon_disable(drv->tcon); + sun4i_tcon_disable(scrtc->tcon); if (crtc->state->event && !crtc->state->active) { spin_lock_irq(&crtc->dev->event_lock); @@ -90,11 +90,10 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc) static void sun4i_crtc_enable(struct drm_crtc *crtc) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); - struct sun4i_drv *drv = scrtc->drv; DRM_DEBUG_DRIVER("Enabling the CRTC\n"); - sun4i_tcon_enable(drv->tcon); + sun4i_tcon_enable(scrtc->tcon); } static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { @@ -107,11 +106,10 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = { static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); - struct sun4i_drv *drv = scrtc->drv; DRM_DEBUG_DRIVER("Enabling VBLANK on crtc %p\n", crtc); - sun4i_tcon_enable_vblank(drv->tcon, true); + sun4i_tcon_enable_vblank(scrtc->tcon, true); return 0; } @@ -119,11 +117,10 @@ static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc) static void sun4i_crtc_disable_vblank(struct drm_crtc *crtc) { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); - struct sun4i_drv *drv = scrtc->drv; DRM_DEBUG_DRIVER("Disabling VBLANK on crtc %p\n", crtc); - sun4i_tcon_enable_vblank(drv->tcon, false); + sun4i_tcon_enable_vblank(scrtc->tcon, false); } static const struct drm_crtc_funcs sun4i_crtc_funcs = { @@ -137,28 +134,67 @@ static const struct drm_crtc_funcs sun4i_crtc_funcs = { .disable_vblank = sun4i_crtc_disable_vblank, }; -struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm) +struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm, + struct sun4i_backend *backend, + struct sun4i_tcon *tcon) { - struct sun4i_drv *drv = drm->dev_private; struct sun4i_crtc *scrtc; - int ret; + struct drm_plane *primary = NULL, *cursor = NULL; + int ret, i; scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL); if (!scrtc) + return ERR_PTR(-ENOMEM); + scrtc->backend = backend; + scrtc->tcon = tcon; + + /* Create our layers */ + scrtc->layers = sun4i_layers_init(drm, scrtc->backend); + if (IS_ERR(scrtc->layers)) { + dev_err(drm->dev, "Couldn't create the planes\n"); return NULL; - scrtc->drv = drv; + } + + /* find primary and cursor planes for drm_crtc_init_with_planes */ + for (i = 0; scrtc->layers[i]; i++) { + struct sun4i_layer *layer = scrtc->layers[i]; + + switch (layer->plane.type) { + case DRM_PLANE_TYPE_PRIMARY: + primary = 
&layer->plane; + break; + case DRM_PLANE_TYPE_CURSOR: + cursor = &layer->plane; + break; + default: + break; + } + } ret = drm_crtc_init_with_planes(drm, &scrtc->crtc, - drv->primary, - NULL, + primary, + cursor, &sun4i_crtc_funcs, NULL); if (ret) { dev_err(drm->dev, "Couldn't init DRM CRTC\n"); - return NULL; + return ERR_PTR(ret); } drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs); + /* Set crtc.port to output port node of the tcon */ + scrtc->crtc.port = of_graph_get_port_by_id(scrtc->tcon->dev->of_node, + 1); + + /* Set possible_crtcs to this crtc for overlay planes */ + for (i = 0; scrtc->layers[i]; i++) { + uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc)); + struct sun4i_layer *layer = scrtc->layers[i]; + + if (layer->plane.type == DRM_PLANE_TYPE_OVERLAY) + layer->plane.possible_crtcs = possible_crtcs; + } + return scrtc; } diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.h b/drivers/gpu/drm/sun4i/sun4i_crtc.h index dec8ce4d9b25..230cb8f0d601 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.h +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.h @@ -17,7 +17,9 @@ struct sun4i_crtc { struct drm_crtc crtc; struct drm_pending_vblank_event *event; - struct sun4i_drv *drv; + struct sun4i_backend *backend; + struct sun4i_tcon *tcon; + struct sun4i_layer **layers; }; static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc) @@ -25,6 +27,8 @@ static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc) return container_of(crtc, struct sun4i_crtc, crtc); } -struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm); +struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm, + struct sun4i_backend *backend, + struct sun4i_tcon *tcon); #endif /* _SUN4I_CRTC_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 329ea56106a5..8ddd72cd5873 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -12,6 +12,7 @@ #include <linux/component.h> #include <linux/of_graph.h> +#include <linux/of_reserved_mem.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -20,10 +21,9 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_of.h> -#include "sun4i_crtc.h" #include "sun4i_drv.h" #include "sun4i_framebuffer.h" -#include "sun4i_layer.h" +#include "sun4i_tcon.h" DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); @@ -92,30 +92,25 @@ static int sun4i_drv_bind(struct device *dev) } drm->dev_private = drv; - drm_vblank_init(drm, 1); + ret = of_reserved_mem_device_init(dev); + if (ret && ret != -ENODEV) { + dev_err(drm->dev, "Couldn't claim our memory region\n"); + goto free_drm; + } + + /* drm_vblank_init calls kcalloc, which can fail */ + ret = drm_vblank_init(drm, 1); + if (ret) + goto free_mem_region; + drm_mode_config_init(drm); ret = component_bind_all(drm->dev, drm); if (ret) { dev_err(drm->dev, "Couldn't bind all pipelines components\n"); - goto free_drm; - } - - /* Create our layers */ - drv->layers = sun4i_layers_init(drm); - if (IS_ERR(drv->layers)) { - dev_err(drm->dev, "Couldn't create the planes\n"); - ret = PTR_ERR(drv->layers); - goto free_drm; + goto cleanup_mode_config; } - /* Create our CRTC */ - drv->crtc = sun4i_crtc_init(drm); - if (!drv->crtc) { - dev_err(drm->dev, "Couldn't create the CRTC\n"); - ret = -EINVAL; - goto free_drm; - } drm->irq_enabled = true; /* Remove early framebuffers (ie. 
simplefb) */ @@ -126,7 +121,7 @@ static int sun4i_drv_bind(struct device *dev) if (IS_ERR(drv->fbdev)) { dev_err(drm->dev, "Couldn't create our framebuffer\n"); ret = PTR_ERR(drv->fbdev); - goto free_drm; + goto cleanup_mode_config; } /* Enable connectors polling */ @@ -134,10 +129,18 @@ static int sun4i_drv_bind(struct device *dev) ret = drm_dev_register(drm, 0); if (ret) - goto free_drm; + goto finish_poll; return 0; +finish_poll: + drm_kms_helper_poll_fini(drm); + sun4i_framebuffer_free(drm); +cleanup_mode_config: + drm_mode_config_cleanup(drm); + drm_vblank_cleanup(drm); +free_mem_region: + of_reserved_mem_device_release(dev); free_drm: drm_dev_unref(drm); return ret; @@ -150,7 +153,9 @@ static void sun4i_drv_unbind(struct device *dev) drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); sun4i_framebuffer_free(drm); + drm_mode_config_cleanup(drm); drm_vblank_cleanup(drm); + of_reserved_mem_device_release(dev); drm_dev_unref(drm); } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h index 597353eab728..5df50126ff52 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.h +++ b/drivers/gpu/drm/sun4i/sun4i_drv.h @@ -18,13 +18,9 @@ struct sun4i_drv { struct sun4i_backend *backend; - struct sun4i_crtc *crtc; struct sun4i_tcon *tcon; - struct drm_plane *primary; struct drm_fbdev_cma *fbdev; - - struct sun4i_layer **layers; }; #endif /* _SUN4I_DRV_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c index 2c3beff8b53e..9872e0fc03b0 100644 --- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c +++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c @@ -48,5 +48,4 @@ void sun4i_framebuffer_free(struct drm_device *drm) struct sun4i_drv *drv = drm->dev_private; drm_fbdev_cma_fini(drv->fbdev); - drm_mode_config_cleanup(drm); } diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c index 5d53c977bca5..f26bde5b9117 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.c +++ b/drivers/gpu/drm/sun4i/sun4i_layer.c @@ -16,7 +16,6 @@ #include <drm/drmP.h> #include "sun4i_backend.h" -#include "sun4i_drv.h" #include "sun4i_layer.h" struct sun4i_plane_desc { @@ -36,8 +35,7 @@ static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct sun4i_layer *layer = plane_to_sun4i_layer(plane); - struct sun4i_drv *drv = layer->drv; - struct sun4i_backend *backend = drv->backend; + struct sun4i_backend *backend = layer->backend; sun4i_backend_layer_enable(backend, layer->id, false); } @@ -46,8 +44,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { struct sun4i_layer *layer = plane_to_sun4i_layer(plane); - struct sun4i_drv *drv = layer->drv; - struct sun4i_backend *backend = drv->backend; + struct sun4i_backend *backend = layer->backend; sun4i_backend_update_layer_coord(backend, layer->id, plane); sun4i_backend_update_layer_formats(backend, layer->id, plane); @@ -104,9 +101,9 @@ static const struct sun4i_plane_desc sun4i_backend_planes[] = { }; static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm, + struct sun4i_backend *backend, const struct sun4i_plane_desc *plane) { - struct sun4i_drv *drv = drm->dev_private; struct sun4i_layer *layer; int ret; @@ -114,7 +111,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm, if (!layer) return ERR_PTR(-ENOMEM); - ret = drm_universal_plane_init(drm, &layer->plane, BIT(0), + /* possible crtcs are set later */ + ret = 
drm_universal_plane_init(drm, &layer->plane, 0, &sun4i_backend_layer_funcs, plane->formats, plane->nformats, plane->type, NULL); @@ -125,22 +123,19 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm, drm_plane_helper_add(&layer->plane, &sun4i_backend_layer_helper_funcs); - layer->drv = drv; - - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - drv->primary = &layer->plane; + layer->backend = backend; return layer; } -struct sun4i_layer **sun4i_layers_init(struct drm_device *drm) +struct sun4i_layer **sun4i_layers_init(struct drm_device *drm, + struct sun4i_backend *backend) { - struct sun4i_drv *drv = drm->dev_private; struct sun4i_layer **layers; int i; - layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes), - sizeof(**layers), GFP_KERNEL); + layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes) + 1, + sizeof(*layers), GFP_KERNEL); if (!layers) return ERR_PTR(-ENOMEM); @@ -167,9 +162,9 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm) */ for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) { const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i]; - struct sun4i_layer *layer = layers[i]; + struct sun4i_layer *layer; - layer = sun4i_layer_init_one(drm, plane); + layer = sun4i_layer_init_one(drm, backend, plane); if (IS_ERR(layer)) { dev_err(drm->dev, "Couldn't initialize %s plane\n", i ? "overlay" : "primary"); @@ -178,11 +173,12 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm) DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n", i ? "overlay" : "primary", plane->pipe); - regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i), + regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i), SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK, SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe)); layer->id = i; + layers[i] = layer; }; return layers; diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.h b/drivers/gpu/drm/sun4i/sun4i_layer.h index a2f65d7a3f4e..4be1f0919df2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.h +++ b/drivers/gpu/drm/sun4i/sun4i_layer.h @@ -16,6 +16,7 @@ struct sun4i_layer { struct drm_plane plane; struct sun4i_drv *drv; + struct sun4i_backend *backend; int id; }; @@ -25,6 +26,7 @@ plane_to_sun4i_layer(struct drm_plane *plane) return container_of(plane, struct sun4i_layer, plane); } -struct sun4i_layer **sun4i_layers_init(struct drm_device *drm); +struct sun4i_layer **sun4i_layers_init(struct drm_device *drm, + struct sun4i_backend *backend); #endif /* _SUN4I_LAYER_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 46280dd70c9e..67f0b91a99de 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -18,7 +18,7 @@ #include <drm/drm_of.h> #include <drm/drm_panel.h> -#include "sun4i_drv.h" +#include "sun4i_crtc.h" #include "sun4i_tcon.h" #include "sun4i_rgb.h" @@ -26,7 +26,7 @@ struct sun4i_rgb { struct drm_connector connector; struct drm_encoder encoder; - struct sun4i_drv *drv; + struct sun4i_tcon *tcon; }; static inline struct sun4i_rgb * @@ -47,8 +47,7 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) { struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - struct sun4i_drv *drv = rgb->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_tcon *tcon = rgb->tcon; return drm_panel_get_modes(tcon->panel); } @@ -57,8 +56,7 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct sun4i_rgb *rgb = 
drm_connector_to_sun4i_rgb(connector); - struct sun4i_drv *drv = rgb->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_tcon *tcon = rgb->tcon; u32 hsync = mode->hsync_end - mode->hsync_start; u32 vsync = mode->vsync_end - mode->vsync_start; unsigned long rate = mode->clock * 1000; @@ -115,8 +113,7 @@ static void sun4i_rgb_connector_destroy(struct drm_connector *connector) { struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); - struct sun4i_drv *drv = rgb->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_tcon *tcon = rgb->tcon; drm_panel_detach(tcon->panel); drm_connector_cleanup(connector); @@ -141,8 +138,7 @@ static int sun4i_rgb_atomic_check(struct drm_encoder *encoder, static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_drv *drv = rgb->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_tcon *tcon = rgb->tcon; DRM_DEBUG_DRIVER("Enabling RGB output\n"); @@ -158,8 +154,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_drv *drv = rgb->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_tcon *tcon = rgb->tcon; DRM_DEBUG_DRIVER("Disabling RGB output\n"); @@ -177,8 +172,7 @@ static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder); - struct sun4i_drv *drv = rgb->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_tcon *tcon = rgb->tcon; sun4i_tcon0_mode_set(tcon, mode); @@ -204,10 +198,8 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = { .destroy = sun4i_rgb_enc_destroy, }; -int sun4i_rgb_init(struct drm_device *drm) +int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon) { - struct sun4i_drv *drv = drm->dev_private; - struct sun4i_tcon *tcon = drv->tcon; struct drm_encoder *encoder; struct drm_bridge *bridge; struct sun4i_rgb *rgb; @@ -216,7 +208,7 @@ int sun4i_rgb_init(struct drm_device *drm) rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); if (!rgb) return -ENOMEM; - rgb->drv = drv; + rgb->tcon = tcon; encoder = &rgb->encoder; ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0, @@ -239,7 +231,7 @@ int sun4i_rgb_init(struct drm_device *drm) } /* The RGB encoder can only work with the TCON channel 0 */ - rgb->encoder.possible_crtcs = BIT(0); + rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc)); if (tcon->panel) { drm_connector_helper_add(&rgb->connector, diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.h b/drivers/gpu/drm/sun4i/sun4i_rgb.h index 7c4da4c8acdd..40c18f4a6c7e 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.h +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.h @@ -13,6 +13,6 @@ #ifndef _SUN4I_RGB_H_ #define _SUN4I_RGB_H_ -int sun4i_rgb_init(struct drm_device *drm); +int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon); #endif /* _SUN4I_RGB_H_ */ diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 2e4e365cecf9..9a83a85529ac 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -142,7 +142,7 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, /* * This is called a backporch in the register documentation, - * but it really is the front porch + hsync + * but it really is the back porch + hsync */ bp = mode->crtc_htotal - mode->crtc_hsync_start; 
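/*
 * Editor's aside (not part of the patch): a minimal sketch of why
 * crtc_htotal - crtc_hsync_start is "back porch + hsync" for a
 * struct drm_display_mode from <drm/drm_modes.h>. A scanline runs
 * active | front porch | sync pulse | back porch, and crtc_hsync_start,
 * crtc_hsync_end and crtc_htotal mark the last three boundaries.
 */
static inline unsigned int sketch_hsync_plus_back_porch(const struct drm_display_mode *mode)
{
        unsigned int sync = mode->crtc_hsync_end - mode->crtc_hsync_start;
        unsigned int back_porch = mode->crtc_htotal - mode->crtc_hsync_end;

        return sync + back_porch;       /* == crtc_htotal - crtc_hsync_start */
}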
DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n", @@ -155,7 +155,7 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon, /* * This is called a backporch in the register documentation, - * but it really is the front porch + hsync + * but it really is the back porch + hsync */ bp = mode->crtc_vtotal - mode->crtc_vsync_start; DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n", @@ -289,8 +289,7 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private) { struct sun4i_tcon *tcon = private; struct drm_device *drm = tcon->drm; - struct sun4i_drv *drv = drm->dev_private; - struct sun4i_crtc *scrtc = drv->crtc; + struct sun4i_crtc *scrtc = tcon->crtc; unsigned int status; regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status); @@ -335,12 +334,11 @@ static int sun4i_tcon_init_clocks(struct device *dev, } } - return sun4i_dclk_create(dev, tcon); + return 0; } static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) { - sun4i_dclk_free(tcon); clk_disable_unprepare(tcon->clk); } @@ -437,30 +435,45 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, return ret; } + ret = sun4i_tcon_init_clocks(dev, tcon); + if (ret) { + dev_err(dev, "Couldn't init our TCON clocks\n"); + goto err_assert_reset; + } + ret = sun4i_tcon_init_regmap(dev, tcon); if (ret) { dev_err(dev, "Couldn't init our TCON regmap\n"); - goto err_assert_reset; + goto err_free_clocks; } - ret = sun4i_tcon_init_clocks(dev, tcon); + ret = sun4i_dclk_create(dev, tcon); if (ret) { - dev_err(dev, "Couldn't init our TCON clocks\n"); - goto err_assert_reset; + dev_err(dev, "Couldn't create our TCON dot clock\n"); + goto err_free_clocks; } ret = sun4i_tcon_init_irq(dev, tcon); if (ret) { dev_err(dev, "Couldn't init our TCON interrupts\n"); + goto err_free_dotclock; + } + + tcon->crtc = sun4i_crtc_init(drm, drv->backend, tcon); + if (IS_ERR(tcon->crtc)) { + dev_err(dev, "Couldn't create our CRTC\n"); + ret = PTR_ERR(tcon->crtc); goto err_free_clocks; } - ret = sun4i_rgb_init(drm); + ret = sun4i_rgb_init(drm, tcon); if (ret < 0) goto err_free_clocks; return 0; +err_free_dotclock: + sun4i_dclk_free(tcon); err_free_clocks: sun4i_tcon_free_clocks(tcon); err_assert_reset: @@ -473,6 +486,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master, { struct sun4i_tcon *tcon = dev_get_drvdata(dev); + sun4i_dclk_free(tcon); sun4i_tcon_free_clocks(tcon); } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index 166064bafe2e..f636343a935d 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -169,6 +169,9 @@ struct sun4i_tcon { /* Platform adjustments */ const struct sun4i_tcon_quirks *quirks; + + /* Associated crtc */ + struct sun4i_crtc *crtc; }; struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node); diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index c6f47222e8fc..49c49431a053 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -19,9 +19,11 @@ #include <drm/drmP.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_of.h> #include <drm/drm_panel.h> #include "sun4i_backend.h" +#include "sun4i_crtc.h" #include "sun4i_drv.h" #include "sun4i_tcon.h" @@ -349,8 +351,9 @@ static int sun4i_tv_atomic_check(struct drm_encoder *encoder, static void sun4i_tv_disable(struct drm_encoder *encoder) { struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); - struct sun4i_drv *drv = tv->drv; - struct sun4i_tcon *tcon 
= drv->tcon; + struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); + struct sun4i_tcon *tcon = crtc->tcon; + struct sun4i_backend *backend = crtc->backend; DRM_DEBUG_DRIVER("Disabling the TV Output\n"); @@ -359,18 +362,19 @@ static void sun4i_tv_disable(struct drm_encoder *encoder) regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, SUN4I_TVE_EN_ENABLE, 0); - sun4i_backend_disable_color_correction(drv->backend); + sun4i_backend_disable_color_correction(backend); } static void sun4i_tv_enable(struct drm_encoder *encoder) { struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); - struct sun4i_drv *drv = tv->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); + struct sun4i_tcon *tcon = crtc->tcon; + struct sun4i_backend *backend = crtc->backend; DRM_DEBUG_DRIVER("Enabling the TV Output\n"); - sun4i_backend_apply_color_correction(drv->backend); + sun4i_backend_apply_color_correction(backend); regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG, SUN4I_TVE_EN_ENABLE, @@ -384,8 +388,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder); - struct sun4i_drv *drv = tv->drv; - struct sun4i_tcon *tcon = drv->tcon; + struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc); + struct sun4i_tcon *tcon = crtc->tcon; const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode); sun4i_tcon1_mode_set(tcon, mode); @@ -623,7 +627,12 @@ static int sun4i_tv_bind(struct device *dev, struct device *master, goto err_disable_clk; } - tv->encoder.possible_crtcs = BIT(0); + tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm, + dev->of_node); + if (!tv->encoder.possible_crtcs) { + ret = -EPROBE_DEFER; + goto err_disable_clk; + } drm_connector_helper_add(&tv->connector, &sun4i_tv_comp_connector_helper_funcs); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index 17e62ecb5d4d..8672f5d2f237 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -619,10 +619,10 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = { .map_dma_buf = tegra_gem_prime_map_dma_buf, .unmap_dma_buf = tegra_gem_prime_unmap_dma_buf, .release = tegra_gem_prime_release, - .kmap_atomic = tegra_gem_prime_kmap_atomic, - .kunmap_atomic = tegra_gem_prime_kunmap_atomic, - .kmap = tegra_gem_prime_kmap, - .kunmap = tegra_gem_prime_kunmap, + .map_atomic = tegra_gem_prime_kmap_atomic, + .unmap_atomic = tegra_gem_prime_kunmap_atomic, + .map = tegra_gem_prime_kmap, + .unmap = tegra_gem_prime_kunmap, .mmap = tegra_gem_prime_mmap, .vmap = tegra_gem_prime_vmap, .vunmap = tegra_gem_prime_vunmap, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 412240a3ba90..e44626a2e698 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1020,37 +1020,44 @@ out_unlock: return ret; } -bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags) +static bool ttm_bo_places_compat(const struct ttm_place *places, + unsigned num_placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags) { - int i; - - for (i = 0; i < placement->num_placement; i++) { - const struct ttm_place *heap = &placement->placement[i]; - if (mem->mm_node && - (mem->start < heap->fpfn || - (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) - continue; + unsigned i; - *new_flags = heap->flags; - if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - 
(*new_flags & mem->placement & TTM_PL_MASK_MEM)) - return true; - } + for (i = 0; i < num_placement; i++) { + const struct ttm_place *heap = &places[i]; - for (i = 0; i < placement->num_busy_placement; i++) { - const struct ttm_place *heap = &placement->busy_placement[i]; - if (mem->mm_node && - (mem->start < heap->fpfn || + if (mem->mm_node && (mem->start < heap->fpfn || (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn))) continue; *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - (*new_flags & mem->placement & TTM_PL_MASK_MEM)) + (*new_flags & mem->placement & TTM_PL_MASK_MEM) && + (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || + (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) return true; } + return false; +} + +bool ttm_bo_mem_compat(struct ttm_placement *placement, + struct ttm_mem_reg *mem, + uint32_t *new_flags) +{ + if (ttm_bo_places_compat(placement->placement, placement->num_placement, + mem, new_flags)) + return true; + + if ((placement->busy_placement != placement->placement || + placement->num_busy_placement > placement->num_placement) && + ttm_bo_places_compat(placement->busy_placement, + placement->num_busy_placement, + mem, new_flags)) + return true; return false; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 35ffb3754feb..9f53df95f35c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -231,7 +231,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf) */ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { if (bo->mem.bus.is_iomem) - pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; + pfn = bdev->driver->io_mem_pfn(bo, page_offset); else { page = ttm->pages[page_offset]; if (unlikely(!page && i == 0)) { @@ -324,6 +324,14 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, return bo; } +unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset) +{ + return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + + page_offset; +} +EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn); + int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) { diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index fdb451e3ec01..26a7ad0f4789 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c @@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile, if (unlikely(ret != 0)) goto out_err0; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); if (unlikely(ret != 0)) goto out_err1; @@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists); int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed) + enum ttm_ref_type ref_type, bool *existed, + bool require_existed) { struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; struct ttm_ref_object *ref; @@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile, } rcu_read_unlock(); + if (require_existed) + return -EPERM; + ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), false, false); if (unlikely(ret != 0)) @@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile) ttm_ref_object_release(&ref->kref); } + spin_unlock(&tfile->lock); for (i = 0; i < TTM_REF_NUM; ++i) drm_ht_remove(&tfile->ref_hash[i]); - spin_unlock(&tfile->lock); 
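/*
 * Editor's aside (not part of the patch): both ttm_object release hunks
 * above enforce the same rule - drm_ht_remove() frees the hash table
 * and may end up in vfree(), which can sleep, so it must run after the
 * spinlock is dropped. A hedged sketch; sketch_object_file is a
 * hypothetical stand-in for struct ttm_object_file.
 */
struct sketch_object_file {
        spinlock_t lock;
        struct drm_open_hash ref_hash[TTM_REF_NUM];
};

static void sketch_object_file_release(struct sketch_object_file *tfile)
{
        unsigned int i;

        spin_lock(&tfile->lock);
        /* ... drop the per-file references that need the lock ... */
        spin_unlock(&tfile->lock);

        /* Potentially sleeping teardown only once the lock is released. */
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);
}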
ttm_object_file_unref(&tfile); } EXPORT_SYMBOL(ttm_object_file_release); @@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) *p_tdev = NULL; - spin_lock(&tdev->object_lock); drm_ht_remove(&tdev->object_hash); - spin_unlock(&tdev->object_lock); kfree(tdev); } @@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile, prime = (struct ttm_prime_object *) dma_buf->priv; base = &prime->base; *handle = base->hash.key; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); dma_buf_put(dma_buf); diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c index ac90ffdb5912..ed0e636243b2 100644 --- a/drivers/gpu/drm/udl/udl_dmabuf.c +++ b/drivers/gpu/drm/udl/udl_dmabuf.c @@ -191,10 +191,10 @@ static struct dma_buf_ops udl_dmabuf_ops = { .detach = udl_detach_dma_buf, .map_dma_buf = udl_map_dma_buf, .unmap_dma_buf = udl_unmap_dma_buf, - .kmap = udl_dmabuf_kmap, - .kmap_atomic = udl_dmabuf_kmap_atomic, - .kunmap = udl_dmabuf_kunmap, - .kunmap_atomic = udl_dmabuf_kunmap_atomic, + .map = udl_dmabuf_kmap, + .map_atomic = udl_dmabuf_kmap_atomic, + .unmap = udl_dmabuf_kunmap, + .unmap_atomic = udl_dmabuf_kunmap_atomic, .mmap = udl_dmabuf_mmap, .release = drm_gem_dmabuf_release, }; diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c index 917dcb978c2c..0c87b1ac6b68 100644 --- a/drivers/gpu/drm/udl/udl_transfer.c +++ b/drivers/gpu/drm/udl/udl_transfer.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/fb.h> #include <linux/prefetch.h> +#include <asm/unaligned.h> #include <drm/drmP.h> #include "udl_drv.h" @@ -163,7 +164,7 @@ static void udl_compress_hline16( const u8 *const start = pixel; const uint16_t repeating_pixel_val16 = pixel_val16; - *(uint16_t *)cmd = cpu_to_be16(pixel_val16); + put_unaligned_be16(pixel_val16, cmd); cmd += 2; pixel += bpp; diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 70ec8ca8d9b1..4e8e27d50922 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -431,6 +431,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = { .verify_access = &virtio_gpu_verify_access, .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, .io_mem_free = &virtio_gpu_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, .move_notify = &virtio_gpu_bo_move_notify, .swap_notify = &virtio_gpu_bo_swap_notify, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 4c7f24a67a2e..35bf781e418e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -859,4 +859,5 @@ struct ttm_bo_driver vmw_bo_driver = { .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, .io_mem_free = &vmw_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f6e936d90517..4a641555b960 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -254,7 +254,7 @@ module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); module_param_named(force_coherent, vmw_force_coherent, int, 0600); MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); -module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR 
| S_IWUSR); +module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index b399f03a988d..6b2708b4eafe 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, struct vmw_fence_obj **p_fence) { struct vmw_fence_obj *fence; - int ret; + int ret; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (unlikely(fence == NULL)) @@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman) } +/** + * vmw_fence_obj_lookup - Look up a user-space fence object + * + * @tfile: A struct ttm_object_file identifying the caller. + * @handle: A handle identifying the fence object. + * @return: A struct vmw_user_fence base ttm object on success or + * an error pointer on failure. + * + * The fence object is looked up and type-checked. The caller needs + * to have opened the fence object first, but since that happens on + * creation and fence objects aren't shareable, that's not an + * issue currently. + */ +static struct ttm_base_object * +vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle) +{ + struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle); + + if (!base) { + pr_err("Invalid fence object handle 0x%08lx.\n", + (unsigned long)handle); + return ERR_PTR(-EINVAL); + } + + if (base->refcount_release != vmw_user_fence_base_release) { + pr_err("Invalid fence object handle 0x%08lx.\n", + (unsigned long)handle); + ttm_base_object_unref(&base); + return ERR_PTR(-EINVAL); + } + + return base; +} + + int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -726,12 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data, arg->kernel_cookie = jiffies + wait_timeout; } - base = ttm_base_object_lookup(tfile, arg->handle); - if (unlikely(base == NULL)) { - pr_err("Wait invalid fence object handle 0x%08lx\n", - (unsigned long)arg->handle); - return -EINVAL; - } + base = vmw_fence_obj_lookup(tfile, arg->handle); + if (IS_ERR(base)) + return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); @@ -770,12 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); - base = ttm_base_object_lookup(tfile, arg->handle); - if (unlikely(base == NULL)) { - pr_err("Fence signaled invalid fence object handle 0x%08lx\n", - (unsigned long)arg->handle); - return -EINVAL; - } + base = vmw_fence_obj_lookup(tfile, arg->handle); + if (IS_ERR(base)) + return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); fman = fman_from_fence(fence); @@ -1022,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_fence_event_arg *) data; struct vmw_fence_obj *fence = NULL; struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); + struct ttm_object_file *tfile = vmw_fp->tfile; struct drm_vmw_fence_rep __user *user_fence_rep = (struct drm_vmw_fence_rep __user *)(unsigned long) arg->fence_rep; @@ -1035,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, */ if (arg->handle) { struct ttm_base_object *base = - ttm_base_object_lookup_for_ref(dev_priv->tdev, - arg->handle); - - if 
(unlikely(base == NULL)) { - DRM_ERROR("Fence event invalid fence object handle " - "0x%08lx.\n", - (unsigned long)arg->handle); - return -EINVAL; - } + vmw_fence_obj_lookup(tfile, arg->handle); + + if (IS_ERR(base)) + return PTR_ERR(base); + fence = &(container_of(base, struct vmw_user_fence, base)->fence); (void) vmw_fence_obj_reference(fence); if (user_fence_rep != NULL) { - bool existed; - ret = ttm_ref_object_add(vmw_fp->tfile, base, - TTM_REF_USAGE, &existed); + TTM_REF_USAGE, NULL, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed to reference a fence " "object.\n"); @@ -1095,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, return 0; out_no_create: if (user_fence_rep != NULL) - ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, - handle, TTM_REF_USAGE); + ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); out_no_ref_obj: vmw_fence_obj_unreference(&fence); return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index b8c6a03c8c54..5ec24fd801cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, param->value = dev_priv->has_dx; break; default: - DRM_ERROR("Illegal vmwgfx get param request: %d\n", - param->param); return -EINVAL; } @@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); - if (unlikely(arg->pad64 != 0)) { + if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) { DRM_ERROR("Illegal GET_3D_CAP argument.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c index 31fe32d8d65a..0d42a46521fc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c @@ -108,10 +108,10 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = { .map_dma_buf = vmw_prime_map_dma_buf, .unmap_dma_buf = vmw_prime_unmap_dma_buf, .release = NULL, - .kmap = vmw_prime_dmabuf_kmap, - .kmap_atomic = vmw_prime_dmabuf_kmap_atomic, - .kunmap = vmw_prime_dmabuf_kunmap, - .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic, + .map = vmw_prime_dmabuf_kmap, + .map_atomic = vmw_prime_dmabuf_kmap_atomic, + .unmap = vmw_prime_dmabuf_kunmap, + .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic, .mmap = vmw_prime_dmabuf_mmap, .vmap = vmw_prime_dmabuf_vmap, .vunmap = vmw_prime_dmabuf_vunmap, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index fa1037ec8e5f..7d591f653dfa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -546,7 +546,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo, return ret; ret = ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_SYNCCPU_WRITE, &existed); + TTM_REF_SYNCCPU_WRITE, &existed, false); if (ret != 0 || existed) ttm_bo_synccpu_write_release(&user_bo->dma.base); @@ -730,7 +730,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile, *handle = user_bo->prime.base.hash.key; return ttm_ref_object_add(tfile, &user_bo->prime.base, - TTM_REF_USAGE, NULL); + TTM_REF_USAGE, NULL, false); } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 41b9d20d6ae7..7681341fe32b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -713,11 
+713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 128; num_sizes = 0; - for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) + for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { + if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) + return -EINVAL; num_sizes += req->mip_levels[i]; + } - if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * - DRM_VMW_MAX_MIP_LEVELS) + if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS || + num_sizes == 0) return -EINVAL; size = vmw_user_surface_size + 128 + @@ -890,17 +893,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, uint32_t handle; struct ttm_base_object *base; int ret; + bool require_exist = false; if (handle_type == DRM_VMW_HANDLE_PRIME) { ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); if (unlikely(ret != 0)) return ret; } else { - if (unlikely(drm_is_render_client(file_priv))) { - DRM_ERROR("Render client refused legacy " - "surface reference.\n"); - return -EACCES; - } + if (unlikely(drm_is_render_client(file_priv))) + require_exist = true; + if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { DRM_ERROR("Locked master refused legacy " "surface reference.\n"); @@ -928,17 +930,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, /* * Make sure the surface creator has the same - * authenticating master. + * authenticating master, or is already registered with us. */ if (drm_is_primary_client(file_priv) && - user_srf->master != file_priv->master) { - DRM_ERROR("Trying to reference surface outside of" - " master domain.\n"); - ret = -EACCES; - goto out_bad_resource; - } + user_srf->master != file_priv->master) + require_exist = true; - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); + ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, + require_exist); if (unlikely(ret != 0)) { DRM_ERROR("Could not add a reference to a surface.\n"); goto out_bad_resource; diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile index 1ab9bceee755..8cdf9e4ae772 100644 --- a/drivers/gpu/ipu-v3/Makefile +++ b/drivers/gpu/ipu-v3/Makefile @@ -2,4 +2,8 @@ obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \ ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \ - ipu-pre.o ipu-prg.o ipu-smfc.o ipu-vdi.o + ipu-smfc.o ipu-vdi.o + +ifdef CONFIG_DRM + imx-ipu-v3-objs += ipu-pre.o ipu-prg.o +endif diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 7aefccec31b1..16d556816b5f 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -1401,7 +1401,8 @@ static int ipu_probe(struct platform_device *pdev) ipu->id = of_alias_get_id(np, "ipu"); - if (of_device_is_compatible(np, "fsl,imx6qp-ipu")) { + if (of_device_is_compatible(np, "fsl,imx6qp-ipu") && + IS_ENABLED(CONFIG_DRM)) { ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev, "fsl,prg", ipu->id); if (!ipu->prg_priv) @@ -1538,8 +1539,10 @@ static struct platform_driver imx_ipu_driver = { }; static struct platform_driver * const drivers[] = { +#if IS_ENABLED(CONFIG_DRM) &ipu_pre_drv, &ipu_prg_drv, +#endif &imx_ipu_driver, }; diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 63ec1993eaaa..d162f0dc76e3 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid) hid->group = HID_GROUP_WACOM; break; case USB_VENDOR_ID_SYNAPTICS: - if (hid->group == HID_GROUP_GENERIC || - hid->group == HID_GROUP_MULTITOUCH_WIN_8) + if 
(hid->group == HID_GROUP_GENERIC) if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) /* @@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 4e2648c86c8c..b26c030926c1 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1028,6 +1028,9 @@ #define USB_DEVICE_ID_UGEE_TABLET_45 0x0045 #define USB_DEVICE_ID_YIYNOVA_TABLET 0x004d +#define USB_VENDOR_ID_UGEE 0x28bd +#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071 + #define USB_VENDOR_ID_UNITEC 0x227d #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19 0x0a19 diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c index 1509d7287ff3..e3e6e5c893cc 100644 --- a/drivers/hid/hid-uclogic.c +++ b/drivers/hid/hid-uclogic.c @@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev, } break; case USB_DEVICE_ID_UGTIZER_TABLET_GP0610: + case USB_DEVICE_ID_UGEE_TABLET_EX07S: /* If this is the pen interface */ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { rc = uclogic_tablet_enable(hdev); @@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, { } }; MODULE_DEVICE_TABLE(hid, uclogic_devices); diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c index ca5759c0c318..43a6cb078193 100644 --- a/drivers/iio/accel/hid-sensor-accel-3d.c +++ b/drivers/iio/accel/hid-sensor-accel-3d.c @@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev) name = "accel_3d"; channel_spec = accel_3d_channels; channel_size = sizeof(accel_3d_channels); + indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels); } else { name = "gravity"; channel_spec = gravity_channels; channel_size = sizeof(gravity_channels); + indio_dev->num_channels = ARRAY_SIZE(gravity_channels); } ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage, &accel_state->common_attributes); @@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev) goto error_free_dev_mem; } - indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels); indio_dev->dev.parent = &pdev->dev; indio_dev->info = &accel_3d_info; indio_dev->name = name; diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index d6c372bb433b..c17596f7ed2c 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data); if (ret 
< 0) break; - + ret = IIO_VAL_INT; *val = data; break; case IIO_CHAN_INFO_CALIBBIAS: @@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev, for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++) st->core.calib[i] = st->core.resp->sensor_offset.offset[i]; - + ret = IIO_VAL_INT; *val = st->core.calib[idx]; break; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c index 7afdac42ed42..01e02b9926d4 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c @@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, { struct hid_sensor_hub_attribute_info timestamp; + s32 value; + int ret; hid_sensor_get_reporting_interval(hsdev, usage_id, st); @@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, st->sensitivity.index, st->sensitivity.report_id, timestamp.index, timestamp.report_id); + ret = sensor_hub_get_feature(hsdev, + st->power_state.report_id, + st->power_state.index, sizeof(value), &value); + if (ret < 0) + return ret; + if (value < 0) + return -EINVAL; + return 0; } EXPORT_SYMBOL(hid_sensor_parse_common_attributes); diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index f7fcfa886f72..821919dd245b 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -27,6 +27,7 @@ #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #include <linux/regmap.h> +#include <linux/delay.h> #include "bmg160.h" #define BMG160_IRQ_NAME "bmg160_event" @@ -52,6 +53,9 @@ #define BMG160_DEF_BW 100 #define BMG160_REG_PMU_BW_RES BIT(7) +#define BMG160_GYRO_REG_RESET 0x14 +#define BMG160_GYRO_RESET_VAL 0xb6 + #define BMG160_REG_INT_MAP_0 0x17 #define BMG160_INT_MAP_0_BIT_ANY BIT(1) @@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data) int ret; unsigned int val; + /* + * Reset chip to get it in a known good state. A delay of 30ms after + * reset is required according to the datasheet. 
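/*
 * Editor's aside (not part of the patch): the bmg160 hunk above adds a
 * reset-then-settle sequence. A sketch using the constants the patch
 * defines, with return-value checking added for illustration;
 * usleep_range() needs <linux/delay.h>, which the patch also includes.
 */
static int sketch_bmg160_reset(struct regmap *regmap)
{
        int ret;

        ret = regmap_write(regmap, BMG160_GYRO_REG_RESET,
                           BMG160_GYRO_RESET_VAL);
        if (ret < 0)
                return ret;

        /* Per the datasheet, give the chip 30 ms to come out of reset. */
        usleep_range(30000, 30700);

        return 0;
}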
+ */ + regmap_write(data->regmap, BMG160_GYRO_REG_RESET, + BMG160_GYRO_RESET_VAL); + usleep_range(30000, 30700); + ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val); if (ret < 0) { dev_err(dev, "Error reading reg_chip_id\n"); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index d18ded45bedd..3ff91e02fee3 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type, tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1); return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); case IIO_VAL_FRACTIONAL_LOG2: - tmp = (s64)vals[0] * 1000000000LL >> vals[1]; - tmp1 = do_div(tmp, 1000000000LL); - tmp0 = tmp; - return snprintf(buf, len, "%d.%09u", tmp0, tmp1); + tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]); + tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1); + return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1)); case IIO_VAL_INT_MULTIPLE: { int i; diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 5f2680855552..fd0edca0e656 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, .multi_read_bit = true, + .bootime = 2, }, }; diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 91cbe86b25c8..fcbed35e95a8 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count) rx_wr->sg_list = &rx_desc->rx_sg; rx_wr->num_sge = 1; rx_wr->next = rx_wr + 1; + rx_desc->in_use = false; } rx_wr--; rx_wr->next = NULL; /* mark end of work requests list */ @@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc) struct ib_recv_wr *rx_wr_failed, rx_wr; int ret; + if (!rx_desc->in_use) { + /* + * if the descriptor is not in-use we already reposted it + * for recv, so just silently return + */ + return 0; + } + + rx_desc->in_use = false; rx_wr.wr_cqe = &rx_desc->rx_cqe; rx_wr.sg_list = &rx_desc->rx_sg; rx_wr.num_sge = 1; @@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc) return; } + rx_desc->in_use = true; + ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr, ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); @@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr); isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn); - if (ret) - transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); - else - isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); + if (ret) { + /* + * transport_generic_request_failure() expects to have + * plus two references to handle queue-full, so re-add + * one here as target-core will have already dropped + * it after the first isert_put_datain() callback. + */ + kref_get(&cmd->cmd_kref); + transport_generic_request_failure(cmd, cmd->pi_err); + } else { + /* + * XXX: isert_put_response() failure is not retried. 
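/*
 * Editor's aside (not part of the patch): a sketch of the
 * reference-compensation pattern in the isert hunk above. When a
 * callee consumes more references than the caller currently holds
 * (here, two for the queue-full case), the caller re-takes one before
 * handing off. sketch_cmd and sketch_request_failure are hypothetical.
 */
struct sketch_cmd {
        struct kref kref;
        /* ... */
};

static void sketch_request_failure(struct sketch_cmd *cmd, int err);

static void sketch_fail_command(struct sketch_cmd *cmd, int err)
{
        /* One reference was already dropped by the earlier completion. */
        kref_get(&cmd->kref);
        sketch_request_failure(cmd, err);
}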
+ */ + ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd); + if (ret) + pr_warn_ratelimited("isert_put_response() ret: %d\n", ret); + } } static void @@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc) cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; spin_unlock_bh(&cmd->istate_lock); - if (ret) { - target_put_sess_cmd(se_cmd); - transport_send_check_condition_and_sense(se_cmd, - se_cmd->pi_err, 0); - } else { + /* + * transport_generic_request_failure() will drop the extra + * se_cmd->cmd_kref reference after T10-PI error, and handle + * any non-zero ->queue_status() callback error retries. + */ + if (ret) + transport_generic_request_failure(se_cmd, se_cmd->pi_err); + else target_execute_cmd(se_cmd); - } } static void @@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) chain_wr = &isert_cmd->tx_desc.send_wr; } - isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); - isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd); - return 1; + rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr); + isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n", + isert_cmd, rc); + return rc; } static int isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) { struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); + int ret; isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done); isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done; - isert_rdma_rw_ctx_post(isert_cmd, conn->context, - &isert_cmd->tx_desc.tx_cqe, NULL); + ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context, + &isert_cmd->tx_desc.tx_cqe, NULL); - isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", - isert_cmd); - return 0; + isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n", + isert_cmd, ret); + return ret; } static int diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index c02ada57d7f5..87d994de8c91 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -60,7 +60,7 @@ #define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \ (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \ - sizeof(struct ib_cqe))) + sizeof(struct ib_cqe) + sizeof(bool))) #define ISCSI_ISER_SG_TABLESIZE 256 @@ -85,6 +85,7 @@ struct iser_rx_desc { u64 dma_addr; struct ib_sge rx_sg; struct ib_cqe rx_cqe; + bool in_use; char pad[ISER_RX_PAD_SIZE]; } __packed; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 155fcb3b6230..153b1ee13e03 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -202,6 +202,7 @@ static const struct xpad_device { { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, @@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ XPAD_XBOX360_VENDOR(0x1532), /* 
Razer Sabertooth */ + XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 15af9a9753e5..2d203b422129 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c @@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node, return -ENOMEM; } + raw_spin_lock_init(&cd->rlock); + cd->gpc_base = of_iomap(node, 0); if (!cd->gpc_base) { pr_err("fsl-gpcv2: unable to map gpc registers\n"); diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 1dfd1085a04f..9ca691d6c13b 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data) sizeof(avmb1_carddef)))) return -EFAULT; cdef.cardtype = AVM_CARDTYPE_B1; + cdef.cardnr = 0; } else { if ((retval = copy_from_user(&cdef, data, sizeof(avmb1_extcarddef)))) diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index e4c2c1a1e993..6735c8d6a445 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, *result = true; r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root, - from_cblock(begin), &cmd->dirty_cursor); + from_cblock(cmd->cache_blocks), &cmd->dirty_cursor); if (r) { DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__); return r; @@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd, return 0; } + begin = to_cblock(from_cblock(begin) + 1); + if (begin == end) + break; + r = dm_bitset_cursor_next(&cmd->dirty_cursor); if (r) { DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__); dm_bitset_cursor_end(&cmd->dirty_cursor); return r; } - - begin = to_cblock(from_cblock(begin) + 1); } dm_bitset_cursor_end(&cmd->dirty_cursor); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f8564d63982f..1e217ba84d09 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti) return r; /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */ - if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && + if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap && mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) { r = bitmap_resize(mddev->bitmap, mddev->dev_sectors, to_bytes(rs->requested_bitmap_chunk_sectors), 0); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 28955b94d2b2..0b081d170087 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, /* Undo dm_start_request() before requeuing */ rq_end_stats(md, rq); rq_completed(md, rq_data_dir(rq), false); + blk_mq_delay_run_hw_queue(hctx, 100/*ms*/); return BLK_MQ_RQ_QUEUE_BUSY; } diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 0f0eb8a3d922..78f36012eaca 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, block = fec_buffer_rs_block(v, fio, n, i); res = fec_decode_rs8(v, fio, block, &par[offset], neras); if (res < 0) { - 
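In the dm-cache-metadata hunk above, dm_bitset_cursor_begin() takes the number of entries rather than a starting index, hence from_cblock(cmd->cache_blocks); and the loop body is reordered so the cursor is only stepped while entries remain. Reduced to its pattern (hypothetical names):

        /* Advance the index first and stop before stepping the cursor,
         * so cursor_next() is never called past the final entry. */
        for (;;) {
                visit(cursor_value(&cursor));

                begin++;
                if (begin == end)
                        break;

                r = cursor_next(&cursor); /* safe: entries remain */
                if (r)
                        goto err;
        }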
dm_bufio_release(buf); - r = res; goto error; } @@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, done: r = corrected; error: + dm_bufio_release(buf); + if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); @@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io, &is_zero) == 0) { /* skip known zero blocks entirely */ if (is_zero) - continue; + goto done; /* * skip if we have already found the theoretical @@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (!verity_fec_is_enabled(v)) return -EOPNOTSUPP; + if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) { + DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name); + return -EIO; + } + + fio->level++; + if (type == DM_VERITY_BLOCK_TYPE_METADATA) block += v->data_blocks; @@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (r < 0) { r = fec_decode_rsb(v, io, fio, rsb, offset, true); if (r < 0) - return r; + goto done; } if (dest) @@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, r = verity_for_bv_block(v, io, iter, fec_bv_copy); } +done: + fio->level--; return r; } @@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io) memset(fio->bufs, 0, sizeof(fio->bufs)); fio->nbufs = 0; fio->output = NULL; + fio->level = 0; } /* diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 7fa0298b995e..bb31ce87a933 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -27,6 +27,9 @@ #define DM_VERITY_FEC_BUF_MAX \ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) +/* maximum recursion level for verity_fec_decode */ +#define DM_VERITY_FEC_MAX_RECURSION 4 + #define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" #define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" #define DM_VERITY_OPT_FEC_START "fec_start" @@ -58,6 +61,7 @@ struct dm_verity_fec_io { unsigned nbufs; /* number of buffers allocated */ u8 *output; /* buffer for corrected output */ size_t output_pos; + unsigned level; /* recursion level */ }; #ifdef CONFIG_DM_VERITY_FEC diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index fb6a177be461..2db0413f5d57 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -356,8 +356,8 @@ static struct dma_buf_ops vb2_dc_dmabuf_ops = { .detach = vb2_dc_dmabuf_ops_detach, .map_dma_buf = vb2_dc_dmabuf_ops_map, .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap, - .kmap = vb2_dc_dmabuf_ops_kmap, - .kmap_atomic = vb2_dc_dmabuf_ops_kmap, + .map = vb2_dc_dmabuf_ops_kmap, + .map_atomic = vb2_dc_dmabuf_ops_kmap, .vmap = vb2_dc_dmabuf_ops_vmap, .mmap = vb2_dc_dmabuf_ops_mmap, .release = vb2_dc_dmabuf_ops_release, diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index ecff8f492c4f..6fd1343b7c13 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -504,8 +504,8 @@ static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = { .detach = vb2_dma_sg_dmabuf_ops_detach, .map_dma_buf = vb2_dma_sg_dmabuf_ops_map, .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap, - .kmap = vb2_dma_sg_dmabuf_ops_kmap, - .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap, + .map = vb2_dma_sg_dmabuf_ops_kmap, + .map_atomic = vb2_dma_sg_dmabuf_ops_kmap, .vmap = vb2_dma_sg_dmabuf_ops_vmap, .mmap = 
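The dm-verity-fec hunks above bound re-entry: verity_fec_decode() can recurse when correcting a block that the correction path itself needs, so a per-io level counter refuses service beyond DM_VERITY_FEC_MAX_RECURSION. The guard in isolation (illustrative names):

        /* Bounded recursion: fail the request instead of overflowing
         * the kernel stack when one decode triggers another. */
        static int decode(struct fec_io *fio)
        {
                int r;

                if (fio->level >= MAX_RECURSION)
                        return -EIO;
                fio->level++;

                r = do_decode(fio); /* may call decode() again */

                fio->level--;
                return r;
        }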
vb2_dma_sg_dmabuf_ops_mmap, .release = vb2_dma_sg_dmabuf_ops_release, diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 3f778147cdef..27d1db3bb8cf 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -342,8 +342,8 @@ static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = { .detach = vb2_vmalloc_dmabuf_ops_detach, .map_dma_buf = vb2_vmalloc_dmabuf_ops_map, .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap, - .kmap = vb2_vmalloc_dmabuf_ops_kmap, - .kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap, + .map = vb2_vmalloc_dmabuf_ops_kmap, + .map_atomic = vb2_vmalloc_dmabuf_ops_kmap, .vmap = vb2_vmalloc_dmabuf_ops_vmap, .mmap = vb2_vmalloc_dmabuf_ops_mmap, .release = vb2_vmalloc_dmabuf_ops_release, diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 138f5ae75c0b..4d1fe8d95042 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) int work_done = 0; u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); - u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); + u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); /* Handle bus state changes */ diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index caed4e6960f8..11662f479e76 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev) devm_can_led_init(ndev); - dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", - priv->regs, ndev->irq); + dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq); return 0; fail_candev: diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index d05fbfdce5e5..5d6c40d86775 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -100,11 +100,6 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) goto err_exit; ndev->mtu = new_mtu; - if (netif_running(ndev)) { - aq_ndev_close(ndev); - aq_ndev_open(ndev); - } - err_exit: return err; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ee78444bfb88..cdb02991f249 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, dx_buff->mss = skb_shinfo(skb)->gso_size; dx_buff->is_txc = 1U; + dx_buff->is_ipv6 = + (ip_hdr(skb)->version == 6) ? 1U : 0U; + dx = aq_ring_next_dx(ring, dx); dx_buff = &ring->buff_ring[dx]; ++ret; @@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, if (skb->ip_summed == CHECKSUM_PARTIAL) { dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U; - dx_buff->is_tcp_cso = - (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U; - dx_buff->is_udp_cso = - (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U; + + if (ip_hdr(skb)->version == 4) { + dx_buff->is_tcp_cso = + (ip_hdr(skb)->protocol == IPPROTO_TCP) ? + 1U : 0U; + dx_buff->is_udp_cso = + (ip_hdr(skb)->protocol == IPPROTO_UDP) ? + 1U : 0U; + } else if (ip_hdr(skb)->version == 6) { + dx_buff->is_tcp_cso = + (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ? 
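The rcar_can probe message above loses its regs pointer because %p in dmesg discloses a kernel virtual address. If an address genuinely helps debugging, %pK is the guarded alternative, printing zeros or the real value according to kptr_restrict; the one-liner below is an illustration, not what the driver chose:

        dev_info(&pdev->dev, "device registered (regs @ %pK, IRQ%d)\n",
                 priv->regs, ndev->irq);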
+ 1U : 0U; + dx_buff->is_udp_cso = + (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ? + 1U : 0U; + } } for (; nr_frags--; ++frag_count) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 0358e6072d45..3a8a4aa13687 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self) self->hw_head = 0; self->sw_head = 0; self->sw_tail = 0; + spin_lock_init(&self->header.lock); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 257254645068..eecd6d1c4d73 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s { u8 len_l2; u8 len_l3; u8 len_l4; - u8 rsvd2; + u8 is_ipv6:1; + u8 rsvd2:7; u32 len_pkt; }; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index a2b746a2dd50..4ee15ff06a44 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, buff->len_l3 + buff->len_l2); is_gso = true; + + if (buff->is_ipv6) + txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6; } else { buff_pa_len = buff->len; @@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self, if (unlikely(buff->is_eop)) { txd->ctl |= HW_ATL_A0_TXD_CTL_EOP; txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB; + is_gso = false; } } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index cab2931dab9a..42150708191d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, buff->len_l3 + buff->len_l2); is_gso = true; + + if (buff->is_ipv6) + txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6; } else { buff_pa_len = buff->len; @@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self, if (unlikely(buff->is_eop)) { txd->ctl |= HW_ATL_B0_TXD_CTL_EOP; txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB; + is_gso = false; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 0a23034bbe3f..352beff796ae 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \ GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC)) -#define HW_INTERRUT_ASSERT_SET_0 \ +#define HW_INTERRUPT_ASSERT_SET_0 \ (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \ @@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\ AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\ AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR) -#define HW_INTERRUT_ASSERT_SET_1 \ +#define HW_INTERRUPT_ASSERT_SET_1 \ (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \ @@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 
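The aq_nic_map_skb() hunk completed above selects checksum-offload flags from the actual L3 header. Reading ip_hdr(skb)->version works for both families because the version nibble occupies the same first-byte position in IPv4 and IPv6 headers; note that nexthdr names the first extension header when extensions are present, so the v6 test can miss TCP/UDP behind them. The decision, condensed (sketch):

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (ip_hdr(skb)->version == 4) {
                        is_tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
                        is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
                } else if (ip_hdr(skb)->version == 6) {
                        is_tcp = ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP;
                        is_udp = ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP;
                }
        }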
idu_sb_id, AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\ AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR) -#define HW_INTERRUT_ASSERT_SET_2 \ +#define HW_INTERRUPT_ASSERT_SET_2 \ (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \ AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ac76fc251d26..a851f95c307a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) bnx2x_release_phy_lock(bp); } - if (attn & HW_INTERRUT_ASSERT_SET_0) { + if (attn & HW_INTERRUPT_ASSERT_SET_0) { val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); + val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0); REG_WR(bp, reg_offset, val); BNX2X_ERR("FATAL HW block attention set0 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_0)); + (u32)(attn & HW_INTERRUPT_ASSERT_SET_0)); bnx2x_panic(); } } @@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) BNX2X_ERR("FATAL error from DORQ\n"); } - if (attn & HW_INTERRUT_ASSERT_SET_1) { + if (attn & HW_INTERRUPT_ASSERT_SET_1) { int port = BP_PORT(bp); int reg_offset; @@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); + val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1); REG_WR(bp, reg_offset, val); BNX2X_ERR("FATAL HW block attention set1 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_1)); + (u32)(attn & HW_INTERRUPT_ASSERT_SET_1)); bnx2x_panic(); } } @@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) } } - if (attn & HW_INTERRUT_ASSERT_SET_2) { + if (attn & HW_INTERRUPT_ASSERT_SET_2) { int port = BP_PORT(bp); int reg_offset; @@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); val = REG_RD(bp, reg_offset); - val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); + val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2); REG_WR(bp, reg_offset, val); BNX2X_ERR("FATAL HW block attention set2 0x%x\n", - (u32)(attn & HW_INTERRUT_ASSERT_SET_2)); + (u32)(attn & HW_INTERRUPT_ASSERT_SET_2)); bnx2x_panic(); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 32de4589d16a..1f1e54ba0ecb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) for (j = 0; j < max_idx; j++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j]; + dma_addr_t mapping = rx_buf->mapping; void *data = rx_buf->data; if (!data) continue; - dma_unmap_single(&pdev->dev, rx_buf->mapping, - bp->rx_buf_use_size, bp->rx_dir); - rx_buf->data = NULL; - if (BNXT_RX_PAGE_MODE(bp)) + if (BNXT_RX_PAGE_MODE(bp)) { + mapping -= bp->rx_dma_offset; + dma_unmap_page(&pdev->dev, mapping, + PAGE_SIZE, bp->rx_dir); __free_page(data); - else + } else { + dma_unmap_single(&pdev->dev, mapping, + bp->rx_buf_use_size, + bp->rx_dir); kfree(data); + } } for (j = 0; j < max_agg_idx; j++) { @@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) return 0; } +static void bnxt_init_cp_rings(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; 
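The aq_ring.h hunk above carves the new is_ipv6 flag out of the reserved byte, so the __packed descriptor keeps its exact layout. A self-contained illustration of the technique with a compile-time size check (example struct, not the driver's):

        struct example_txc {
                u8 len_l2;
                u8 is_ipv6:1; /* new flag shares the old rsvd byte */
                u8 rsvd:7;
        } __packed;

        static inline void example_txc_layout_check(void)
        {
                BUILD_BUG_ON(sizeof(struct example_txc) != 2);
        }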
i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + ring->fw_ring_id = INVALID_HW_RING_ID; + } +} + static int bnxt_init_rx_rings(struct bnxt *bp) { int i, rc = 0; @@ -4732,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); if (rc) { netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", - rc, i); + i, rc); return rc; } } @@ -5006,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) { + bnxt_init_cp_rings(bp); bnxt_init_rx_rings(bp); bnxt_init_tx_rings(bp); bnxt_init_ring_grps(bp, irq_re_init); diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 9e59663a6ead..0f6811860ad5 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1930,13 +1930,13 @@ static void bfa_ioc_send_enable(struct bfa_ioc *ioc) { struct bfi_ioc_ctrl_req enable_req; - struct timeval tv; bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, bfa_ioc_portid(ioc)); enable_req.clscode = htons(ioc->clscode); - do_gettimeofday(&tv); - enable_req.tv_sec = ntohl(tv.tv_sec); + enable_req.rsvd = htons(0); + /* overflow in 2106 */ + enable_req.tv_sec = ntohl(ktime_get_real_seconds()); bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); } @@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc) bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, bfa_ioc_portid(ioc)); + disable_req.clscode = htons(ioc->clscode); + disable_req.rsvd = htons(0); + /* overflow in 2106 */ + disable_req.tv_sec = ntohl(ktime_get_real_seconds()); bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 30e855004c57..02dd5246dfae 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -4939,8 +4939,9 @@ static int __be_cmd_set_logical_link_config(struct be_adapter *adapter, int link_state, int version, u8 domain) { - struct be_mcc_wrb *wrb; struct be_cmd_req_set_ll_link *req; + struct be_mcc_wrb *wrb; + u32 link_config = 0; int status; mutex_lock(&adapter->mcc_lock); @@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, if (link_state == IFLA_VF_LINK_STATE_ENABLE || link_state == IFLA_VF_LINK_STATE_AUTO) - req->link_config |= PLINK_ENABLE; + link_config |= PLINK_ENABLE; if (link_state == IFLA_VF_LINK_STATE_AUTO) - req->link_config |= PLINK_TRACK; + link_config |= PLINK_TRACK; + + req->link_config = cpu_to_le32(link_config); status = be_mcc_notify_wait(adapter); err: diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 992ebe973d25..f819843e2bae 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) nps_enet_tx_handler(ndev); work_done = nps_enet_rx_handler(ndev); - if (work_done < budget) { + if ((work_done < budget) && napi_complete_done(napi, work_done)) { u32 buf_int_enable_value = 0; - napi_complete_done(napi, work_done); - /* set tx_done and rx_rdy bits */ buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; diff --git 
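The bfa_ioc hunk above trades do_gettimeofday() for ktime_get_real_seconds() and annotates the field "overflow in 2106": the firmware interface carries seconds in 32 bits, and an unsigned 32-bit seconds-since-1970 counter wraps early in 2106 (a signed one wraps in January 2038). The arithmetic, as a standalone check:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* 2^32 seconds is roughly 136 years past the 1970
                 * epoch, which lands in February 2106. */
                uint64_t wrap = (uint64_t)UINT32_MAX + 1;

                printf("u32 seconds wraps after %llu s (~%llu years)\n",
                       (unsigned long long)wrap,
                       (unsigned long long)(wrap / 31556952ULL));
                return 0;
        }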
a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 928b0df2b8e0..ade6b3e4ed13 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -28,8 +28,10 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/property.h> #include <net/ip.h> #include <net/ncsi.h> diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 3239d27143b9..bdd8cdd732fb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) else *link_status = 0; - ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); - if (!ret) - *link_status = *link_status && sfp_prsnt; + if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) { + ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, + &sfp_prsnt); + if (!ret) + *link_status = *link_status && sfp_prsnt; + } mac_cb->link = *link_status; } @@ -855,7 +858,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) of_node_put(np); np = of_parse_phandle(to_of_node(mac_cb->fw_port), - "serdes-syscon", 0); + "serdes-syscon", 0); syscon = syscon_node_to_regmap(np); of_node_put(np); if (IS_ERR_OR_NULL(syscon)) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 90dbda792614..403ea9db6dbd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key( mac_key->high.bits.mac_3 = addr[3]; mac_key->low.bits.mac_4 = addr[4]; mac_key->low.bits.mac_5 = addr[5]; + mac_key->low.bits.port_vlan = 0; dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M, DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, @@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev, /* find the tcam entry index for promisc */ entry_index = dsaf_promisc_tcam_entry(port); + memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); + memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); + /* config key mask */ if (enable) { - memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data)); - memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask)); dsaf_set_field(tbl_tcam_data.low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, DSAF_TBL_TCAM_KEY_PORT_S, port); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index a2c22d084ce9..e13aa064a8e9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) return 0; } +int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +{ + union acpi_object *obj; + union acpi_object obj_args, argv4; + + obj_args.integer.type = ACPI_TYPE_INTEGER; + obj_args.integer.value = mac_cb->mac_id; + + argv4.type = ACPI_TYPE_PACKAGE, + argv4.package.count = 1, + argv4.package.elements = &obj_args, + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + hns_dsaf_acpi_dsm_uuid, 0, + HNS_OP_GET_SFP_STAT_FUNC, &argv4); + + if (!obj || obj->type != ACPI_TYPE_INTEGER) + return -ENODEV; + + *sfp_prsnt = 
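The hns_dsaf_set_mac_key() hunk above zeroes port_vlan before the dsaf_set_field() calls because such helpers read-modify-write their destination; bits left over from a previous key would otherwise survive. The shape of the bug and fix (hypothetical macro, modeled on the HNS helper):

        #define set_field(origin, mask, shift, val) \
                ((origin) = ((origin) & ~(mask)) | \
                            (((val) << (shift)) & (mask)))

        key->port_vlan = 0; /* clear stale bits before composing */
        set_field(key->port_vlan, VLAN_M, VLAN_S, vlan_id);
        set_field(key->port_vlan, PORT_M, PORT_S, port);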
obj->integer.value; + + ACPI_FREE(obj); + + return 0; +} + /** * hns_mac_config_sds_loopback - set loop back for serdes * @mac_cb: mac control block @@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi; misc_op->get_phy_if = hns_mac_get_phy_if_acpi; - misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; + misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi; misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; } else { diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2175cced402f..e9af89ad039c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev) /* Quiesce the device without resetting the hardware */ e1000e_down(adapter, false); e1000_free_irq(adapter); - e1000e_reset_interrupt_capability(adapter); } + e1000e_reset_interrupt_capability(adapter); /* Allow time for pending master requests to run */ e1000e_disable_pcie_master(&adapter->hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e8a8351c8ea9..82a95cc2c8ee 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_enable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_enable(&q_vector->napi); + } } /** @@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_disable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_disable(&q_vector->napi); + } } /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 55957246c0e8..b5d5519542e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, struct netdev_notifier_changeupper_info *info) { struct net_device *upper = info->upper_dev, *ndev_tmp; - struct netdev_lag_upper_info *lag_upper_info; + struct netdev_lag_upper_info *lag_upper_info = NULL; bool is_bonded; int bond_status = 0; int num_slaves = 0; @@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev, if (!netif_is_lag_master(upper)) return 0; - lag_upper_info = info->upper_info; + if (info->linking) + lag_upper_info = info->upper_info; /* The event may still be of interest if the slave does not belong to * us, but is enslaved to a master which has one or more of our netdevs diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 06c9f4100cb9..6ad44be08b33 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -25,6 +25,7 @@ #include <linux/of_irq.h> #include <linux/crc32.h> #include <linux/crc32c.h> +#include <linux/circ_buf.h> #include "moxart_ether.h" @@ -278,6 +279,13 @@ 
rx_next: return rx; } +static int moxart_tx_queue_space(struct net_device *ndev) +{ + struct moxart_mac_priv_t *priv = netdev_priv(ndev); + + return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM); +} + static void moxart_tx_finished(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); @@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev) tx_tail = TX_NEXT(tx_tail); } priv->tx_tail = tx_tail; + if (netif_queue_stopped(ndev) && + moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD) + netif_wake_queue(ndev); } static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) @@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct moxart_mac_priv_t *priv = netdev_priv(ndev); void *desc; unsigned int len; - unsigned int tx_head = priv->tx_head; + unsigned int tx_head; u32 txdes1; int ret = NETDEV_TX_BUSY; + spin_lock_irq(&priv->txlock); + + tx_head = priv->tx_head; desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); - spin_lock_irq(&priv->txlock); + if (moxart_tx_queue_space(ndev) == 1) + netif_stop_queue(ndev); + if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { net_dbg_ratelimited("no TX space for packet\n"); priv->stats.tx_dropped++; diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 93a9563ac7c6..afc32ec998c0 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -59,6 +59,7 @@ #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) #define TX_BUF_SIZE 1600 #define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) +#define TX_WAKE_THRESHOLD 16 #define RX_DESC_NUM 64 #define RX_DESC_NUM_MASK (RX_DESC_NUM-1) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 9179a99563af..a41377e26c07 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3275,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); + unregister_netdev(nn->netdev); + if (nn->xdp_prog) bpf_prog_put(nn->xdp_prog); if (nn->bpf_offload_xdp) nfp_net_xdp_offload(nn, NULL); - unregister_netdev(nn->netdev); } diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 7cd76b6b5cb9..2ae852454780 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port, { bool want[OFDPA_CTRL_MAX] = { 0, }; bool prev_ctrls[OFDPA_CTRL_MAX]; - u8 uninitialized_var(prev_state); + u8 prev_state; int err; int i; - if (switchdev_trans_ph_prepare(trans)) { - memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); - prev_state = ofdpa_port->stp_state; - } - - if (ofdpa_port->stp_state == state) + prev_state = ofdpa_port->stp_state; + if (prev_state == state) return 0; + memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); ofdpa_port->stp_state = state; switch (state) { diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 9f3d9c67e3fe..fa674a8bda0c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave) static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) { u32 slave_port; + struct phy_device *phy; struct 
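The moxart hunks here pair a stop/wake threshold with CIRC_SPACE() over the ring indices: the xmit path stops the queue when it takes the last free descriptor, and the completion path wakes it only after TX_WAKE_THRESHOLD descriptors have drained, avoiding stop/wake ping-pong. The two halves side by side (condensed from the hunks):

        /* xmit path */
        if (CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM) == 1)
                netif_stop_queue(ndev);

        /* completion path */
        if (netif_queue_stopped(ndev) &&
            CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM) >=
            TX_WAKE_THRESHOLD)
                netif_wake_queue(ndev);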
cpsw_common *cpsw = priv->cpsw; soft_reset_slave(slave); @@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); if (slave->data->phy_node) { - slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, + phy = of_phy_connect(priv->ndev, slave->data->phy_node, &cpsw_adjust_link, 0, slave->data->phy_if); - if (!slave->phy) { + if (!phy) { dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", slave->data->phy_node->full_name, slave->slave_num); return; } } else { - slave->phy = phy_connect(priv->ndev, slave->data->phy_id, + phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, slave->data->phy_if); - if (IS_ERR(slave->phy)) { + if (IS_ERR(phy)) { dev_err(priv->dev, "phy \"%s\" not found on slave %d, err %ld\n", slave->data->phy_id, slave->slave_num, - PTR_ERR(slave->phy)); - slave->phy = NULL; + PTR_ERR(phy)); return; } } + slave->phy = phy; + phy_attached_info(slave->phy); phy_start(slave->phy); @@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) } cpsw_intr_enable(cpsw); + netif_trans_update(ndev); + netif_tx_wake_all_queues(ndev); } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index ffedad2a360a..15b920086251 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr memset(rd, 0, sizeof(*rd)); rd->hw = hwmap + i; rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); - if (rd->buf == NULL || - !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { + if (rd->buf) + busaddr = pci_map_single(pdev, rd->buf, len, dir); + if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) { if (rd->buf) { net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", __func__, rd->buf); @@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr rd = r->rd + j; busaddr = rd_get_addr(rd); rd_set_addr_status(rd, 0, 0); - if (busaddr) - pci_unmap_single(pdev, busaddr, len, dir); + pci_unmap_single(pdev, busaddr, len, dir); kfree(rd->buf); rd->buf = NULL; } diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 6b988f77da08..61941e29daae 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info, return 0; } +EXPORT_SYMBOL(mdiobus_register_board_info); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1be69d8bc909..a2bfc82e95d7 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (phydev->state > PHY_UP) + if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 1b52520715ae..f8c81f12d988 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) -static void ___team_compute_features(struct team *team) +static void __team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & 
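The vlsi_ir hunk above fixes a classic DMA API misuse: a bus address of zero can be a valid mapping on some platforms, so success must be tested with pci_dma_mapping_error() rather than by truth-testing the handle. The corrected shape, condensed:

        rd->buf = kmalloc(len, GFP_KERNEL | GFP_DMA);
        if (rd->buf)
                busaddr = pci_map_single(pdev, rd->buf, len, dir);
        if (!rd->buf || pci_dma_mapping_error(pdev, busaddr)) {
                /* unwind; earlier, successfully mapped entries can
                 * now be unmapped unconditionally */
                kfree(rd->buf);
                rd->buf = NULL;
        }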
NETIF_F_ALL_FOR_ALL; @@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team) team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; } -static void __team_compute_features(struct team *team) -{ - ___team_compute_features(team); - netdev_change_features(team->dev); -} - static void team_compute_features(struct team *team) { mutex_lock(&team->lock); - ___team_compute_features(team); + __team_compute_features(team); mutex_unlock(&team->lock); netdev_change_features(team->dev); } @@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev) team_notify_peers_fini(team); team_queue_override_fini(team); mutex_unlock(&team->lock); + netdev_change_features(dev); } static void team_destructor(struct net_device *dev) @@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev) mutex_lock(&team->lock); err = team_port_add(team, port_dev); mutex_unlock(&team->lock); + + if (!err) + netdev_change_features(dev); + return err; } @@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev) mutex_lock(&team->lock); err = team_port_del(team, port_dev); mutex_unlock(&team->lock); + + if (!err) + netdev_change_features(dev); + return err; } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index f5552aaaa77a..f3ae88fdf332 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -532,6 +532,7 @@ static const struct driver_info wwan_info = { #define LENOVO_VENDOR_ID 0x17ef #define NVIDIA_VENDOR_ID 0x0955 #define HP_VENDOR_ID 0x03f0 +#define MICROSOFT_VENDOR_ID 0x045e static const struct usb_device_id products[] = { /* BLACKLIST !! @@ -761,6 +762,20 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Microsoft Surface 2 dock (based on Realtek RTL8152) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* Microsoft Surface 3 dock (based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. 
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 156f7f85e486..2474618404f5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -908,7 +908,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ - {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0b1b9188625d..07f788c49d57 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -517,6 +517,7 @@ enum rtl8152_flags { /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda +#define VENDOR_ID_MICROSOFT 0x045e #define VENDOR_ID_SAMSUNG 0x04e8 #define VENDOR_ID_LENOVO 0x17ef #define VENDOR_ID_NVIDIA 0x0955 @@ -1294,6 +1295,7 @@ static void intr_callback(struct urb *urb) } } else { if (netif_carrier_ok(tp->netdev)) { + netif_stop_queue(tp->netdev); set_bit(RTL8152_LINK_CHG, &tp->flags); schedule_delayed_work(&tp->schedule, 0); } @@ -3169,6 +3171,9 @@ static void set_carrier(struct r8152 *tp) napi_enable(&tp->napi); netif_wake_queue(netdev); netif_info(tp, link, netdev, "carrier on\n"); + } else if (netif_queue_stopped(netdev) && + skb_queue_len(&tp->tx_queue) < tp->tx_qlen) { + netif_wake_queue(netdev); } } else { if (netif_carrier_ok(netdev)) { @@ -3702,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf) tp->rtl_ops.autosuspend_en(tp, false); napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); - if (netif_carrier_ok(tp->netdev)) - rtl_start_rx(tp); + + if (netif_carrier_ok(tp->netdev)) { + if (rtl8152_get_speed(tp) & LINK_STATUS) { + rtl_start_rx(tp); + } else { + netif_carrier_off(tp->netdev); + tp->rtl_ops.disable(tp); + netif_info(tp, link, tp->netdev, + "linking down\n"); + } + } + napi_enable(&tp->napi); clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); @@ -4507,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf) static struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)}, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 3de65ea6531a..453244805c52 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, " value=0x%04x index=0x%04x size=%d\n", cmd, reqtype, value, index, size); - if (data) { + if (size) { buf = kmalloc(size, GFP_KERNEL); if (!buf) goto out; @@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, USB_CTRL_GET_TIMEOUT); - if (err > 0 && err <= size) - memcpy(data, buf, err); + if (err > 0 && err <= size) { + if (data) + memcpy(data, buf, err); + else + netdev_dbg(dev->net, + "Huh? 
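The __usbnet_read_cmd() hunk beginning here keys the bounce-buffer allocation off size instead of data: usb_control_msg() needs a DMA-able kernel buffer whenever the transfer has a length, even if the caller supplied no destination. The read side, reduced:

        void *buf = NULL;

        if (size) {
                /* never hand caller or stack memory to the HCD;
                 * bounce through a freshly kmalloc'd buffer */
                buf = kmalloc(size, GFP_KERNEL);
                if (!buf)
                        goto out;
        }

        err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                              cmd, reqtype, value, index, buf, size,
                              USB_CTRL_GET_TIMEOUT);
        if (err > 0 && err <= size && data)
                memcpy(data, buf, err);
        kfree(buf);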
Data requested but thrown away.\n"); + } kfree(buf); out: return err; @@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, buf = kmemdup(data, size, GFP_KERNEL); if (!buf) goto out; - } + } else { + if (size) { + WARN_ON_ONCE(1); + err = -EINVAL; + goto out; + } + } err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), cmd, reqtype, value, index, buf, size, diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ea9890d61967..f36584616e7d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev) #define MIN_MTU ETH_MIN_MTU #define MAX_MTU ETH_MAX_MTU -static int virtnet_probe(struct virtio_device *vdev) +static int virtnet_validate(struct virtio_device *vdev) { - int i, err; - struct net_device *dev; - struct virtnet_info *vi; - u16 max_queue_pairs; - int mtu; - if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); @@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev) if (!virtnet_validate_features(vdev)) return -EINVAL; + if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { + int mtu = virtio_cread16(vdev, + offsetof(struct virtio_net_config, + mtu)); + if (mtu < MIN_MTU) + __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); + } + + return 0; +} + +static int virtnet_probe(struct virtio_device *vdev) +{ + int i, err; + struct net_device *dev; + struct virtnet_info *vi; + u16 max_queue_pairs; + int mtu; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, @@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev) offsetof(struct virtio_net_config, mtu)); if (mtu < dev->min_mtu) { - __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); - } else { - dev->mtu = mtu; - dev->max_mtu = mtu; + /* Should never trigger: MTU was previously validated + * in virtnet_validate. + */ + dev_err(&vdev->dev, "device MTU appears to have changed " + "it is now %d < %d", mtu, dev->min_mtu); + goto free_stats; } + + dev->mtu = mtu; + dev->max_mtu = mtu; + + /* TODO: size buffers correctly in this case. 
*/ + if (dev->mtu > ETH_DATA_LEN) + vi->big_packets = true; } if (vi->any_header_sg) @@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, + .validate = virtnet_validate, .probe = virtnet_probe, .remove = virtnet_remove, .config_changed = virtnet_config_changed, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index de19c7c92bc6..85d949e03f79 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); struct brcmf_p2p_info *p2p = &cfg->p2p; struct brcmf_cfg80211_vif *vif; + enum nl80211_iftype iftype; bool wait_for_disable = false; int err; brcmf_dbg(TRACE, "delete P2P vif\n"); vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev); + iftype = vif->wdev.iftype; brcmf_cfg80211_arm_vif_event(cfg, vif); - switch (vif->wdev.iftype) { + switch (iftype) { case NL80211_IFTYPE_P2P_CLIENT: if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) wait_for_disable = true; @@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) BRCMF_P2P_DISABLE_TIMEOUT); err = 0; - if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) { + if (iftype != NL80211_IFTYPE_P2P_DEVICE) { brcmf_vif_clear_mgmt_ies(vif); err = brcmf_p2p_release_p2p_if(vif); } @@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) brcmf_remove_interface(vif->ifp, true); brcmf_cfg80211_arm_vif_event(cfg, NULL); - if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) + if (iftype != NL80211_IFTYPE_P2P_DEVICE) p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL; return err; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a260cd503200..077bfd8f4c0c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (ret) return ret; + if (count == 0) + return 0; iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, (count - 1), NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 99132ea16ede..c5734e1a02d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) qmask |= BIT(vif->hw_queue[ac]); } - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) qmask |= BIT(vif->cab_queue); return qmask; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 6927caecd48e..486dcceed17a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2401,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) return; rcu_read_lock(); - sta = mvm->fw_id_to_mac_id[notif->sta_id]; + sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); if (WARN_ON(IS_ERR_OR_NULL(sta))) { rcu_read_unlock(); return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c 
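The brcmf_p2p_del_vif() hunk above snapshots the interface type before teardown begins: brcmf_remove_interface() can free the vif, so the later branches must not reach back through it. Caching the field up front is the general cure for this use-after-free pattern:

        enum nl80211_iftype iftype;

        vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
        iftype = vif->wdev.iftype; /* snapshot before teardown */

        /* ... teardown below may free vif ... */

        if (iftype != NL80211_IFTYPE_P2P_DEVICE)
                p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;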
b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b51a2853cc80..9d28db7f56aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_get_wd_timeout(mvm, vif, false, false); int queue; - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; @@ -1837,7 +1838,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * enabled-cab_queue to the mask) */ if (iwl_mvm_is_dqa_supported(mvm) && - vif->type == NL80211_IFTYPE_AP) { + (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC)) { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvmvif->bcast_sta.sta_id, @@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (vif->type == NL80211_IFTYPE_AP) + if (vif->type == NL80211_IFTYPE_AP || + vif->type == NL80211_IFTYPE_ADHOC) iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, IWL_MAX_TID_COUNT, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 3f37075f4cde..1ba0a6f55503 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -506,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, switch (info->control.vif->type) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: /* * Handle legacy hostapd as well, where station may be added * only after assoc. Take care of the case where we send a @@ -517,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, if (info->hw_queue == info->control.vif->cab_queue) return info->hw_queue; - WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc)); + WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, + "fc=0x%02x", le16_to_cpu(fc)); return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) @@ -584,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) iwl_mvm_vif_from_mac80211(info.control.vif); if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info.control.vif->type == NL80211_IFTYPE_AP) { + info.control.vif->type == NL80211_IFTYPE_AP || + info.control.vif->type == NL80211_IFTYPE_ADHOC) { sta_id = mvmvif->bcast_sta.sta_id; queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index caea350f05aa..bdc379178e87 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val) unsigned long flags; struct rtl_c2hcmd *c2hcmd; - c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL); + c2hcmd = kmalloc(sizeof(*c2hcmd), + in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); if (!c2hcmd) goto label_err; - c2hcmd->val = kmalloc(len, GFP_KERNEL); + c2hcmd->val = kmalloc(len, + in_interrupt() ? 
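The rtl_c2hcmd_enqueue() hunk starting here chooses allocation flags at run time because the function can be entered from interrupt context, where GFP_KERNEL may sleep. in_interrupt() was the idiom of the day; an equivalent design passes gfp_t down from the caller, which states the context explicitly (sketch; the parameter is an illustration, not the driver's signature):

        static struct rtl_c2hcmd *c2h_alloc(u8 len, gfp_t gfp)
        {
                struct rtl_c2hcmd *c2h = kmalloc(sizeof(*c2h), gfp);

                if (!c2h)
                        return NULL;
                c2h->val = kmalloc(len, gfp); /* same context rules */
                if (!c2h->val) {
                        kfree(c2h);
                        return NULL;
                }
                return c2h;
        }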
GFP_ATOMIC : GFP_KERNEL); if (!c2hcmd->val) goto label_err2; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 23d4a1728cdf..351bac8f6503 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL); if (rc < 0) goto out_unlock; + nvdimm_bus_unlock(&nvdimm_bus->dev); + if (copy_to_user(p, buf, buf_len)) rc = -EFAULT; + + vfree(buf); + return rc; + out_unlock: nvdimm_bus_unlock(&nvdimm_bus->dev); out: diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index b3323c0697f6..ca6d572c48fc 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, } if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) { - if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) { + /* + * FIXME: nsio_rw_bytes() may be called from atomic + * context in the btt case and nvdimm_clear_poison() + * takes a sleeping lock. Until the locking can be + * reworked this capability requires that the namespace + * is not claimed by btt. + */ + if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) + && (!ndns->claim || !is_nd_btt(ndns->claim))) { long cleared; cleared = nvdimm_clear_poison(&ndns->dev, offset, size); diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 0eedc49e0d47..8b721321be5b 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create); int alias_dpa_busy(struct device *dev, void *data) { - resource_size_t map_end, blk_start, new, busy; + resource_size_t map_end, blk_start, new; struct blk_alloc_info *info = data; struct nd_mapping *nd_mapping; struct nd_region *nd_region; @@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data) retry: /* * Find the free dpa from the end of the last pmem allocation to - * the end of the interleave-set mapping that is not already - * covered by a blk allocation. + * the end of the interleave-set mapping. 
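The __nd_ioctl() hunk above moves nvdimm_bus_unlock() ahead of copy_to_user(): the copy may fault and take mmap_sem, so holding the bus lock across it risks a lock-order inversion (the rationale is inferred from the shape of the change). The safe ordering in general:

        mutex_lock(&bus_lock);
        /* ... fill buf under the lock ... */
        mutex_unlock(&bus_lock); /* drop before the copy can fault */

        if (copy_to_user(uptr, buf, len))
                rc = -EFAULT;
        vfree(buf);
        return rc;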
*/ - busy = 0; for_each_dpa_resource(ndd, res) { + if (strncmp(res->name, "pmem", 4) != 0) + continue; if ((res->start >= blk_start && res->start < map_end) || (res->end >= blk_start && res->end <= map_end)) { - if (strncmp(res->name, "pmem", 4) == 0) { - new = max(blk_start, min(map_end + 1, - res->end + 1)); - if (new != blk_start) { - blk_start = new; - goto retry; - } - } else - busy += min(map_end, res->end) - - max(nd_mapping->start, res->start) + 1; - } else if (nd_mapping->start > res->start - && map_end < res->end) { - /* total eclipse of the PMEM region mapping */ - busy += nd_mapping->size; - break; + new = max(blk_start, min(map_end + 1, res->end + 1)); + if (new != blk_start) { + blk_start = new; + goto retry; + } } } @@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data) return 1; } - info->available -= blk_start - nd_mapping->start + busy; + info->available -= blk_start - nd_mapping->start; return 0; } -static int blk_dpa_busy(struct device *dev, void *data) -{ - struct blk_alloc_info *info = data; - struct nd_mapping *nd_mapping; - struct nd_region *nd_region; - resource_size_t map_end; - int i; - - if (!is_nd_pmem(dev)) - return 0; - - nd_region = to_nd_region(dev); - for (i = 0; i < nd_region->ndr_mappings; i++) { - nd_mapping = &nd_region->mapping[i]; - if (nd_mapping->nvdimm == info->nd_mapping->nvdimm) - break; - } - - if (i >= nd_region->ndr_mappings) - return 0; - - map_end = nd_mapping->start + nd_mapping->size - 1; - if (info->res->start >= nd_mapping->start - && info->res->start < map_end) { - if (info->res->end <= map_end) { - info->busy = 0; - return 1; - } else { - info->busy -= info->res->end - map_end; - return 0; - } - } else if (info->res->end >= nd_mapping->start - && info->res->end <= map_end) { - info->busy -= nd_mapping->start - info->res->start; - return 0; - } else { - info->busy -= nd_mapping->size; - return 0; - } -} - /** * nd_blk_available_dpa - account the unused dpa of BLK region * @nd_mapping: container of dpa-resource-root + labels @@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) for_each_dpa_resource(ndd, res) { if (strncmp(res->name, "blk", 3) != 0) continue; - - info.res = res; - info.busy = resource_size(res); - device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy); - info.available -= info.busy; + info.available -= resource_size(res); } return info.available; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9b3b57fef446..9583a5f58a1d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req, memset(cmnd, 0, sizeof(*cmnd)); cmnd->dsm.opcode = nvme_cmd_dsm; cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); - cmnd->dsm.nr = segments - 1; + cmnd->dsm.nr = cpu_to_le32(segments - 1); cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); req->special_vec.bv_page = virt_to_page(range); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 9690beb15e69..d996ca73d3be 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl) } ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); + min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); if (error) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 47a479f26e5d..16f84eb0b95e 100644 --- a/drivers/nvme/host/rdma.c +++ 
b/drivers/nvme/host/rdma.c @@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) } ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); + min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); if (error) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index a7bcff45f437..76450b0c55f1 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req, u16 status; WARN_ON(req == NULL || slog == NULL); - if (req->cmd->get_log_page.nsid == 0xFFFFFFFF) + if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF)) status = nvmet_get_smart_log_all(req, slog); else status = nvmet_get_smart_log_nsid(req, slog); diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 4195115c7e54..6b0baa9caab9 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req) sector = le64_to_cpu(write_zeroes->slba) << (req->ns->blksize_shift - 9); - nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) << + nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) << (req->ns->blksize_shift - 9)) + 1; if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, @@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req) return 0; case nvme_cmd_dsm: req->execute = nvmet_execute_dsm; - req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * + req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) * sizeof(struct nvme_dsm_range); return 0; case nvme_cmd_write_zeroes: diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 22f7bc6bac7f..c7b0b6a52708 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) } ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); + min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); if (error) diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index dfb8a69afc28..d2d2ba5b8a68 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig @@ -89,6 +89,7 @@ config PCI_HISI depends on PCI_MSI_IRQ_DOMAIN select PCIEPORTBUS select PCIE_DW_HOST + select PCI_HOST_COMMON help Say Y here if you want PCIe controller support on HiSilicon Hip05 and Hip06 SoCs diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index fcd3ef845883..6d23683c0892 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c @@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, return 0; } +static const struct dw_pcie_ops dw_pcie_ops = { +}; + static int artpec6_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev) return -ENOMEM; pci->dev = dev; + pci->ops = &dw_pcie_ops; artpec6_pcie->pci = pci; diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index b6c832ba39dd..f20d494922ab 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c @@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, return 0; } +static const struct dw_pcie_ops dw_pcie_ops = { +}; + static int 
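The nvmet io-cmd hunk above fixes an operator-placement byte-order bug: le32_to_cpu(cmd->dsm.nr + 1) increments the little-endian representation before converting, which only happens to work on little-endian hosts; le32_to_cpu(cmd->dsm.nr) + 1 is correct everywhere. A userspace demonstration, simulating a big-endian CPU with an explicit byte swap:

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t bswap32(uint32_t v)
        {
                return (v >> 24) | ((v >> 8) & 0xff00) |
                       ((v & 0xff00) << 8) | (v << 24);
        }

        int main(void)
        {
                /* pretend le32_to_cpu() == bswap32() (big-endian CPU) */
                uint32_t wire = bswap32(7); /* 7 on the wire, LE */

                printf("wrong: %u\n", bswap32(wire + 1)); /* 16777223 */
                printf("right: %u\n", bswap32(wire) + 1); /* 8 */
                return 0;
        }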
dw_plat_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) return -ENOMEM; pci->dev = dev; + pci->ops = &dw_pcie_ops; dw_plat_pcie->pci = pci; diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index b89c373555c5..6e031b522529 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -375,7 +375,6 @@ static void thunder_pem_legacy_fw(struct acpi_pci_root *root, index -= node * PEM_MAX_DOM_IN_NODE; res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | FIELD_PREP(PEM_INDX_MASK, index); - res_pem->end = res_pem->start + SZ_16M - 1; res_pem->flags = IORESOURCE_MEM; } @@ -399,8 +398,15 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg) */ if (ret) { thunder_pem_legacy_fw(root, res_pem); - /* Reserve PEM-specific resources and PCI configuration space */ + /* + * Reserve 64K size PEM specific resources. The full 16M range + * size is required for thunder_pem_init() call. + */ + res_pem->end = res_pem->start + SZ_64K - 1; thunder_pem_reserve_range(dev, root->segment, res_pem); + res_pem->end = res_pem->start + SZ_16M - 1; + + /* Reserve PCI configuration space as well. */ thunder_pem_reserve_range(dev, root->segment, &cfg->res); } diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index d69046537b75..32822b0d9cd0 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2010,29 +2010,57 @@ out_err: return ERR_PTR(ret); } -static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) +static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev) { pctldev->p = create_pinctrl(pctldev->dev, pctldev); - if (!IS_ERR(pctldev->p)) { - kref_get(&pctldev->p->users); - pctldev->hog_default = - pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); - if (IS_ERR(pctldev->hog_default)) { - dev_dbg(pctldev->dev, - "failed to lookup the default state\n"); - } else { - if (pinctrl_select_state(pctldev->p, - pctldev->hog_default)) - dev_err(pctldev->dev, - "failed to select default state\n"); - } + if (PTR_ERR(pctldev->p) == -ENODEV) { + dev_dbg(pctldev->dev, "no hogs found\n"); - pctldev->hog_sleep = - pinctrl_lookup_state(pctldev->p, - PINCTRL_STATE_SLEEP); - if (IS_ERR(pctldev->hog_sleep)) - dev_dbg(pctldev->dev, - "failed to lookup the sleep state\n"); + return 0; + } + + if (IS_ERR(pctldev->p)) { + dev_err(pctldev->dev, "error claiming hogs: %li\n", + PTR_ERR(pctldev->p)); + + return PTR_ERR(pctldev->p); + } + + kref_get(&pctldev->p->users); + pctldev->hog_default = + pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT); + if (IS_ERR(pctldev->hog_default)) { + dev_dbg(pctldev->dev, + "failed to lookup the default state\n"); + } else { + if (pinctrl_select_state(pctldev->p, + pctldev->hog_default)) + dev_err(pctldev->dev, + "failed to select default state\n"); + } + + pctldev->hog_sleep = + pinctrl_lookup_state(pctldev->p, + PINCTRL_STATE_SLEEP); + if (IS_ERR(pctldev->hog_sleep)) + dev_dbg(pctldev->dev, + "failed to lookup the sleep state\n"); + + return 0; +} + +int pinctrl_enable(struct pinctrl_dev *pctldev) +{ + int error; + + error = pinctrl_claim_hogs(pctldev); + if (error) { + dev_err(pctldev->dev, "could not claim hogs: %i\n", + error); + mutex_destroy(&pctldev->mutex); + kfree(pctldev); + + return error; } mutex_lock(&pinctrldev_list_mutex); @@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev) return 0; } +EXPORT_SYMBOL_GPL(pinctrl_enable); /** * 
pinctrl_register() - register a pin controller device @@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, if (IS_ERR(pctldev)) return pctldev; - error = pinctrl_create_and_start(pctldev); - if (error) { - mutex_destroy(&pctldev->mutex); - kfree(pctldev); - + error = pinctrl_enable(pctldev); + if (error) return ERR_PTR(error); - } return pctldev; } EXPORT_SYMBOL_GPL(pinctrl_register); +/** + * pinctrl_register_and_init() - register and init pin controller device + * @pctldesc: descriptor for this pin controller + * @dev: parent device for this pin controller + * @driver_data: private pin controller data for this pin controller + * @pctldev: pin controller device + * + * Note that pinctrl_enable() still needs to be manually called after + * this once the driver is ready. + */ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev) { struct pinctrl_dev *p; - int error; p = pinctrl_init_controller(pctldesc, dev, driver_data); if (IS_ERR(p)) @@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, */ *pctldev = p; - error = pinctrl_create_and_start(p); - if (error) { - mutex_destroy(&p->mutex); - kfree(p); - *pctldev = NULL; - - return error; - } - return 0; } EXPORT_SYMBOL_GPL(pinctrl_register_and_init); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index a7ace9e1ad81..74bd90dfd7b1 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev, dev_info(&pdev->dev, "initialized IMX pinctrl driver\n"); - return 0; + return pinctrl_enable(ipctl->pctl); free: imx_free_resources(ipctl); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index f80134e3e0b6..9ff790174906 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -13,6 +13,7 @@ * published by the Free Software Foundation. */ +#include <linux/dmi.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } +/* + * Certain machines seem to hardcode Linux IRQ numbers in their ACPI + * tables. Since we leave GPIOs that are not capable of generating + * interrupts out of the irqdomain the numbering will be different and + * cause devices using the hardcoded IRQ numbers to fail. In order not to + * break such machines we will only mask pins from irqdomain if the machine + * is not listed below. 
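For context on the Cherryview hunk here: dmi_check_system() walks a zero-terminated table of struct dmi_system_id entries and returns the number of entries matching the running machine's DMI data, so a non-zero return means "this machine needs the quirk". A minimal sketch of the pattern, with a hypothetical table name and placeholder match strings (only struct dmi_system_id, DMI_MATCH() and dmi_check_system() are real kernel APIs):

#include <linux/dmi.h>

/* Hypothetical quirk table; the match strings are placeholders. */
static const struct dmi_system_id example_quirk_table[] = {
	{
		.ident = "Example machine with hardcoded IRQ numbers",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Board"),
		},
	},
	{ }	/* zero-filled terminating entry */
};

static bool example_machine_needs_quirk(void)
{
	/* dmi_check_system() returns the number of matching entries */
	return dmi_check_system(example_quirk_table) > 0;
}

The hunk that follows applies the same pattern: need_valid_mask stays true only when no entry of chv_no_valid_mask matches.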
+ */ +static const struct dmi_system_id chv_no_valid_mask[] = { + { + /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ + .ident = "Acer Chromebook (CYAN)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), + DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), + DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), + }, + } +}; + static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) { const struct chv_gpio_pinrange *range; struct gpio_chip *chip = &pctrl->chip; + bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); int ret, i, offset; *chip = chv_gpio_chip; @@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) chip->label = dev_name(pctrl->dev); chip->parent = pctrl->dev; chip->base = -1; - chip->irq_need_valid_mask = true; + chip->irq_need_valid_mask = need_valid_mask; ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl); if (ret) { @@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) intsel &= CHV_PADCTRL0_INTSEL_MASK; intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; - if (intsel >= pctrl->community->nirqs) + if (need_valid_mask && intsel >= pctrl->community->nirqs) clear_bit(i, chip->irq_valid_mask); } diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 8b2d45e85bae..9c267dcda094 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev) dev_info(pcs->dev, "%i pins at pa %p size %u\n", pcs->desc.npins, pcs->base, pcs->size); - return 0; + return pinctrl_enable(pcs->pctl); free: pcs_free_resources(pcs); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index f9b49967f512..63e51b56a22a 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { /* pin banks of exynos5433 pin-controller - ALIVE */ static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = { - EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), - EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), - EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), - EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), - EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), - EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), - EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), - EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), - EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), + EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), + EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04), + EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08), + EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c), + EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1), + EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1), }; /* pin banks of exynos5433 pin-controller - AUD */ static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = { - EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), - EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), + EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00), + EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), }; /* pin banks of exynos5433 pin-controller - CPIF */ static const struct samsung_pin_bank_data exynos5433_pin_banks2[] 
__initconst = { - EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), + EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00), }; /* pin banks of exynos5433 pin-controller - eSE */ static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = { - EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), + EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00), }; /* pin banks of exynos5433 pin-controller - FINGER */ static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = { - EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), + EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00), }; /* pin banks of exynos5433 pin-controller - FSYS */ static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = { - EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), - EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), - EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), - EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), - EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), - EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), + EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00), + EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04), + EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08), + EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c), + EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10), + EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14), }; /* pin banks of exynos5433 pin-controller - IMEM */ static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = { - EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), + EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00), }; /* pin banks of exynos5433 pin-controller - NFC */ static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = { - EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), + EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00), }; /* pin banks of exynos5433 pin-controller - PERIC */ static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = { - EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), - EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), - EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), - EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), - EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), - EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), - EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), - EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), - EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), - EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), - EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), - EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), - EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), - EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), - EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), - EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), - EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), + EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00), + EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04), + EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08), + EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c), + EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10), + EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14), + EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18), + EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c), + EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20), + EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24), + EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28), + EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c), + EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30), + EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34), + 
EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38), + EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c), + EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40), }; /* pin banks of exynos5433 pin-controller - TOUCH */ static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = { - EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), + EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00), }; /* diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h index a473092fb8d2..cd046eb7d705 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.h +++ b/drivers/pinctrl/samsung/pinctrl-exynos.h @@ -79,17 +79,6 @@ .name = id \ } -#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \ - { \ - .type = &bank_type_alive, \ - .pctl_offset = reg, \ - .nr_pins = pins, \ - .eint_type = EINT_TYPE_WKUP, \ - .eint_offset = offs, \ - .name = id, \ - .pctl_res_idx = pctl_idx, \ - } \ - #define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs) \ { \ .type = &exynos5433_bank_type_off, \ diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index 08150a321be6..a70157f0acf4 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc) pmx->pctl_desc.pins = pmx->pins; pmx->pctl_desc.npins = pfc->info->nr_pins; - return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, - &pmx->pctl); + ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx, + &pmx->pctl); + if (ret) { + dev_err(pfc->dev, "could not register: %i\n", ret); + + return ret; + } + + return pinctrl_enable(pmx->pctl); } diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index 717e3404900c..362c50918c13 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iod); + return pinctrl_enable(iod->pctl); + exit_out: of_node_put(np); return ret; diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c index 053088b9b66e..c1527cb645be 100644 --- a/drivers/pwm/pwm-lpss-pci.c +++ b/drivers/pwm/pwm-lpss-pci.c @@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { .clk_rate = 19200000, .npwm = 4, .base_unit_bits = 22, + .bypass = true, +}; + +/* Tangier */ +static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = { + .clk_rate = 19200000, + .npwm = 4, + .base_unit_bits = 22, }; static int pwm_lpss_probe_pci(struct pci_dev *pdev, @@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info}, { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info}, - { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info}, + { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info}, { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info}, { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info}, { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info}, diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c index b22b6fdadb9a..5d6ed1507d29 100644 --- a/drivers/pwm/pwm-lpss-platform.c +++ b/drivers/pwm/pwm-lpss-platform.c @@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = { .clk_rate = 19200000, .npwm = 4, .base_unit_bits = 22, + 
.bypass = true, }; static int pwm_lpss_probe_platform(struct platform_device *pdev) diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c index 689d2c1cbead..8db0d40ccacd 100644 --- a/drivers/pwm/pwm-lpss.c +++ b/drivers/pwm/pwm-lpss.c @@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value) writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM); } -static int pwm_lpss_update(struct pwm_device *pwm) +static int pwm_lpss_wait_for_update(struct pwm_device *pwm) { struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip); const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM; @@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm) u32 val; int err; - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); - /* * PWM Configuration register has SW_UPDATE bit that is set when a new * configuration is written to the register. The bit is automatically @@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm, pwm_lpss_write(pwm, ctrl); } +static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond) +{ + if (cond) + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); +} + static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { @@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; } pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - ret = pwm_lpss_update(pwm); + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false); + ret = pwm_lpss_wait_for_update(pwm); if (ret) { pm_runtime_put(chip->dev); return ret; } - pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE); + pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true); } else { ret = pwm_lpss_is_updating(pwm); if (ret) return ret; pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period); - return pwm_lpss_update(pwm); + pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE); + return pwm_lpss_wait_for_update(pwm); } } else if (pwm_is_enabled(pwm)) { pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE); diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h index c94cd7c2695d..98306bb02cfe 100644 --- a/drivers/pwm/pwm-lpss.h +++ b/drivers/pwm/pwm-lpss.h @@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo { unsigned long clk_rate; unsigned int npwm; unsigned long base_unit_bits; + bool bypass; }; struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r, diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c index ef89df1f7336..744d56197286 100644 --- a/drivers/pwm/pwm-rockchip.c +++ b/drivers/pwm/pwm-rockchip.c @@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +static int rockchip_pwm_enable(struct pwm_chip *chip, + struct pwm_device *pwm, + bool enable, + enum pwm_polarity polarity) +{ + struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); + int ret; + + if (enable) { + ret = clk_enable(pc->clk); + if (ret) + return ret; + } + + pc->data->set_enable(chip, pwm, enable, polarity); + + if (!enable) + clk_disable(pc->clk); + + return 0; +} + static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { @@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return ret; if (state->polarity != curstate.polarity && enabled) { - pc->data->set_enable(chip, pwm, false, state->polarity); + 
ret = rockchip_pwm_enable(chip, pwm, false, state->polarity); + if (ret) + goto out; enabled = false; } ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period); if (ret) { if (enabled != curstate.enabled) - pc->data->set_enable(chip, pwm, !enabled, - state->polarity); - + rockchip_pwm_enable(chip, pwm, !enabled, + state->polarity); goto out; } - if (state->enabled != enabled) - pc->data->set_enable(chip, pwm, state->enabled, - state->polarity); + if (state->enabled != enabled) { + ret = rockchip_pwm_enable(chip, pwm, state->enabled, + state->polarity); + if (ret) + goto out; + } /* * Update the state with the real hardware, which can differ a bit diff --git a/drivers/reset/core.c b/drivers/reset/core.c index f1e5e65388bb..cd739d2fa160 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc) } EXPORT_SYMBOL_GPL(reset_control_status); -static struct reset_control *__reset_control_get( +static struct reset_control *__reset_control_get_internal( struct reset_controller_dev *rcdev, unsigned int index, bool shared) { @@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get( return rstc; } -static void __reset_control_put(struct reset_control *rstc) +static void __reset_control_put_internal(struct reset_control *rstc) { lockdep_assert_held(&reset_list_mutex); @@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node, } /* reset_list_mutex also protects the rcdev's reset_control list */ - rstc = __reset_control_get(rcdev, rstc_id, shared); + rstc = __reset_control_get_internal(rcdev, rstc_id, shared); mutex_unlock(&reset_list_mutex); @@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node, } EXPORT_SYMBOL_GPL(__of_reset_control_get); +struct reset_control *__reset_control_get(struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + if (dev->of_node) + return __of_reset_control_get(dev->of_node, id, index, shared, + optional); + + return optional ? NULL : ERR_PTR(-EINVAL); +} +EXPORT_SYMBOL_GPL(__reset_control_get); + /** * reset_control_put - free the reset controller * @rstc: reset controller @@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc) return; mutex_lock(&reset_list_mutex); - __reset_control_put(rstc); + __reset_control_put_internal(rstc); mutex_unlock(&reset_list_mutex); } EXPORT_SYMBOL_GPL(reset_control_put); @@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev, if (!ptr) return ERR_PTR(-ENOMEM); - rstc = __of_reset_control_get(dev ? dev->of_node : NULL, - id, index, shared, optional); + rstc = __reset_control_get(dev, id, index, shared, optional); if (!IS_ERR(rstc)) { *ptr = rstc; devres_add(dev, ptr); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 40f1136f5568..058db724b5a2 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain, rc = -EIO; goto out; } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN( + "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } /* process response cprb param block */ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); @@ -761,9 +767,10 @@ out: } /* - * Fetch just the mkvp value via query_crypto_facility from adapter. 
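The pkey rework that follows widens fetch_mkvp() from a single output value to a two-element array: mkvp[0] carries the card's current master key verification pattern and mkvp[1] the old one, so pkey_findcard() can fall back to a card whose master key was recently rolled. A hedged sketch of that convention, with a hypothetical caller wrapped around the fetch_mkvp() defined here (callers in the patch treat a zero return as success):

/* Hypothetical helper illustrating the mkvp[2] matching convention. */
static bool example_card_matches_key(u16 card, u16 dom, u64 token_mkvp)
{
	u64 mkvp[2];	/* [0] = current master key VP, [1] = old one */

	if (fetch_mkvp(card, dom, mkvp) != 0)
		return false;
	if (token_mkvp == mkvp[0])
		return true;		/* current master key matches */
	return token_mkvp == mkvp[1];	/* key wrapped under the old master key */
}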
+ * Fetch the current and old mkvp values via + * query_crypto_facility from adapter. */ -static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) +static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2]) { int rc, found = 0; size_t rlen, vlen; @@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) rc = query_crypto_facility(cardnr, domain, "STATICSA", rarray, &rlen, varray, &vlen); if (rc == 0 && rlen > 8*8 && vlen > 184+8) { - if (rarray[64] == '2') { + if (rarray[8*8] == '2') { /* current master key state is valid */ - *mkvp = *((u64 *)(varray + 184)); + mkvp[0] = *((u64 *)(varray + 184)); + mkvp[1] = *((u64 *)(varray + 172)); found = 1; } } @@ -796,14 +804,14 @@ struct mkvp_info { struct list_head list; u16 cardnr; u16 domain; - u64 mkvp; + u64 mkvp[2]; }; /* a list with mkvp_info entries */ static LIST_HEAD(mkvp_list); static DEFINE_SPINLOCK(mkvp_list_lock); -static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) +static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2]) { int rc = -ENOENT; struct mkvp_info *ptr; @@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) list_for_each_entry(ptr, &mkvp_list, list) { if (ptr->cardnr == cardnr && ptr->domain == domain) { - *mkvp = ptr->mkvp; + memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64)); rc = 0; break; } @@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) return rc; } -static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) +static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2]) { int found = 0; struct mkvp_info *ptr; @@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) list_for_each_entry(ptr, &mkvp_list, list) { if (ptr->cardnr == cardnr && ptr->domain == domain) { - ptr->mkvp = mkvp; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); found = 1; break; } @@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) } ptr->cardnr = cardnr; ptr->domain = domain; - ptr->mkvp = mkvp; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); list_add(&ptr->list, &mkvp_list); } spin_unlock_bh(&mkvp_list_lock); @@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey, struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; struct zcrypt_device_matrix *device_matrix; u16 card, dom; - u64 mkvp; - int i, rc; + u64 mkvp[2]; + int i, rc, oi = -1; /* mkvp must not be zero */ if (t->mkvp == 0) @@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey, device_matrix->device[i].functions & 0x04) { /* an enabled CCA Coprocessor card */ /* try cached mkvp */ - if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && - t->mkvp == mkvp) { + if (mkvp_cache_fetch(card, dom, mkvp) == 0 && + t->mkvp == mkvp[0]) { if (!verify) break; /* verify: fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, &mkvp) == 0) { + if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp) + if (t->mkvp == mkvp[0]) break; } } @@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey, card = AP_QID_CARD(device_matrix->device[i].qid); dom = AP_QID_QUEUE(device_matrix->device[i].qid); /* fresh fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, &mkvp) == 0) { + if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp) + if (t->mkvp == mkvp[0]) break; + if (t->mkvp == mkvp[1] && oi < 0) + oi = i; } } + if (i >= MAX_ZDEV_ENTRIES && oi >= 0) { + /* old mkvp matched, use this card then */ + card = 
AP_QID_CARD(device_matrix->device[oi].qid); + dom = AP_QID_QUEUE(device_matrix->device[oi].qid); + } } - if (i < MAX_ZDEV_ENTRIES) { + if (i < MAX_ZDEV_ENTRIES || oi >= 0) { if (pcardnr) *pcardnr = card; if (pdomain) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index e7addea8741b..d9561e39c3b2 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); -int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); +int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, + int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int, int, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 315d8a2db7c0..9a5f99ccb122 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); * @card: qeth card structure, to check max. elems. * @skb: SKB address * @extra_elems: extra elems needed, to check against max. + * @data_offset: range starts at skb->data + data_offset * * Returns the number of pages, and thus QDIO buffer elements, needed to cover * skb data, including linear part and fragments. Checks if the result plus @@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); * Note: extra_elems is not included in the returned result. */ int qeth_get_elements_no(struct qeth_card *card, - struct sk_buff *skb, int extra_elems) + struct sk_buff *skb, int extra_elems, int data_offset) { int elements = qeth_get_elements_for_range( - (addr_t)skb->data, + (addr_t)skb->data + data_offset, (addr_t)skb->data + skb_headlen(skb)) + qeth_get_elements_for_frags(skb); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index bea483307618..af4e6a639fec 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) * chaining we can not send long frag lists */ if ((card->info.type != QETH_CARD_TYPE_IQD) && - !qeth_get_elements_no(card, new_skb, 0)) { + !qeth_get_elements_no(card, new_skb, 0, 0)) { int lin_rc = skb_linearize(new_skb); if (card->options.performance_stats) { @@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - elements = qeth_get_elements_no(card, new_skb, elements_needed); + elements = qeth_get_elements_no(card, new_skb, elements_needed, + (data_offset > 0) ? 
data_offset : 0); if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 06d0addcc058..653f0fb76573 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card, char daddr[16]; struct af_iucv_trans_hdr *iucv_hdr; - skb_pull(skb, 14); - card->dev->header_ops->create(skb, card->dev, 0, - card->dev->dev_addr, card->dev->dev_addr, - card->dev->addr_len); - skb_pull(skb, 14); - iucv_hdr = (struct af_iucv_trans_hdr *)skb->data; memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; hdr->hdr.l3.ext_flags = 0; - hdr->hdr.l3.length = skb->len; + hdr->hdr.l3.length = skb->len - ETH_HLEN; hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; + + iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN); memset(daddr, 0, sizeof(daddr)); daddr[0] = 0xfe; daddr[1] = 0x80; @@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) if ((card->info.type == QETH_CARD_TYPE_IQD) && !skb_is_nonlinear(skb)) { new_skb = skb; - if (new_skb->protocol == ETH_P_AF_IUCV) - data_offset = 0; - else - data_offset = ETH_HLEN; + data_offset = ETH_HLEN; hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) */ if ((card->info.type != QETH_CARD_TYPE_IQD) && ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || - (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) { + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) { int lin_rc = skb_linearize(new_skb); if (card->options.performance_stats) { @@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) elements = use_tso ? qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : - qeth_get_elements_no(card, new_skb, hdr_elements); + qeth_get_elements_no(card, new_skb, hdr_elements, + (data_offset > 0) ? 
data_offset : 0); if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d036a806f31c..d281492009fb 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1690,9 +1690,6 @@ struct aac_dev #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \ (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) -#define aac_adapter_check_health(dev) \ - (dev)->a_ops.adapter_check_health(dev) - #define aac_adapter_restart(dev, bled, reset_type) \ ((dev)->a_ops.adapter_restart(dev, bled, reset_type)) @@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor) return capacity; } +static inline int aac_adapter_check_health(struct aac_dev *dev) +{ + if (unlikely(pci_channel_offline(dev->pdev))) + return -1; + + return (dev)->a_ops.adapter_check_health(dev); +} + /* SCp.phase values */ #define AAC_OWNER_MIDLEVEL 0x101 #define AAC_OWNER_LOWLEVEL 0x102 diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index c8172f16cf33..1f4918355fdb 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac) spin_unlock_irqrestore(&aac->fib_lock, flagv); if (BlinkLED < 0) { - printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED); + printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n", + aac->name, BlinkLED); goto out; } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index b29afafc2885..5d5e272fd815 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, break; case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ case IPR_IOASA_IR_DUAL_IOA_DISABLED: - scsi_cmd->result |= (DID_PASSTHROUGH << 16); + /* + * exception: do not set DID_PASSTHROUGH on CHECK CONDITION + * so SCSI mid-layer and upper layers handle it accordingly. 
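As background for the ipr exception here (my reading of the mid-layer convention, not something stated by the patch): scsi_cmnd->result packs the SCSI status byte into bits 0-7 and the host byte (the DID_* codes) into bits 16-23, and DID_PASSTHROUGH makes the mid-layer complete the command without its normal error handling, which would skip the sense-data processing a CHECK CONDITION needs. A tiny sketch of the packing, using a hypothetical helper name:

#include <scsi/scsi.h>	/* SAM_STAT_CHECK_CONDITION, DID_PASSTHROUGH */

/* Hypothetical helper: compose a mid-layer result word. */
static inline u32 example_result(u32 host_byte, u32 status_byte)
{
	return (host_byte << 16) | status_byte;
}

/*
 * example_result(DID_PASSTHROUGH, SAM_STAT_CHECK_CONDITION) is exactly
 * the combination the hunk above now avoids producing.
 */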
+ */ + if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) + scsi_cmd->result |= (DID_PASSTHROUGH << 16); break; case IPR_IOASC_BUS_WAS_RESET: case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index ed58b9104f58..e10b91cc3c62 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, qedf_set_vlan_id(qedf, vid); /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ - complete(&qedf->fipvlan_compl); + if (!completion_done(&qedf->fipvlan_compl)) + complete(&qedf->fipvlan_compl); } } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 8e2a160490e6..cceddd995a4b 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) atomic_set(&qedf->num_offloads, 0); qedf->stop_io_on_error = false; pci_set_drvdata(pdev, qedf); + init_completion(&qedf->fipvlan_compl); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "QLogic FastLinQ FCoE Module qedf %s, " diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3e7011757c82..83d61d2142e9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1160,8 +1160,13 @@ static inline uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) { struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); + if (IS_P3P_TYPE(ha)) + return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT); + else + return ((RD_REG_DWORD(&reg->host_status)) == + ISP_REG_DISCONNECT); } /************************************************************************** diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 19125d72f322..e5a2d590a104 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q) scsi_starved_list_run(sdev->host); if (q->mq_ops) - blk_mq_start_stopped_hw_queues(q, false); + blk_mq_run_hw_queues(q, false); else blk_run_queue(q); } @@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error, !list_empty(&sdev->host->starved_list)) kblockd_schedule_work(&sdev->requeue_work); else - blk_mq_start_stopped_hw_queues(q, true); + blk_mq_run_hw_queues(q, true); } else { unsigned long flags; @@ -1974,7 +1974,7 @@ out: case BLK_MQ_RQ_QUEUE_BUSY: if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev)) - blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY); + blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); break; case BLK_MQ_RQ_QUEUE_ERROR: /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index fcfeddc79331..35ad5e8a31ab 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, #define READ_CAPACITY_RETRIES_ON_RESET 10 +/* + * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set + * and the reported logical block size is bigger than 512 bytes. Note + * that last_sector is a u64 and therefore logical_to_sectors() is not + * applicable. 
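A worked example of the overflow this new sd.c helper guards against, assuming 4096-byte logical blocks: ilog2(4096) - 9 = 3, so a reported maximum LBA of 0x7fffffff gives last_sector = 0x80000000 << 3 = 0x400000000, which exceeds U32_MAX and no longer fits the 32-bit sector_t used when CONFIG_LBDAF is unset. A standalone restatement of the 32-bit case, with a hypothetical function name:

#include <linux/kernel.h>	/* U32_MAX */
#include <linux/log2.h>		/* ilog2() */

/* Mirrors sd_addressable_capacity() for the sizeof(sector_t) == 4 case. */
static bool example_fits_32bit_sector_t(u64 lba, unsigned int sector_size)
{
	u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);

	return last_sector <= U32_MAX;
}

/* example_fits_32bit_sector_t(0x7fffffff, 4096) == false */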
+ */ +static bool sd_addressable_capacity(u64 lba, unsigned int sector_size) +{ + u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); + + if (sizeof(sector_t) == 4 && last_sector > U32_MAX) + return false; + + return true; +} + static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, unsigned char *buffer) { @@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, return -ENODEV; } - if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { + if (!sd_addressable_capacity(lba, sector_size)) { sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " "kernel compiled with support for large block " "devices.\n"); @@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, return sector_size; } - if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { + if (!sd_addressable_capacity(lba, sector_size)) { sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " "kernel compiled with support for large block " "devices.\n"); @@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk) q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); } else - rw_max = BLK_DEF_MAX_SECTORS; + rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), + (sector_t)BLK_DEF_MAX_SECTORS); /* Combine with controller limits */ q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 0b29b9329b1c..a8f630213a1a 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd) unsigned char *buffer; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + unsigned int ms_len = 128; int rc, n; static const char *loadmech[] = @@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd) scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* ask for mode page 0x2a */ - rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, + rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, SR_TIMEOUT, 3, &data, NULL); - if (!scsi_status_is_good(rc)) { + if (!scsi_status_is_good(rc) || data.length > ms_len || + data.header_length + data.block_descriptor_length > data.length) { /* failed, drive doesn't have capabilities mode page */ cd->cdi.speed = 1; cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 7cbad0d45b9c..6ba270e0494d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) ret = PTR_ERR(vmfile); goto out; } + vmfile->f_mode |= FMODE_LSEEK; asma->file = vmfile; } get_file(asma->file); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index f45115fce4eb..95a7f1648c00 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1020,10 +1020,10 @@ static const struct dma_buf_ops dma_buf_ops = { .release = ion_dma_buf_release, .begin_cpu_access = ion_dma_buf_begin_cpu_access, .end_cpu_access = ion_dma_buf_end_cpu_access, - .kmap_atomic = ion_dma_buf_kmap, - .kunmap_atomic = ion_dma_buf_kunmap, - .kmap = ion_dma_buf_kmap, - .kunmap = ion_dma_buf_kunmap, + .map_atomic = ion_dma_buf_kmap, + .unmap_atomic = ion_dma_buf_kunmap, + .map = ion_dma_buf_kmap, + .unmap = ion_dma_buf_kunmap, }; struct dma_buf *ion_share_dma_buf(struct 
ion_client *client, diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index a91802432f2f..e3f9ed3690b7 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *); int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd) { - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); - return 0; + return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); } EXPORT_SYMBOL(iscsit_queue_rsp); diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index bf40f03755dd..344e8448869c 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid( static int lio_queue_data_in(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + struct iscsi_conn *conn = cmd->conn; cmd->i_state = ISTATE_SEND_DATAIN; - cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd); - - return 0; + return conn->conn_transport->iscsit_queue_data_in(conn, cmd); } static int lio_write_pending(struct se_cmd *se_cmd) @@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd) static int lio_queue_status(struct se_cmd *se_cmd) { struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd); + struct iscsi_conn *conn = cmd->conn; cmd->i_state = ISTATE_SEND_STATUS; if (cmd->se_cmd.scsi_status || cmd->sense_reason) { - iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state); - return 0; + return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state); } - cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd); - - return 0; + return conn->conn_transport->iscsit_queue_status(conn, cmd); } static void lio_queue_tm_rsp(struct se_cmd *se_cmd) diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index e65bf78ceef3..fce627628200 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) SET_PSTATE_REPLY_OPTIONAL(param); /* - * The GlobalSAN iSCSI Initiator for MacOSX does - * not respond to MaxBurstLength, FirstBurstLength, - * DefaultTime2Wait or DefaultTime2Retain parameter keys. - * So, we set them to 'reply optional' here, and assume the - * the defaults from iscsi_parameters.h if the initiator - * is not RFC compliant and the keys are not negotiated. 
- */ - if (!strcmp(param->name, MAXBURSTLENGTH)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, FIRSTBURSTLENGTH)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, DEFAULTTIME2WAIT)) - SET_PSTATE_REPLY_OPTIONAL(param); - if (!strcmp(param->name, DEFAULTTIME2RETAIN)) - SET_PSTATE_REPLY_OPTIONAL(param); - /* * Required for gPXE iSCSI boot client */ if (!strcmp(param->name, MAXCONNECTIONS)) diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 5041a9c8bdcb..7d3e2fcc26a0 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue( } } -void iscsit_add_cmd_to_response_queue( +int iscsit_add_cmd_to_response_queue( struct iscsi_cmd *cmd, struct iscsi_conn *conn, u8 state) @@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue( if (!qr) { pr_err("Unable to allocate memory for" " struct iscsi_queue_req\n"); - return; + return -ENOMEM; } INIT_LIST_HEAD(&qr->qr_list); qr->cmd = cmd; @@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue( spin_unlock_bh(&conn->response_queue_lock); wake_up(&conn->queues_wq); + return 0; } struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn) @@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) { struct se_cmd *se_cmd = NULL; int rc; + bool op_scsi = false; /* * Determine if a struct se_cmd is associated with * this struct iscsi_cmd. */ switch (cmd->iscsi_opcode) { case ISCSI_OP_SCSI_CMD: - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, true, shutdown); + op_scsi = true; /* * Fallthrough */ case ISCSI_OP_SCSI_TMFUNC: - rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); - if (!rc && shutdown && se_cmd && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, true, shutdown); + se_cmd = &cmd->se_cmd; + __iscsit_free_cmd(cmd, op_scsi, shutdown); + rc = transport_generic_free_cmd(se_cmd, shutdown); + if (!rc && shutdown && se_cmd->se_sess) { + __iscsit_free_cmd(cmd, op_scsi, shutdown); target_put_sess_cmd(se_cmd); } break; diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 8ff08856516a..9e4197af8708 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd struct iscsi_conn_recovery **, itt_t); extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *); -extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); +extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8); extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *); extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fd7c16a7ca6e..fc4a9c303d55 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set the ASYMMETRIC ACCESS State */ - buf[off++] |= (atomic_read( - &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff); + buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 
0xff; /* * Set supported ASYMMETRIC ACCESS State bits */ @@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd) spin_lock(&lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; - out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); + out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; // XXX: keeps using tg_pt_gp witout reference after unlock @@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata( } /* - * Called with tg_pt_gp->tg_pt_gp_md_mutex held + * Called with tg_pt_gp->tg_pt_gp_transition_mutex held */ static int core_alua_update_tpg_primary_metadata( struct t10_alua_tg_pt_gp *tg_pt_gp) @@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata( "alua_access_state=0x%02x\n" "alua_access_status=0x%02x\n", tg_pt_gp->tg_pt_gp_id, - tg_pt_gp->tg_pt_gp_alua_pending_state, + tg_pt_gp->tg_pt_gp_alua_access_state, tg_pt_gp->tg_pt_gp_alua_access_status); snprintf(path, ALUA_METADATA_PATH_LEN, @@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) spin_unlock(&tg_pt_gp->tg_pt_gp_lock); } -static void core_alua_do_transition_tg_pt_work(struct work_struct *work) -{ - struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, - struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); - struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; - bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == - ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); - - /* - * Update the ALUA metadata buf that has been allocated in - * core_alua_do_port_transition(), this metadata will be written - * to struct file. - * - * Note that there is the case where we do not want to update the - * metadata when the saved metadata is being parsed in userspace - * when setting the existing port access state and access status. - * - * Also note that the failure to write out the ALUA metadata to - * struct file does NOT affect the actual ALUA transition. - */ - if (tg_pt_gp->tg_pt_gp_write_metadata) { - mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex); - core_alua_update_tpg_primary_metadata(tg_pt_gp); - mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex); - } - /* - * Set the current primary ALUA access state to the requested new state - */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - tg_pt_gp->tg_pt_gp_alua_pending_state); - - pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" - " from primary access state %s to %s\n", (explicit) ? 
"explicit" : - "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), - tg_pt_gp->tg_pt_gp_id, - core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state), - core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); - - core_alua_queue_state_change_ua(tg_pt_gp); - - spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - - if (tg_pt_gp->tg_pt_gp_transition_complete) - complete(tg_pt_gp->tg_pt_gp_transition_complete); -} - static int core_alua_do_transition_tg_pt( struct t10_alua_tg_pt_gp *tg_pt_gp, int new_state, int explicit) { - struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; - DECLARE_COMPLETION_ONSTACK(wait); + int prev_state; + mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex); /* Nothing to be done here */ - if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) + if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; + } - if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) + if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return -EAGAIN; - - /* - * Flush any pending transitions - */ - if (!explicit) - flush_work(&tg_pt_gp->tg_pt_gp_transition_work); + } /* * Save the old primary ALUA access state, and set the current state * to ALUA_ACCESS_STATE_TRANSITION. */ - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_TRANSITION); + prev_state = tg_pt_gp->tg_pt_gp_alua_access_state; + tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION; tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA; core_alua_queue_state_change_ua(tg_pt_gp); - if (new_state == ALUA_ACCESS_STATE_TRANSITION) + if (new_state == ALUA_ACCESS_STATE_TRANSITION) { + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; - - tg_pt_gp->tg_pt_gp_alua_previous_state = - atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); - tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; + } /* * Check for the optional ALUA primary state transition delay @@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt( msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs); /* - * Take a reference for workqueue item + * Set the current primary ALUA access state to the requested new state */ - spin_lock(&dev->t10_alua.tg_pt_gps_lock); - atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&dev->t10_alua.tg_pt_gps_lock); + tg_pt_gp->tg_pt_gp_alua_access_state = new_state; - schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); - if (explicit) { - tg_pt_gp->tg_pt_gp_transition_complete = &wait; - wait_for_completion(&wait); - tg_pt_gp->tg_pt_gp_transition_complete = NULL; + /* + * Update the ALUA metadata buf that has been allocated in + * core_alua_do_port_transition(), this metadata will be written + * to struct file. + * + * Note that there is the case where we do not want to update the + * metadata when the saved metadata is being parsed in userspace + * when setting the existing port access state and access status. + * + * Also note that the failure to write out the ALUA metadata to + * struct file does NOT affect the actual ALUA transition. + */ + if (tg_pt_gp->tg_pt_gp_write_metadata) { + core_alua_update_tpg_primary_metadata(tg_pt_gp); } + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" + " from primary access state %s to %s\n", (explicit) ? 
"explicit" : + "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), + tg_pt_gp->tg_pt_gp_id, + core_alua_dump_state(prev_state), + core_alua_dump_state(new_state)); + + core_alua_queue_state_change_ua(tg_pt_gp); + + mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex); return 0; } @@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, } INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list); - mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); + mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex); spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); - INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, - core_alua_do_transition_tg_pt_work); tg_pt_gp->tg_pt_gp_dev = dev; - atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, - ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); + tg_pt_gp->tg_pt_gp_alua_access_state = + ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED; /* * Enable both explicit and implicit ALUA support by default */ @@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp( dev->t10_alua.alua_tg_pt_gps_counter--; spin_unlock(&dev->t10_alua.tg_pt_gps_lock); - flush_work(&tg_pt_gp->tg_pt_gp_transition_work); - /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in @@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) "Primary Access Status: %s\nTG Port Secondary Access" " State: %s\nTG Port Secondary Access Status: %s\n", config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id, - core_alua_dump_state(atomic_read( - &tg_pt_gp->tg_pt_gp_alua_access_state)), + core_alua_dump_state( + tg_pt_gp->tg_pt_gp_alua_access_state), core_alua_dump_status( tg_pt_gp->tg_pt_gp_alua_access_status), atomic_read(&lun->lun_tg_pt_secondary_offline) ? 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 38b5025e4c7a..70657fd56440 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", - atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state)); + to_tg_pt_gp(item)->tg_pt_gp_alua_access_state); } static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index d8a16ca6baa5..d1e6cab8e3d3 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link( pr_err("Source se_lun->lun_se_dev does not exist\n"); return -EINVAL; } + if (lun->lun_shutdown) { + pr_err("Unable to create mappedlun symlink because" + " lun->lun_shutdown=true\n"); + return -EINVAL; + } se_tpg = lun->lun_tpg; nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 6fb191914f45..dfaef4d3b2d2 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -642,6 +642,8 @@ void core_tpg_remove_lun( */ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev); + lun->lun_shutdown = true; + core_clear_lun_from_tpg(lun, tpg); /* * Wait for any active I/O references to percpu se_lun->lun_ref to @@ -663,6 +665,8 @@ void core_tpg_remove_lun( } if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) hlist_del_rcu(&lun->link); + + lun->lun_shutdown = false; mutex_unlock(&tpg->tpg_lun_mutex); percpu_ref_exit(&lun->lun_ref); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b1a3cdb29468..a0cd56ee5fe9 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache; struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); +static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); static void transport_handle_queue_full(struct se_cmd *cmd, - struct se_device *dev); + struct se_device *dev, int err, bool write_pending); static int transport_put_cmd(struct se_cmd *cmd); static void target_complete_ok_work(struct work_struct *work); @@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work) if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) transport_write_pending_qf(cmd); - else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) + else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || + cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) transport_complete_qf(cmd); } } @@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, } trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; goto check_stop; default: @@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, } ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; check_stop: @@ -1739,8 +1741,7 @@ check_stop: return; queue_full: - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, 
cmd->se_dev, ret, false); } EXPORT_SYMBOL(transport_generic_request_failure); @@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd) int ret = 0; transport_complete_task_attr(cmd); + /* + * If a fabric driver ->write_pending() or ->queue_data_in() callback + * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and + * the same callbacks should not be retried. Return CHECK_CONDITION + * if a scsi_status is not already set. + * + * If a fabric driver ->queue_status() has returned non-zero, always + * keep retrying no matter what. + */ + if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { + if (cmd->scsi_status) + goto queue_status; - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { - trace_target_cmd_complete(cmd); - ret = cmd->se_tfo->queue_status(cmd); - goto out; + cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; + cmd->scsi_status = SAM_STAT_CHECK_CONDITION; + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; + translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); + goto queue_status; } + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + goto queue_status; + switch (cmd->data_direction) { case DMA_FROM_DEVICE: if (cmd->scsi_status) @@ -2007,19 +2024,33 @@ queue_status: break; } -out: if (ret < 0) { - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } -static void transport_handle_queue_full( - struct se_cmd *cmd, - struct se_device *dev) +static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, + int err, bool write_pending) { + /* + * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or + * ->queue_data_in() callbacks from new process context. + * + * Otherwise, for other errors, transport_complete_qf() will send + * CHECK_CONDITION via ->queue_status() instead of attempting to + * retry associated fabric driver data-transfer callbacks. + */ + if (err == -EAGAIN || err == -ENOMEM) { + cmd->t_state = (write_pending) ? 
TRANSPORT_COMPLETE_QF_WP : + TRANSPORT_COMPLETE_QF_OK; + } else { + pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); + cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; + } + spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc_mb(&dev->dev_qf_count); @@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work) WARN_ON(!cmd->scsi_status); ret = transport_send_check_condition_and_sense( cmd, 0, 1); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work) } else if (rc) { ret = transport_send_check_condition_and_sense(cmd, rc, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2134,7 +2165,7 @@ queue_rsp: if (target_read_prot_action(cmd)) { ret = transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; transport_lun_remove_cmd(cmd); @@ -2144,7 +2175,7 @@ queue_rsp: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; case DMA_TO_DEVICE: @@ -2157,7 +2188,7 @@ queue_rsp: atomic_long_add(cmd->data_length, &cmd->se_lun->lun_stats.tx_data_octets); ret = cmd->se_tfo->queue_data_in(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; } @@ -2166,7 +2197,7 @@ queue_rsp: queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; break; default: @@ -2180,8 +2211,8 @@ queue_status: queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); - cmd->t_state = TRANSPORT_COMPLETE_QF_OK; - transport_handle_queue_full(cmd, cmd->se_dev); + + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } void target_free_sgl(struct scatterlist *sgl, int nents) @@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) + if (ret) goto queue_full; - /* fabric drivers should only return -EAGAIN or -ENOMEM as error */ - WARN_ON(ret); - - return (!ret) ? 
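transport_handle_queue_full() now receives the fabric callback's return value and classifies it: -EAGAIN and -ENOMEM mean the same callback may be retried from workqueue context, anything else is parked as TRANSPORT_COMPLETE_QF_ERR so transport_complete_qf() answers with CHECK_CONDITION instead of retrying. The decision, condensed into a standalone helper (the enum here is a simplification; the state names mirror the diff's):

	#include <errno.h>
	#include <stdbool.h>

	enum qf_state {
		QF_WP,	/* TRANSPORT_COMPLETE_QF_WP:  retry ->write_pending()   */
		QF_OK,	/* TRANSPORT_COMPLETE_QF_OK:  retry data-in / status    */
		QF_ERR,	/* TRANSPORT_COMPLETE_QF_ERR: no retry, CHECK_CONDITION */
	};

	static enum qf_state classify_qf(int err, bool write_pending)
	{
		/* Transient resource exhaustion: park the command and retry
		 * the same fabric callback later from target_qf_do_work(). */
		if (err == -EAGAIN || err == -ENOMEM)
			return write_pending ? QF_WP : QF_OK;
		/* Anything else is treated as fatal; transport_complete_qf()
		 * sends an emulated CHECK_CONDITION instead of retrying. */
		return QF_ERR;
	}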
0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return 0; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - cmd->t_state = TRANSPORT_COMPLETE_QF_WP; - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); return 0; } EXPORT_SYMBOL(transport_generic_new_cmd); @@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd) int ret; ret = cmd->se_tfo->write_pending(cmd); - if (ret == -EAGAIN || ret == -ENOMEM) { + if (ret) { pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); - transport_handle_queue_full(cmd, cmd->se_dev); + transport_handle_queue_full(cmd, cmd->se_dev, ret, true); } } @@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) __releases(&cmd->t_state_lock) __acquires(&cmd->t_state_lock) { + int ret; + assert_spin_locked(&cmd->t_state_lock); WARN_ON_ONCE(!irqs_disabled()); @@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) trace_target_cmd_complete(cmd); spin_unlock_irq(&cmd->t_state_lock); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); spin_lock_irq(&cmd->t_state_lock); return 1; @@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status); void transport_send_task_abort(struct se_cmd *cmd) { unsigned long flags; + int ret; spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) { @@ -3090,7 +3122,9 @@ send_abort: cmd->t_task_cdb[0], cmd->tag); trace_target_cmd_complete(cmd); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } static void target_tmr_work(struct work_struct *work) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c6874c38a10b..f615c3bbb73e 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd) DATA_BLOCK_BITS); } -static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap, - struct scatterlist *data_sg, unsigned int data_nents) +static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, + bool bidi) { + struct se_cmd *se_cmd = cmd->se_cmd; int i, block; int block_remaining = 0; void *from, *to; size_t copy_bytes, from_offset; - struct scatterlist *sg; + struct scatterlist *sg, *data_sg; + unsigned int data_nents; + DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); + + bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); + + if (!bidi) { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } else { + uint32_t count; + + /* + * For bidi case, the first count blocks are for Data-Out + * buffer blocks, and before gathering the Data-In buffer + * the Data-Out buffer blocks should be discarded. 
+ */ + count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); + while (count--) { + block = find_first_bit(bitmap, DATA_BLOCK_BITS); + clear_bit(block, bitmap); + } + + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } for_each_sg(data_sg, sg, data_nents, i) { int sg_remaining = sg->length; to = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - block = find_first_bit(cmd_bitmap, + block = find_first_bit(bitmap, DATA_BLOCK_BITS); block_remaining = DATA_BLOCK_SIZE; - clear_bit(block, cmd_bitmap); + clear_bit(block, bitmap); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); @@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d return true; } +static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd) +{ + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE); + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); + data_length += round_up(se_cmd->t_bidi_data_sg->length, + DATA_BLOCK_SIZE); + } + + return data_length; +} + +static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd) +{ + size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); + + return data_length / DATA_BLOCK_SIZE; +} + static sense_reason_t tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) { @@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) uint32_t cmd_head; uint64_t cdb_off; bool copy_to_data_area; - size_t data_length; + size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS); if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) @@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) * expensive to tell how many regions are freed in the bitmap */ base_command_size = max(offsetof(struct tcmu_cmd_entry, - req.iov[se_cmd->t_bidi_data_nents + - se_cmd->t_data_nents]), + req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]), sizeof(struct tcmu_cmd_entry)); command_size = base_command_size + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); @@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) mb = udev->mb_addr; cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ - data_length = se_cmd->data_length; - if (se_cmd->se_cmd_flags & SCF_BIDI) { - BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); - data_length += se_cmd->t_bidi_data_sg->length; - } if ((command_size > (udev->cmdr_size / 2)) || data_length > udev->data_size) { pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " @@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) entry->req.iov_dif_cnt = 0; /* Handle BIDI commands */ - iov_cnt = 0; - alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, - se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); - entry->req.iov_bidi_cnt = iov_cnt; - + if (se_cmd->se_cmd_flags & SCF_BIDI) { + iov_cnt = 0; + iov++; + alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, + se_cmd->t_bidi_data_nents, &iov, &iov_cnt, + false); + entry->req.iov_bidi_cnt = iov_cnt; + } /* cmd's data_bitmap is what changed in process */ bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS); @@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * se_cmd->scsi_sense_length); free_data_area(udev, cmd); } else if (se_cmd->se_cmd_flags & SCF_BIDI) { - DECLARE_BITMAP(bitmap, 
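tcmu_cmd_get_data_length() above rounds the Data-Out length and, for BIDI commands, the Data-In length up to whole data blocks, and tcmu_cmd_get_block_cnt() turns that into the worst-case iovec count used to size the ring entry. Worked numbers, assuming a 4 KiB DATA_BLOCK_SIZE (the constant's exact value is an assumption of this sketch):

	#define BLK		4096u			/* assumed DATA_BLOCK_SIZE */
	#define ROUND_UP(x)	(((x) + BLK - 1) / BLK * BLK)

	/* Example BIDI command: 1 KiB Data-Out, 6 KiB Data-In. */
	unsigned dout = 1024, din = 6144;
	unsigned data_length = ROUND_UP(dout) + ROUND_UP(din);	/* 4096 + 8192 = 12288 */
	unsigned block_cnt  = data_length / BLK;		/* 3 iov slots reserved */
	unsigned skip = (dout + BLK - 1) / BLK;	/* 1 Data-Out block that the gather
						 * path clears from its bitmap copy
						 * before collecting Data-In */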
DATA_BLOCK_BITS); - /* Get Data-In buffer before clean up */ - bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); - gather_data_area(udev, bitmap, - se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents); + gather_data_area(udev, cmd, true); free_data_area(udev, cmd); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS); - - bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS); - gather_data_area(udev, bitmap, - se_cmd->t_data_sg, se_cmd->t_data_nents); + gather_data_area(udev, cmd, false); free_data_area(udev, cmd); } else if (se_cmd->data_direction == DMA_TO_DEVICE) { free_data_area(udev, cmd); @@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag if (ret < 0) return ret; - if (!val) { - pr_err("Illegal value for cmd_time_out\n"); - return -EINVAL; - } - udev->cmd_time_out = val * MSEC_PER_SEC; return count; } diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index b0500a0a87b8..e4603b09863a 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -492,6 +492,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) } /** + * tty_ldisc_restore - helper for tty ldisc change + * @tty: tty to recover + * @old: previous ldisc + * + * Restore the previous line discipline or N_TTY when a line discipline + * change fails due to an open error + */ + +static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) +{ + struct tty_ldisc *new_ldisc; + int r; + + /* There is an outstanding reference here so this is safe */ + old = tty_ldisc_get(tty, old->ops->num); + WARN_ON(IS_ERR(old)); + tty->ldisc = old; + tty_set_termios_ldisc(tty, old->ops->num); + if (tty_ldisc_open(tty, old) < 0) { + tty_ldisc_put(old); + /* This driver is always present */ + new_ldisc = tty_ldisc_get(tty, N_TTY); + if (IS_ERR(new_ldisc)) + panic("n_tty: get"); + tty->ldisc = new_ldisc; + tty_set_termios_ldisc(tty, N_TTY); + r = tty_ldisc_open(tty, new_ldisc); + if (r < 0) + panic("Couldn't open N_TTY ldisc for " + "%s --- error %d.", + tty_name(tty), r); + } +} + +/** * tty_set_ldisc - set line discipline * @tty: the terminal to set * @ldisc: the line discipline @@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) int tty_set_ldisc(struct tty_struct *tty, int disc) { - int retval, old_disc; + int retval; + struct tty_ldisc *old_ldisc, *new_ldisc; + + new_ldisc = tty_ldisc_get(tty, disc); + if (IS_ERR(new_ldisc)) + return PTR_ERR(new_ldisc); tty_lock(tty); retval = tty_ldisc_lock(tty, 5 * HZ); @@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) } /* Check the no-op case */ - old_disc = tty->ldisc->ops->num; - if (old_disc == disc) + if (tty->ldisc->ops->num == disc) goto out; if (test_bit(TTY_HUPPED, &tty->flags)) { @@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) goto out; } - retval = tty_ldisc_reinit(tty, disc); + old_ldisc = tty->ldisc; + + /* Shutdown the old discipline. */ + tty_ldisc_close(tty, old_ldisc); + + /* Now set up the new line discipline. */ + tty->ldisc = new_ldisc; + tty_set_termios_ldisc(tty, disc); + + retval = tty_ldisc_open(tty, new_ldisc); if (retval < 0) { /* Back to the old one or N_TTY if we can't */ - if (tty_ldisc_reinit(tty, old_disc) < 0) { - pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); - if (tty_ldisc_reinit(tty, N_TTY) < 0) { - /* At this point we have tty->ldisc == NULL. 
*/ - pr_err("tty: reinitializing N_TTY failed\n"); - } - } + tty_ldisc_put(new_ldisc); + tty_ldisc_restore(tty, old_ldisc); } - if (tty->ldisc && tty->ldisc->ops->num != old_disc && - tty->ops->set_ldisc) { + if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { down_read(&tty->termios_rwsem); tty->ops->set_ldisc(tty); up_read(&tty->termios_rwsem); } + /* At this point we hold a reference to the new ldisc and a + reference to the old ldisc, or we hold two references to + the old ldisc (if it was restored as part of error cleanup + above). In either case, releasing a single reference from + the old ldisc is correct. */ + new_ldisc = old_ldisc; out: tty_ldisc_unlock(tty); @@ -553,6 +601,7 @@ out: already running */ tty_buffer_restart_work(tty->port); err: + tty_ldisc_put(new_ldisc); /* drop the extra reference */ tty_unlock(tty); return retval; } @@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) int retval; ld = tty_ldisc_get(tty, disc); - if (IS_ERR(ld)) + if (IS_ERR(ld)) { + BUG_ON(disc == N_TTY); return PTR_ERR(ld); + } if (tty->ldisc) { tty_ldisc_close(tty, tty->ldisc); @@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) tty_set_termios_ldisc(tty, disc); retval = tty_ldisc_open(tty, tty->ldisc); if (retval) { - tty_ldisc_put(tty->ldisc); - tty->ldisc = NULL; + if (!WARN_ON(disc == N_TTY)) { + tty_ldisc_put(tty->ldisc); + tty->ldisc = NULL; + } } return retval; } diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index d2351139342f..a82e2bd5ea34 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu) usb_ep_free_request(fu->ep_in, fu->bot_req_in); usb_ep_free_request(fu->ep_out, fu->bot_req_out); usb_ep_free_request(fu->ep_out, fu->cmd.req); - usb_ep_free_request(fu->ep_out, fu->bot_status.req); + usb_ep_free_request(fu->ep_in, fu->bot_status.req); kfree(fu->cmd.buf); diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 8c4dc1e1f94f..b827a8113e26 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -10,6 +10,7 @@ #include <linux/efi.h> #include <linux/errno.h> #include <linux/fb.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/screen_info.h> #include <video/vga.h> @@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = { }; ATTRIBUTE_GROUPS(efifb); +static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */ + static int efifb_probe(struct platform_device *dev) { struct fb_info *info; @@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev) unsigned int size_total; char *option = NULL; - if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled) return -ENODEV; if (fb_get_options("efifb", &option)) @@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = { }; builtin_platform_driver(efifb_driver); + +#if defined(CONFIG_PCI) && !defined(CONFIG_X86) + +static bool pci_bar_found; /* did we find a BAR matching the efifb base? 
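The tty_set_ldisc() rework above pins a reference on the requested discipline before taking any locks and guarantees that exactly one surplus reference is dropped on every exit path, whether the switch stuck or tty_ldisc_restore() rolled it back. The ownership rule, reduced to a kernel-style sketch (locking and the hangup check elided):

	new = tty_ldisc_get(tty, disc);		/* +1 ref on the new ldisc */
	if (IS_ERR(new))
		return PTR_ERR(new);

	old = tty->ldisc;
	tty_ldisc_close(tty, old);
	tty->ldisc = new;
	if (tty_ldisc_open(tty, new) < 0) {
		tty_ldisc_put(new);		/* open failed: drop the new ref */
		tty_ldisc_restore(tty, old);	/* takes its own fresh ref on old */
	}

	/* Success: one ref each on old and new.
	 * Failure: two refs on old (the original plus restore's).
	 * Either way a single put on "old" balances the books: */
	tty_ldisc_put(old);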
*/ + +static void claim_efifb_bar(struct pci_dev *dev, int idx) +{ + u16 word; + + pci_bar_found = true; + + pci_read_config_word(dev, PCI_COMMAND, &word); + if (!(word & PCI_COMMAND_MEMORY)) { + pci_dev_disabled = true; + dev_err(&dev->dev, + "BAR %d: assigned to efifb but device is disabled!\n", + idx); + return; + } + + if (pci_claim_resource(dev, idx)) { + pci_dev_disabled = true; + dev_err(&dev->dev, + "BAR %d: failed to claim resource for efifb!\n", idx); + return; + } + + dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx); +} + +static void efifb_fixup_resources(struct pci_dev *dev) +{ + u64 base = screen_info.lfb_base; + u64 size = screen_info.lfb_size; + int i; + + if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + return; + + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + base |= (u64)screen_info.ext_lfb_base << 32; + + if (!base) + return; + + for (i = 0; i < PCI_STD_RESOURCE_END; i++) { + struct resource *res = &dev->resource[i]; + + if (!(res->flags & IORESOURCE_MEM)) + continue; + + if (res->start <= base && res->end >= base + size - 1) { + claim_efifb_bar(dev, i); + break; + } + } +} +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, + 16, efifb_fixup_resources); + +#endif diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index 1abba07b84b3..f4cbfb3b8a09 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev) return 0; } -static void check_required_callbacks(struct omapfb_device *fbdev) -{ -#define _C(x) (fbdev->ctrl->x != NULL) -#define _P(x) (fbdev->panel->x != NULL) - BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL); - BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) && - _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) && - _P(init) && _P(cleanup) && _P(enable) && _P(disable) && - _P(get_caps))); -#undef _P -#undef _C -} - /* * Called by LDM binding to probe and attach a new device. 
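efifb_fixup_resources() above runs as an early PCI header fixup on display-class devices, reconstructs the (possibly 64-bit) framebuffer base from screen_info, and claims the first memory BAR whose window contains it; if that device turns out to be disabled, pci_dev_disabled makes efifb_probe() return -ENODEV rather than touch an unclaimed aperture. The two small computations it relies on, as a standalone sketch (names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	/* 64-bit base reassembly when VIDEO_CAPABILITY_64BIT_BASE is set. */
	static uint64_t fb_base(uint32_t lfb_base, uint32_t ext_lfb_base, bool cap64)
	{
		uint64_t base = lfb_base;

		if (cap64)
			base |= (uint64_t)ext_lfb_base << 32;
		return base;
	}

	/* Does the BAR window [start, end] cover the whole framebuffer? */
	static bool bar_covers_fb(uint64_t start, uint64_t end,
				  uint64_t base, uint64_t size)
	{
		return start <= base && end >= base + size - 1;
	}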
* Initialization sequence: @@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev, omapfb_ops.fb_mmap = omapfb_mmap; init_state++; - check_required_callbacks(fbdev); - r = planes_init(fbdev); if (r) goto cleanup; diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index bd017b57c47f..f599520374dd 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client, par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat"); if (IS_ERR(par->vbat_reg)) { - dev_err(&client->dev, "failed to get VBAT regulator: %ld\n", - PTR_ERR(par->vbat_reg)); ret = PTR_ERR(par->vbat_reg); - goto fb_alloc_error; + if (ret == -ENODEV) { + par->vbat_reg = NULL; + } else { + dev_err(&client->dev, "failed to get VBAT regulator: %d\n", + ret); + goto fb_alloc_error; + } } if (of_property_read_u32(node, "solomon,width", &par->width)) @@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client, udelay(4); } - ret = regulator_enable(par->vbat_reg); - if (ret) { - dev_err(&client->dev, "failed to enable VBAT: %d\n", ret); - goto reset_oled_error; + if (par->vbat_reg) { + ret = regulator_enable(par->vbat_reg); + if (ret) { + dev_err(&client->dev, "failed to enable VBAT: %d\n", + ret); + goto reset_oled_error; + } } ret = ssd1307fb_init(par); @@ -710,7 +717,8 @@ panel_init_error: pwm_put(par->pwm); }; regulator_enable_error: - regulator_disable(par->vbat_reg); + if (par->vbat_reg) + regulator_disable(par->vbat_reg); reset_oled_error: fb_deferred_io_cleanup(info); fb_alloc_error: diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index d0115a7af0a9..3ee309c50b2d 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev, break; case XenbusStateInitWait: -InitWait: xenbus_switch_state(dev, XenbusStateConnected); break; @@ -654,7 +653,8 @@ InitWait: * get Connected twice here. 
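The ssd1307fb hunks above make the VBAT supply genuinely optional: -ENODEV from devm_regulator_get_optional() now means "no supply described in the device tree", the pointer is nulled, and every enable/disable site is guarded on it. The shape of the pattern, condensed kernel-style (any other error, e.g. -EPROBE_DEFER, still aborts the probe):

	par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
	if (IS_ERR(par->vbat_reg)) {
		ret = PTR_ERR(par->vbat_reg);
		if (ret == -ENODEV)
			par->vbat_reg = NULL;	/* absent supply: not an error */
		else
			goto fb_alloc_error;	/* real failure, abort probe */
	}

	if (par->vbat_reg) {			/* every use is now conditional */
		ret = regulator_enable(par->vbat_reg);
		if (ret)
			goto reset_oled_error;
	}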
*/ if (dev->state != XenbusStateConnected) - goto InitWait; /* no InitWait seen yet, fudge it */ + /* no InitWait seen yet, fudge it */ + xenbus_switch_state(dev, XenbusStateConnected); if (xenbus_read_unsigned(info->xbdev->otherend, "request-update", 0)) diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 400d70b69379..48230a5e12f2 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + if (drv->validate) { + err = drv->validate(dev); + if (err) + goto err; + } + err = virtio_finalize_features(dev); if (err) goto err; diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 590534910dc6..698d5d06fa03 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev) struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i; - synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0)); - for (i = 1; i < vp_dev->msix_vectors; i++) + if (vp_dev->intx_enabled) + synchronize_irq(vp_dev->pci_dev->irq); + + for (i = 0; i < vp_dev->msix_vectors; ++i) synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i)); } @@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque) static irqreturn_t vp_vring_interrupt(int irq, void *opaque) { struct virtio_pci_device *vp_dev = opaque; + struct virtio_pci_vq_info *info; irqreturn_t ret = IRQ_NONE; - struct virtqueue *vq; + unsigned long flags; - list_for_each_entry(vq, &vp_dev->vdev.vqs, list) { - if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED) + spin_lock_irqsave(&vp_dev->lock, flags); + list_for_each_entry(info, &vp_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) ret = IRQ_HANDLED; } + spin_unlock_irqrestore(&vp_dev->lock, flags); return ret; } @@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) return vp_vring_interrupt(irq, opaque); } -static void vp_remove_vqs(struct virtio_device *vdev) +static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, + bool per_vq_vectors, struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - struct virtqueue *vq, *n; + const char *name = dev_name(&vp_dev->vdev.dev); + unsigned i, v; + int err = -ENOMEM; - list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->msix_vector_map) { - int v = vp_dev->msix_vector_map[vq->index]; + vp_dev->msix_vectors = nvectors; - if (v != VIRTIO_MSI_NO_VECTOR) - free_irq(pci_irq_vector(vp_dev->pci_dev, v), - vq); - } - vp_dev->del_vq(vq); + vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, + GFP_KERNEL); + if (!vp_dev->msix_names) + goto error; + vp_dev->msix_affinity_masks + = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, + GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto error; + for (i = 0; i < nvectors; ++i) + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto error; + + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, + nvectors, PCI_IRQ_MSIX | + (desc ? 
PCI_IRQ_AFFINITY : 0), + desc); + if (err < 0) + goto error; + vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + + v = vp_dev->config_vector(vp_dev, v); + /* Verify we had enough resources to assign the vector */ + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error; } + + if (!per_vq_vectors) { + /* Shared vector for all VQs */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-virtqueues", name); + err = request_irq(pci_irq_vector(vp_dev->pci_dev, v), + vp_vring_interrupt, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + } + return 0; +error: + return err; +} + +static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); + struct virtqueue *vq; + unsigned long flags; + + /* fill out our structure that represents an active queue */ + if (!info) + return ERR_PTR(-ENOMEM); + + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, + msix_vec); + if (IS_ERR(vq)) + goto out_info; + + info->vq = vq; + if (callback) { + spin_lock_irqsave(&vp_dev->lock, flags); + list_add(&info->node, &vp_dev->virtqueues); + spin_unlock_irqrestore(&vp_dev->lock, flags); + } else { + INIT_LIST_HEAD(&info->node); + } + + vp_dev->vqs[index] = info; + return vq; + +out_info: + kfree(info); + return vq; +} + +static void vp_del_vq(struct virtqueue *vq) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); + + vp_dev->del_vq(info); + kfree(info); } /* the config->del_vqs() implementation */ void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq, *n; int i; - if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs))) - return; + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + if (vp_dev->per_vq_vectors) { + int v = vp_dev->vqs[vq->index]->msix_vector; - vp_remove_vqs(vdev); + if (v != VIRTIO_MSI_NO_VECTOR) { + int irq = pci_irq_vector(vp_dev->pci_dev, v); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, vq); + } + } + vp_del_vq(vq); + } + vp_dev->per_vq_vectors = false; + + if (vp_dev->intx_enabled) { + free_irq(vp_dev->pci_dev->irq, vp_dev); + vp_dev->intx_enabled = 0; + } - if (vp_dev->pci_dev->msix_enabled) { - for (i = 0; i < vp_dev->msix_vectors; i++) + for (i = 0; i < vp_dev->msix_used_vectors; ++i) + free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev); + + for (i = 0; i < vp_dev->msix_vectors; i++) + if (vp_dev->msix_affinity_masks[i]) free_cpumask_var(vp_dev->msix_affinity_masks[i]); + if (vp_dev->msix_enabled) { /* Disable the vector used for configuration */ vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); - kfree(vp_dev->msix_affinity_masks); - kfree(vp_dev->msix_names); - kfree(vp_dev->msix_vector_map); + pci_free_irq_vectors(vp_dev->pci_dev); + vp_dev->msix_enabled = 0; } - 
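vp_setup_vq() and vp_del_vq() above reintroduce a per-queue struct virtio_pci_vq_info that remembers the queue's MSI-X vector and, only when the queue has a callback, links it into vp_dev->virtqueues under vp_dev->lock, so vp_vring_interrupt() walks just the interrupt-driven queues. The bookkeeping, condensed kernel-style (allocation and error paths trimmed):

	info->vq = vq;
	info->msix_vector = msix_vec;		/* consulted by the affinity helpers */
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		/* not on the dispatch list, but initialized so that the
		 * unconditional list_del() in vp_del_vq() stays safe */
		INIT_LIST_HEAD(&info->node);
	}
	vp_dev->vqs[index] = info;		/* index -> info for later lookups */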
free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); - pci_free_irq_vectors(vp_dev->pci_dev); + vp_dev->msix_vectors = 0; + vp_dev->msix_used_vectors = 0; + kfree(vp_dev->msix_names); + vp_dev->msix_names = NULL; + kfree(vp_dev->msix_affinity_masks); + vp_dev->msix_affinity_masks = NULL; + kfree(vp_dev->vqs); + vp_dev->vqs = NULL; } static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], struct irq_affinity *desc) + const char * const names[], bool per_vq_vectors, + struct irq_affinity *desc) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - const char *name = dev_name(&vp_dev->vdev.dev); - int i, j, err = -ENOMEM, allocated_vectors, nvectors; - unsigned flags = PCI_IRQ_MSIX; - bool shared = false; u16 msix_vec; + int i, err, nvectors, allocated_vectors; - if (desc) { - flags |= PCI_IRQ_AFFINITY; - desc->pre_vectors++; /* virtio config vector */ - } - - nvectors = 1; - for (i = 0; i < nvqs; i++) - if (callbacks[i]) - nvectors++; - - /* Try one vector per queue first. */ - err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, - nvectors, flags, desc); - if (err < 0) { - /* Fallback to one vector for config, one shared for queues. */ - shared = true; - err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2, - PCI_IRQ_MSIX); - if (err < 0) - return err; - } - if (err < 0) - return err; - - vp_dev->msix_vectors = nvectors; - vp_dev->msix_names = kmalloc_array(nvectors, - sizeof(*vp_dev->msix_names), GFP_KERNEL); - if (!vp_dev->msix_names) - goto out_free_irq_vectors; - - vp_dev->msix_affinity_masks = kcalloc(nvectors, - sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL); - if (!vp_dev->msix_affinity_masks) - goto out_free_msix_names; + vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; - for (i = 0; i < nvectors; ++i) { - if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], - GFP_KERNEL)) - goto out_free_msix_affinity_masks; + if (per_vq_vectors) { + /* Best option: one for change interrupt, one per vq. */ + nvectors = 1; + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++nvectors; + } else { + /* Second best: one for change, shared for all vqs. */ + nvectors = 2; } - /* Set the vector used for configuration */ - snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names), - "%s-config", name); - err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed, - 0, vp_dev->msix_names[0], vp_dev); + err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors, + per_vq_vectors ? 
desc : NULL); if (err) - goto out_free_msix_affinity_masks; + goto error_find; - /* Verify we had enough resources to assign the vector */ - if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) { - err = -EBUSY; - goto out_free_config_irq; - } - - vp_dev->msix_vector_map = kmalloc_array(nvqs, - sizeof(*vp_dev->msix_vector_map), GFP_KERNEL); - if (!vp_dev->msix_vector_map) - goto out_disable_config_irq; - - allocated_vectors = j = 1; /* vector 0 is the config interrupt */ + vp_dev->per_vq_vectors = per_vq_vectors; + allocated_vectors = vp_dev->msix_used_vectors; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - if (callbacks[i]) - msix_vec = allocated_vectors; - else + if (!callbacks[i]) msix_vec = VIRTIO_MSI_NO_VECTOR; - - vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], - msix_vec); + else if (vp_dev->per_vq_vectors) + msix_vec = allocated_vectors++; + else + msix_vec = VP_MSIX_VQ_VECTOR; + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + msix_vec); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_remove_vqs; + goto error_find; } - if (msix_vec == VIRTIO_MSI_NO_VECTOR) { - vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; + if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) continue; - } - snprintf(vp_dev->msix_names[j], - sizeof(*vp_dev->msix_names), "%s-%s", + /* allocate per-vq irq if available and necessary */ + snprintf(vp_dev->msix_names[msix_vec], + sizeof *vp_dev->msix_names, + "%s-%s", dev_name(&vp_dev->vdev.dev), names[i]); err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), - vring_interrupt, IRQF_SHARED, - vp_dev->msix_names[j], vqs[i]); - if (err) { - /* don't free this irq on error */ - vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; - goto out_remove_vqs; - } - vp_dev->msix_vector_map[i] = msix_vec; - j++; - - /* - * Use a different vector for each queue if they are available, - * else share the same vector for all VQs. 
- */ - if (!shared) - allocated_vectors++; + vring_interrupt, 0, + vp_dev->msix_names[msix_vec], + vqs[i]); + if (err) + goto error_find; } - return 0; -out_remove_vqs: - vp_remove_vqs(vdev); - kfree(vp_dev->msix_vector_map); -out_disable_config_irq: - vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); -out_free_config_irq: - free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); -out_free_msix_affinity_masks: - for (i = 0; i < nvectors; i++) { - if (vp_dev->msix_affinity_masks[i]) - free_cpumask_var(vp_dev->msix_affinity_masks[i]); - } - kfree(vp_dev->msix_affinity_masks); -out_free_msix_names: - kfree(vp_dev->msix_names); -out_free_irq_vectors: - pci_free_irq_vectors(vp_dev->pci_dev); +error_find: + vp_del_vqs(vdev); return err; } @@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtio_pci_device *vp_dev = to_vp_device(vdev); int i, err; + vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vp_dev); if (err) - return err; + goto out_del_vqs; + vp_dev->intx_enabled = 1; + vp_dev->per_vq_vectors = false; for (i = 0; i < nvqs; ++i) { if (!names[i]) { vqs[i] = NULL; continue; } - vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i], - VIRTIO_MSI_NO_VECTOR); + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], + VIRTIO_MSI_NO_VECTOR); if (IS_ERR(vqs[i])) { err = PTR_ERR(vqs[i]); - goto out_remove_vqs; + goto out_del_vqs; } } return 0; - -out_remove_vqs: - vp_remove_vqs(vdev); - free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev); +out_del_vqs: + vp_del_vqs(vdev); return err; } @@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, { int err; - err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc); + /* Try MSI-X with one vector per queue. */ + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc); if (!err) return 0; + /* Fallback: MSI-X with one vector for config, one shared for queues. */ + err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc); + if (!err) + return 0; + /* Finally fall back to regular interrupts. 
*/ return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names); } @@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) { struct virtio_device *vdev = vq->vdev; struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + struct cpumask *mask; + unsigned int irq; if (!vq->callback) return -EINVAL; - if (vp_dev->pci_dev->msix_enabled) { - int vec = vp_dev->msix_vector_map[vq->index]; - struct cpumask *mask = vp_dev->msix_affinity_masks[vec]; - unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec); - + if (vp_dev->msix_enabled) { + mask = vp_dev->msix_affinity_masks[info->msix_vector]; + irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector); if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { @@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); - unsigned int *map = vp_dev->msix_vector_map; - if (!map || map[index] == VIRTIO_MSI_NO_VECTOR) + if (!vp_dev->per_vq_vectors || + vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR) return NULL; - return pci_irq_get_affinity(vp_dev->pci_dev, map[index]); + return pci_irq_get_affinity(vp_dev->pci_dev, + vp_dev->vqs[index]->msix_vector); } #ifdef CONFIG_PM_SLEEP @@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); /* enable the device */ rc = pci_enable_device(pci_dev); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index ac8c9d788964..e96334aec1e0 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -31,6 +31,17 @@ #include <linux/highmem.h> #include <linux/spinlock.h> +struct virtio_pci_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the list node for the virtqueues list */ + struct list_head node; + + /* MSI-X vector (or none) */ + unsigned msix_vector; +}; + /* Our device structure */ struct virtio_pci_device { struct virtio_device vdev; @@ -64,25 +75,47 @@ struct virtio_pci_device { /* the IO mapping for the PCI config space */ void __iomem *ioaddr; + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + + /* array of all queues for house-keeping */ + struct virtio_pci_vq_info **vqs; + + /* MSI-X support */ + int msix_enabled; + int intx_enabled; cpumask_var_t *msix_affinity_masks; /* Name strings for interrupts. This size should be enough, * and I'm too lazy to allocate each name separately. */ char (*msix_names)[256]; - /* Total Number of MSI-X vectors (including per-VQ ones). 
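vp_find_vqs() above restores the classic three-step probe order, and the vector budget follows from the mode: per-VQ vectors need one slot per callback plus the config vector, while the shared mode always needs exactly two. Condensed (the calls are the diff's own vp_find_vqs_msix()/vp_find_vqs_intx(); the budget numbers assume a hypothetical device with four VQs, three of them with callbacks):

	/* 1) best: config vector + one vector per callback VQ (1 + 3 = 4) */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
	if (!err)
		return 0;
	/* 2) fallback: config vector + one vector shared by all VQs (= 2) */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
	if (!err)
		return 0;
	/* 3) last resort: one legacy INTx line shared by config and VQs */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);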
*/ - int msix_vectors; - /* Map of per-VQ MSI-X vectors, may be NULL */ - unsigned *msix_vector_map; + /* Number of available vectors */ + unsigned msix_vectors; + /* Vectors allocated, excluding per-vq vectors if any */ + unsigned msix_used_vectors; + + /* Whether we have vector per vq */ + bool per_vq_vectors; struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, unsigned idx, void (*callback)(struct virtqueue *vq), const char *name, u16 msix_vec); - void (*del_vq)(struct virtqueue *vq); + void (*del_vq)(struct virtio_pci_vq_info *info); u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); }; +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. */ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index f7362c5fe18a..4bfa48fb1324 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) return ERR_PTR(-ENOENT); + info->msix_vector = msix_vec; + /* create the vring */ vq = vring_create_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, @@ -159,13 +162,14 @@ out_deactivate: return ERR_PTR(err); } -static void del_vq(struct virtqueue *vq) +static void del_vq(struct virtio_pci_vq_info *info) { + struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { iowrite16(VIRTIO_MSI_NO_VECTOR, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); /* Flush the write out to device */ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 7bc3004b840e..8978f109d2d7 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) } static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, unsigned index, void (*callback)(struct virtqueue *vq), const char *name, @@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* get offset of notification word for this vq */ off = vp_ioread16(&cfg->queue_notify_off); + info->msix_vector = msix_vec; + /* create the vring */ vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, &vp_dev->vdev, @@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, return 0; } -static void del_vq(struct virtqueue *vq) +static void del_vq(struct virtio_pci_vq_info *info) { + struct virtqueue *vq = info->vq; struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); vp_iowrite16(vq->index, &vp_dev->common->queue_select); - if (vp_dev->pci_dev->msix_enabled) { + if (vp_dev->msix_enabled) { vp_iowrite16(VIRTIO_MSI_NO_VECTOR, 
&vp_dev->common->queue_msix_vector); /* Flush the write out to device */ diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 1f4733b80c87..f3b089b7c0b6 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type, return xenbus_command_reply(u, XS_ERROR, "ENOENT"); rc = xenbus_dev_request_and_reply(&u->u.msg, u); - if (rc) + if (rc && trans) { + list_del(&trans->list); kfree(trans); + } out: return rc; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a18510be76c1..5e71f1ea3391 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7910,7 +7910,6 @@ struct btrfs_retry_complete { static void btrfs_retry_endio_nocsum(struct bio *bio) { struct btrfs_retry_complete *done = bio->bi_private; - struct inode *inode; struct bio_vec *bvec; int i; @@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio) goto end; ASSERT(bio->bi_vcnt == 1); - inode = bio->bi_io_vec->bv_page->mapping->host; - ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode)); + ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode)); done->uptodate = 1; bio_for_each_segment_all(bvec, bio, i) - clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); + clean_io_failure(BTRFS_I(done->inode), done->start, + bvec->bv_page, 0); end: complete(&done->done); bio_put(bio); @@ -7973,8 +7972,10 @@ next_block_or_try_again: start += sectorsize; - if (nr_sectors--) { + nr_sectors--; + if (nr_sectors) { pgoff += sectorsize; + ASSERT(pgoff < PAGE_SIZE); goto next_block_or_try_again; } } @@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio) { struct btrfs_retry_complete *done = bio->bi_private; struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); - struct inode *inode; struct bio_vec *bvec; - u64 start; int uptodate; int ret; int i; @@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio) uptodate = 1; - start = done->start; - ASSERT(bio->bi_vcnt == 1); - inode = bio->bi_io_vec->bv_page->mapping->host; - ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode)); + ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode)); bio_for_each_segment_all(bvec, bio, i) { ret = __readpage_endio_check(done->inode, io_bio, i, @@ -8080,8 +8076,10 @@ next: ASSERT(nr_sectors); - if (--nr_sectors) { + nr_sectors--; + if (nr_sectors) { pgoff += sectorsize; + ASSERT(pgoff < PAGE_SIZE); goto next_block; } } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index da687dc79cce..9530a333d302 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, case Opt_ssd: btrfs_set_and_info(info, SSD, "use ssd allocation scheme"); + btrfs_clear_opt(info->mount_opt, NOSSD); break; case Opt_ssd_spread: btrfs_set_and_info(info, SSD_SPREAD, "use spread ssd allocation scheme"); btrfs_set_opt(info->mount_opt, SSD); + btrfs_clear_opt(info->mount_opt, NOSSD); break; case Opt_nossd: btrfs_set_and_info(info, NOSSD, "not using ssd allocation scheme"); btrfs_clear_opt(info->mount_opt, SSD); + btrfs_clear_opt(info->mount_opt, SSD_SPREAD); break; case Opt_barrier: btrfs_clear_and_info(info, NOBARRIER, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 73d56eef5e60..ab8a66d852f9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio, for (dev_nr = 
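The btrfs retry-endio hunks above replace `if (nr_sectors--)` with a decrement followed by a separate test. The post-decrement form tests the value *before* it is decremented, so the loop took one iteration too many and could walk pgoff past the end of the page; the added ASSERT(pgoff < PAGE_SIZE) now documents the invariant. A toy program showing the one-pass difference:

	#include <stdio.h>

	int main(void)
	{
		int n = 3, passes = 0;

		do { passes++; } while (n--);		/* old pattern */
		printf("test-then-decrement: %d passes\n", passes);	/* 4 */

		n = 3; passes = 0;
		do { passes++; n--; } while (n);	/* fixed pattern */
		printf("decrement-then-test: %d passes\n", passes);	/* 3 */
		return 0;
	}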
0; dev_nr < total_devs; dev_nr++) { dev = bbio->stripes[dev_nr].dev; if (!dev || !dev->bdev || - (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) { + (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) { bbio_error(bbio, first_bio, logical); continue; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 15e1db8738ae..dd3f5fabfdf6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -972,6 +972,86 @@ out: return rc; } +ssize_t cifs_file_copychunk_range(unsigned int xid, + struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, + size_t len, unsigned int flags) +{ + struct inode *src_inode = file_inode(src_file); + struct inode *target_inode = file_inode(dst_file); + struct cifsFileInfo *smb_file_src; + struct cifsFileInfo *smb_file_target; + struct cifs_tcon *src_tcon; + struct cifs_tcon *target_tcon; + ssize_t rc; + + cifs_dbg(FYI, "copychunk range\n"); + + if (src_inode == target_inode) { + rc = -EINVAL; + goto out; + } + + if (!src_file->private_data || !dst_file->private_data) { + rc = -EBADF; + cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); + goto out; + } + + rc = -EXDEV; + smb_file_target = dst_file->private_data; + smb_file_src = src_file->private_data; + src_tcon = tlink_tcon(smb_file_src->tlink); + target_tcon = tlink_tcon(smb_file_target->tlink); + + if (src_tcon->ses != target_tcon->ses) { + cifs_dbg(VFS, "source and target of copy not on same server\n"); + goto out; + } + + /* + * Note: cifs case is easier than btrfs since server responsible for + * checks for proper open modes and file type and if it wants + * server could even support copy of range where source = target + */ + lock_two_nondirectories(target_inode, src_inode); + + cifs_dbg(FYI, "about to flush pages\n"); + /* should we flush first and last page first */ + truncate_inode_pages(&target_inode->i_data, 0); + + if (target_tcon->ses->server->ops->copychunk_range) + rc = target_tcon->ses->server->ops->copychunk_range(xid, + smb_file_src, smb_file_target, off, len, destoff); + else + rc = -EOPNOTSUPP; + + /* force revalidate of size and timestamps of target file now + * that target is updated on the server + */ + CIFS_I(target_inode)->time = 0; + /* although unlocking in the reverse order from locking is not + * strictly necessary here it is a little cleaner to be consistent + */ + unlock_two_nondirectories(src_inode, target_inode); + +out: + return rc; +} + +static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, + size_t len, unsigned int flags) +{ + unsigned int xid = get_xid(); + ssize_t rc; + + rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff, + len, flags); + free_xid(xid); + return rc; +} + const struct file_operations cifs_file_ops = { .read_iter = cifs_loose_read_iter, .write_iter = cifs_file_write_iter, @@ -984,6 +1064,7 @@ const struct file_operations cifs_file_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1001,6 +1082,7 @@ const struct file_operations cifs_file_strict_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1018,6 +1100,7 @@ const struct file_operations cifs_file_direct_ops = { 
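With cifs_copy_file_range() wired into the file_operations tables above, an ordinary copy_file_range(2) between two files on the same cifs mount is translated into server-side CopyChunk instead of a client-side read/write loop. A minimal userspace illustration (glibc 2.27+ exposes the wrapper; the paths are hypothetical):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int in = open("/mnt/cifs/src.bin", O_RDONLY);
		int out = open("/mnt/cifs/dst.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (in < 0 || out < 0)
			return 1;

		/* The kernel routes this through ->copy_file_range, i.e. the
		 * new cifs_copy_file_range() -> copychunk_range path. */
		ssize_t n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
		printf("copied %zd bytes\n", n);

		close(in);
		close(out);
		return n < 0;
	}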
.mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, @@ -1035,6 +1118,7 @@ const struct file_operations cifs_file_nobrl_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1051,6 +1135,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = { .splice_read = generic_file_splice_read, .llseek = cifs_llseek, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .setlease = cifs_setlease, .fallocate = cifs_fallocate, @@ -1067,6 +1152,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { .mmap = cifs_file_mmap, .splice_read = generic_file_splice_read, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = cifs_llseek, .setlease = cifs_setlease, @@ -1078,6 +1164,7 @@ const struct file_operations cifs_dir_ops = { .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, + .copy_file_range = cifs_copy_file_range, .clone_file_range = cifs_clone_file_range, .llseek = generic_file_llseek, }; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index da717fee3026..30bf89b1fd9a 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -139,6 +139,11 @@ extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); # define cifs_listxattr NULL #endif +extern ssize_t cifs_file_copychunk_range(unsigned int xid, + struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, + size_t len, unsigned int flags); + extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); #ifdef CONFIG_CIFS_NFSD_EXPORT extern const struct export_operations cifs_export_ops; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d42dd3288647..37f5a41cc50c 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -243,6 +243,7 @@ struct smb_version_operations { /* verify the message */ int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); + int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); void (*downgrade_oplock)(struct TCP_Server_Info *, struct cifsInodeInfo *, bool); /* process transaction2 response */ @@ -407,9 +408,10 @@ struct smb_version_operations { char * (*create_lease_buf)(u8 *, u8); /* parse lease context buffer and return oplock/epoch info */ __u8 (*parse_lease_buf)(void *, unsigned int *); - int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file, - struct cifsFileInfo *target_file, u64 src_off, u64 len, - u64 dest_off); + ssize_t (*copychunk_range)(const unsigned int, + struct cifsFileInfo *src_file, + struct cifsFileInfo *target_file, + u64 src_off, u64 len, u64 dest_off); int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src, struct cifsFileInfo *target_file, u64 src_off, u64 len, u64 dest_off); @@ -946,7 +948,6 @@ struct cifs_tcon { bool use_persistent:1; /* use persistent instead of durable handles */ #ifdef CONFIG_CIFS_SMB2 bool print:1; /* set if connection to printer share */ - bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */ __le32 capabilities; __u32 share_flags; __u32 maximal_access; @@ 
-1343,6 +1344,7 @@ struct mid_q_entry { void *callback_data; /* general purpose pointer for callback */ void *resp_buf; /* pointer to received SMB header */ int mid_state; /* wish this were enum but can not pass to wait_event */ + unsigned int mid_flags; __le16 command; /* smb command code */ bool large_buf:1; /* if valid response, is pointer to large buf */ bool multiRsp:1; /* multiple trans2 responses for one request */ @@ -1350,6 +1352,12 @@ struct mid_q_entry { bool decrypted:1; /* decrypted entry */ }; +struct close_cancelled_open { + struct cifs_fid fid; + struct cifs_tcon *tcon; + struct work_struct work; +}; + /* Make code in transport.c a little cleaner by moving update of optional stats into function below */ #ifdef CONFIG_CIFS_STATS2 @@ -1481,6 +1489,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, #define MID_RESPONSE_MALFORMED 0x10 #define MID_SHUTDOWN 0x20 +/* Flags */ +#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */ + /* Types of response buffer returned from SendReceive2 */ #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ #define CIFS_SMALL_BUFFER 1 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 066950671929..5d21f00ae341 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1428,6 +1428,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) length = cifs_discard_remaining_data(server); dequeue_mid(mid, rdata->result); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; return length; } @@ -1541,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) return cifs_readv_discard(server, mid); dequeue_mid(mid, false); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; return length; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9ae695ae3ed7..d82467cfb0e2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -904,10 +904,19 @@ cifs_demultiplex_thread(void *p) server->lstrp = jiffies; if (mid_entry != NULL) { + if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) && + mid_entry->mid_state == MID_RESPONSE_RECEIVED && + server->ops->handle_cancelled_mid) + server->ops->handle_cancelled_mid( + mid_entry->resp_buf, + server); + if (!mid_entry->multiRsp || mid_entry->multiEnd) mid_entry->callback(mid_entry); - } else if (!server->ops->is_oplock_break || - !server->ops->is_oplock_break(buf, server)) { + } else if (server->ops->is_oplock_break && + server->ops->is_oplock_break(buf, server)) { + cifs_dbg(FYI, "Received oplock break\n"); + } else { cifs_dbg(VFS, "No task to wake, unknown frame received! 
NumMids %d\n", atomic_read(&midCount)); cifs_dump_mem("Received Data is: ", buf, @@ -3744,6 +3753,9 @@ try_mount_again: if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; + if (rc == -EACCES) + goto mount_fail_check; + goto remote_path_check; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index aa3debbba826..21d404535739 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, wdata->credits = credits; if (!wdata->cfile->invalidHandle || - !cifs_reopen_file(wdata->cfile, false)) + !(rc = cifs_reopen_file(wdata->cfile, false))) rc = server->ops->async_writev(wdata, cifs_uncached_writedata_release); if (rc) { @@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, rdata->credits = credits; if (!rdata->cfile->invalidHandle || - !cifs_reopen_file(rdata->cfile, true)) + !(rc = cifs_reopen_file(rdata->cfile, true))) rc = server->ops->async_readv(rdata); error: if (rc) { @@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, } if (!rdata->cfile->invalidHandle || - !cifs_reopen_file(rdata->cfile, true)) + !(rc = cifs_reopen_file(rdata->cfile, true))) rc = server->ops->async_readv(rdata); if (rc) { add_credits_and_wake_if(server, rdata->credits, 0); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 001528781b6b..265c45fe4ea5 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -34,71 +34,14 @@ #include "cifs_ioctl.h" #include <linux/btrfs.h> -static int cifs_file_clone_range(unsigned int xid, struct file *src_file, - struct file *dst_file) -{ - struct inode *src_inode = file_inode(src_file); - struct inode *target_inode = file_inode(dst_file); - struct cifsFileInfo *smb_file_src; - struct cifsFileInfo *smb_file_target; - struct cifs_tcon *src_tcon; - struct cifs_tcon *target_tcon; - int rc; - - cifs_dbg(FYI, "ioctl clone range\n"); - - if (!src_file->private_data || !dst_file->private_data) { - rc = -EBADF; - cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); - goto out; - } - - rc = -EXDEV; - smb_file_target = dst_file->private_data; - smb_file_src = src_file->private_data; - src_tcon = tlink_tcon(smb_file_src->tlink); - target_tcon = tlink_tcon(smb_file_target->tlink); - - if (src_tcon->ses != target_tcon->ses) { - cifs_dbg(VFS, "source and target of copy not on same server\n"); - goto out; - } - - /* - * Note: cifs case is easier than btrfs since server responsible for - * checks for proper open modes and file type and if it wants - * server could even support copy of range where source = target - */ - lock_two_nondirectories(target_inode, src_inode); - - cifs_dbg(FYI, "about to flush pages\n"); - /* should we flush first and last page first */ - truncate_inode_pages(&target_inode->i_data, 0); - - if (target_tcon->ses->server->ops->clone_range) - rc = target_tcon->ses->server->ops->clone_range(xid, - smb_file_src, smb_file_target, 0, src_inode->i_size, 0); - else - rc = -EOPNOTSUPP; - - /* force revalidate of size and timestamps of target file now - that target is updated on the server */ - CIFS_I(target_inode)->time = 0; - /* although unlocking in the reverse order from locking is not - strictly necessary here it is a little cleaner to be consistent */ - unlock_two_nondirectories(src_inode, target_inode); -out: - return rc; -} - -static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, +static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file, unsigned long srcfd) { int rc; 
struct fd src_file; struct inode *src_inode; - cifs_dbg(FYI, "ioctl clone range\n"); + cifs_dbg(FYI, "ioctl copychunk range\n"); /* the destination must be opened for writing */ if (!(dst_file->f_mode & FMODE_WRITE)) { cifs_dbg(FYI, "file target not open for write\n"); @@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, if (S_ISDIR(src_inode->i_mode)) goto out_fput; - rc = cifs_file_clone_range(xid, src_file.file, dst_file); + rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0, + src_inode->i_size, 0); out_fput: fdput(src_file); @@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) } break; case CIFS_IOC_COPYCHUNK_FILE: - rc = cifs_ioctl_clone(xid, filep, arg); + rc = cifs_ioctl_copychunk(xid, filep, arg); break; case CIFS_IOC_SET_INTEGRITY: if (pSMBFile == NULL) diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index fd516ea8b8f8..1a04b3a5beb1 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n"); return false; } + +void +smb2_cancelled_close_fid(struct work_struct *work) +{ + struct close_cancelled_open *cancelled = container_of(work, + struct close_cancelled_open, work); + + cifs_dbg(VFS, "Close unmatched open\n"); + + SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid, + cancelled->fid.volatile_fid); + cifs_put_tcon(cancelled->tcon); + kfree(cancelled); +} + +int +smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server) +{ + struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer); + struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer; + struct cifs_tcon *tcon; + struct close_cancelled_open *cancelled; + + if (sync_hdr->Command != SMB2_CREATE || + sync_hdr->Status != STATUS_SUCCESS) + return 0; + + cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL); + if (!cancelled) + return -ENOMEM; + + tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId, + sync_hdr->TreeId); + if (!tcon) { + kfree(cancelled); + return -ENOENT; + } + + cancelled->fid.persistent_fid = rsp->PersistentFileId; + cancelled->fid.volatile_fid = rsp->VolatileFileId; + cancelled->tcon = tcon; + INIT_WORK(&cancelled->work, smb2_cancelled_close_fid); + queue_work(cifsiod_wq, &cancelled->work); + + return 0; +} diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0231108d9387..152e37f2ad92 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -21,6 +21,7 @@ #include <linux/vfs.h> #include <linux/falloc.h> #include <linux/scatterlist.h> +#include <linux/uuid.h> #include <crypto/aead.h> #include "cifsglob.h" #include "smb2pdu.h" @@ -592,8 +593,8 @@ req_res_key_exit: return rc; } -static int -smb2_clone_range(const unsigned int xid, +static ssize_t +smb2_copychunk_range(const unsigned int xid, struct cifsFileInfo *srcfile, struct cifsFileInfo *trgtfile, u64 src_off, u64 len, u64 dest_off) @@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid, struct cifs_tcon *tcon; int chunks_copied = 0; bool chunk_sizes_updated = false; + ssize_t bytes_written, total_bytes_written = 0; pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL); if (pcchunk == NULL) return -ENOMEM; - cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n"); + cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n"); /* Request a key from the server to identify the source of the copy */ rc = 
SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink), srcfile->fid.persistent_fid, @@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid, } chunks_copied++; - src_off += le32_to_cpu(retbuf->TotalBytesWritten); - dest_off += le32_to_cpu(retbuf->TotalBytesWritten); - len -= le32_to_cpu(retbuf->TotalBytesWritten); + bytes_written = le32_to_cpu(retbuf->TotalBytesWritten); + src_off += bytes_written; + dest_off += bytes_written; + len -= bytes_written; + total_bytes_written += bytes_written; - cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n", + cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n", le32_to_cpu(retbuf->ChunksWritten), le32_to_cpu(retbuf->ChunkBytesWritten), - le32_to_cpu(retbuf->TotalBytesWritten)); + bytes_written); } else if (rc == -EINVAL) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) goto cchunk_out; @@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid, cchunk_out: kfree(pcchunk); kfree(retbuf); - return rc; + if (rc) + return rc; + else + return total_bytes_written; } static int @@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = { .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = { .set_oplock_level = smb2_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, .parse_lease_buf = smb2_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .get_dfs_refer = smb2_get_dfs_refer, @@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = { .clear_stats = smb2_clear_stats, .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = { .set_oplock_level = smb21_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, .parse_lease_buf = smb2_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .wp_retry_size = smb2_wp_retry_size, .dir_needs_close = smb2_dir_needs_close, .enum_snapshots = smb3_enum_snapshots, @@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = { .print_stats = smb2_print_stats, .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = { .set_oplock_level = smb3_set_oplock_level, .create_lease_buf = smb3_create_lease_buf, .parse_lease_buf = smb3_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .duplicate_extents = smb2_duplicate_extents, .validate_negotiate = smb3_validate_negotiate, .wp_retry_size = smb2_wp_retry_size, @@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = { .print_stats = smb2_print_stats, .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, + .handle_cancelled_mid = smb2_handle_cancelled_mid, .downgrade_oplock = smb2_downgrade_oplock, 
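
The smb2_copychunk_range() rework above changes the return contract from "zero or negative errno" to "bytes copied or negative errno", which is why the return type widens from int to ssize_t and why each chunk's TotalBytesWritten is accumulated into total_bytes_written before the offsets advance. A stripped-down sketch of that loop shape, with a hypothetical copy_chunk() standing in for the SMB2 COPYCHUNK round trip:

    #include <stdio.h>
    #include <sys/types.h>

    #define CHUNK_MAX 4096  /* assumed per-chunk limit from the server */

    /* One copy round trip: returns bytes written or a negative errno. */
    static ssize_t copy_chunk(off_t src_off, off_t dst_off, size_t len)
    {
        return (ssize_t)(len < CHUNK_MAX ? len : CHUNK_MAX);
    }

    /* Returns the total bytes copied on success, or a negative errno. */
    static ssize_t copychunk_range(off_t src_off, off_t dst_off, size_t len)
    {
        ssize_t written, total = 0;

        while (len > 0) {
            written = copy_chunk(src_off, dst_off, len);
            if (written < 0)
                return written;  /* an error outranks partial progress */
            src_off += written;
            dst_off += written;
            len     -= (size_t)written;
            total   += written;
        }
        return total;
    }

    int main(void)
    {
        printf("copied %zd bytes\n", copychunk_range(0, 0, 10000));
        return 0;
    }
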
.need_neg = smb2_need_neg, .negotiate = smb2_negotiate, @@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = { .set_oplock_level = smb3_set_oplock_level, .create_lease_buf = smb3_create_lease_buf, .parse_lease_buf = smb3_parse_lease_buf, - .clone_range = smb2_clone_range, + .copychunk_range = smb2_copychunk_range, .duplicate_extents = smb2_duplicate_extents, /* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */ .wp_retry_size = smb2_wp_retry_size, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 7446496850a3..02da648041fc 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) * but for time being this is our only auth choice so doesn't matter. * We just found a server which sets blob length to zero expecting raw. */ - if (blob_length == 0) + if (blob_length == 0) { cifs_dbg(FYI, "missing security blob on negprot\n"); + server->sec_ntlmssp = true; + } rc = cifs_enable_signing(server, ses->sign); if (rc) @@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, else return -EIO; - if (tcon && tcon->bad_network_name) - return -ENOENT; - unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL); if (unc_path == NULL) return -ENOMEM; @@ -1185,6 +1184,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, return -EINVAL; } + /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */ + if (tcon) + tcon->tid = 0; + rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); if (rc) { kfree(unc_path); @@ -1273,8 +1276,6 @@ tcon_exit: tcon_error_exit: if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); - if (tcon) - tcon->bad_network_name = true; } goto tcon_exit; } @@ -2177,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work) struct cifs_tcon *tcon, *tcon2; struct list_head tmp_list; int tcon_exist = false; + int rc; + int resched = false; + /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ mutex_lock(&server->reconnect_mutex); @@ -2204,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work) spin_unlock(&cifs_tcp_ses_lock); list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { - if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon)) + rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon); + if (!rc) cifs_reopen_persistent_handles(tcon); + else + resched = true; list_del_init(&tcon->rlist); cifs_put_tcon(tcon); } cifs_dbg(FYI, "Reconnecting tcons finished\n"); + if (resched) + queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); mutex_unlock(&server->reconnect_mutex); /* now we can safely release srv struct */ diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 69e35873b1de..6853454fc871 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst); extern struct mid_q_entry *smb2_setup_async_request( struct TCP_Server_Info *server, struct smb_rqst *rqst); +extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server, + __u64 ses_id); +extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server, + __u64 ses_id, __u32 tid); extern int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server); extern int smb3_calc_signature(struct smb_rqst *rqst, @@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon, extern int 
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon, const u64 persistent_fid, const u64 volatile_fid, const __u8 oplock_level); +extern int smb2_handle_cancelled_mid(char *buffer, + struct TCP_Server_Info *server); +void smb2_cancelled_close_fid(struct work_struct *work); extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_file_id, u64 volatile_file_id, struct kstatfs *FSData); diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 7c3bb1bd7eed..506b67fc93d9 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) return 0; } -struct cifs_ses * -smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) +static struct cifs_ses * +smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id) { struct cifs_ses *ses; - spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { if (ses->Suid != ses_id) continue; - spin_unlock(&cifs_tcp_ses_lock); return ses; } + + return NULL; +} + +struct cifs_ses * +smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) +{ + struct cifs_ses *ses; + + spin_lock(&cifs_tcp_ses_lock); + ses = smb2_find_smb_ses_unlocked(server, ses_id); spin_unlock(&cifs_tcp_ses_lock); + return ses; +} + +static struct cifs_tcon * +smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid) +{ + struct cifs_tcon *tcon; + + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + if (tcon->tid != tid) + continue; + ++tcon->tc_count; + return tcon; + } + return NULL; } +/* + * Obtain tcon corresponding to the tid in the given + * cifs_ses + */ + +struct cifs_tcon * +smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid) +{ + struct cifs_ses *ses; + struct cifs_tcon *tcon; + + spin_lock(&cifs_tcp_ses_lock); + ses = smb2_find_smb_ses_unlocked(server, ses_id); + if (!ses) { + spin_unlock(&cifs_tcp_ses_lock); + return NULL; + } + tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid); + spin_unlock(&cifs_tcp_ses_lock); + + return tcon; +} + int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 526f0533cb4e..f6e13a977fc8 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses, rc = wait_for_response(ses->server, midQ); if (rc != 0) { + cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid); send_cancel(ses->server, rqst, midQ); spin_lock(&GlobalMid_Lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED) { + midQ->mid_flags |= MID_WAIT_CANCELLED; midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); add_credits(ses->server, 1, optype); @@ -373,6 +373,22 @@ restart: } spin_lock_irq(&mapping->tree_lock); + if (!entry) { + /* + * We needed to drop the page_tree lock while calling + * radix_tree_preload() and we didn't have an entry to + * lock. See if another thread inserted an entry at + * our index during this time. + */ + entry = __radix_tree_lookup(&mapping->page_tree, index, + NULL, &slot); + if (entry) { + radix_tree_preload_end(); + spin_unlock_irq(&mapping->tree_lock); + goto restart; + } + } + if (pmd_downgrade) { radix_tree_delete(&mapping->page_tree, index); mapping->nrexceptional--; @@ -388,19 +404,12 @@ restart: if (err) { spin_unlock_irq(&mapping->tree_lock); /* - * Someone already created the entry? 
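
The fs/cifs/smb2transport.c refactor above splits the session lookup into an *_unlocked helper precisely so that the new smb2_find_smb_tcon() can resolve a session and then one of its tree connections under a single hold of cifs_tcp_ses_lock (also bumping tc_count while still locked), instead of locking twice and racing between the two lookups. The same shape in plain pthreads, with an invented two-level registry rather than the CIFS structures:

    #include <pthread.h>
    #include <stddef.h>

    struct tcon { unsigned int tid; int refcount; struct tcon *next; };
    struct ses  { unsigned long long id; struct tcon *tcons; struct ses *next; };

    static pthread_mutex_t ses_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct ses *ses_list;

    /* Core lookups: must be called with ses_lock held. */
    static struct ses *find_ses_unlocked(unsigned long long id)
    {
        struct ses *s;

        for (s = ses_list; s; s = s->next)
            if (s->id == id)
                return s;
        return NULL;
    }

    static struct tcon *find_tcon_unlocked(struct ses *s, unsigned int tid)
    {
        struct tcon *t;

        for (t = s->tcons; t; t = t->next)
            if (t->tid == tid) {
                t->refcount++;  /* take the ref before dropping the lock */
                return t;
            }
        return NULL;
    }

    /*
     * Composite lookup: both levels run inside one critical section,
     * so the session cannot go away between finding it and walking
     * its tcon list -- the point of splitting out the helpers.
     */
    struct tcon *find_tcon(unsigned long long ses_id, unsigned int tid)
    {
        struct ses *s;
        struct tcon *t = NULL;

        pthread_mutex_lock(&ses_lock);
        s = find_ses_unlocked(ses_id);
        if (s)
            t = find_tcon_unlocked(s, tid);
        pthread_mutex_unlock(&ses_lock);
        return t;
    }
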
This is a - * normal failure when inserting PMDs in a range - * that already contains PTEs. In that case we want - * to return -EEXIST immediately. - */ - if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD)) - goto restart; - /* - * Our insertion of a DAX PMD entry failed, most - * likely because it collided with a PTE sized entry - * at a different index in the PMD range. We haven't - * inserted anything into the radix tree and have no - * waiters to wake. + * Our insertion of a DAX entry failed, most likely + * because we were inserting a PMD entry and it + * collided with a PTE sized entry at a different + * index in the PMD range. We haven't inserted + * anything into the radix tree and have no waiters to + * wake. */ return ERR_PTR(err); } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index f493af666591..fb69ee2388db 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2466,6 +2466,7 @@ extern int ext4_setattr(struct dentry *, struct iattr *); extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int); extern void ext4_evict_inode(struct inode *); extern void ext4_clear_inode(struct inode *); +extern int ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int); extern int ext4_sync_inode(handle_t *, struct inode *); extern void ext4_dirty_inode(struct inode *, int); extern int ext4_change_inode_journal_flag(struct inode *, int); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 8210c1f43556..cefa9835f275 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = { const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, - .getattr = ext4_getattr, + .getattr = ext4_file_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4247d8d25687..b9ffa9f4191f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5390,11 +5390,46 @@ err_out: int ext4_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { - struct inode *inode; - unsigned long long delalloc_blocks; + struct inode *inode = d_inode(path->dentry); + struct ext4_inode *raw_inode; + struct ext4_inode_info *ei = EXT4_I(inode); + unsigned int flags; + + if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = ei->i_crtime.tv_sec; + stat->btime.tv_nsec = ei->i_crtime.tv_nsec; + } + + flags = ei->i_flags & EXT4_FL_USER_VISIBLE; + if (flags & EXT4_APPEND_FL) + stat->attributes |= STATX_ATTR_APPEND; + if (flags & EXT4_COMPR_FL) + stat->attributes |= STATX_ATTR_COMPRESSED; + if (flags & EXT4_ENCRYPT_FL) + stat->attributes |= STATX_ATTR_ENCRYPTED; + if (flags & EXT4_IMMUTABLE_FL) + stat->attributes |= STATX_ATTR_IMMUTABLE; + if (flags & EXT4_NODUMP_FL) + stat->attributes |= STATX_ATTR_NODUMP; + + stat->attributes_mask |= (STATX_ATTR_APPEND | + STATX_ATTR_COMPRESSED | + STATX_ATTR_ENCRYPTED | + STATX_ATTR_IMMUTABLE | + STATX_ATTR_NODUMP); - inode = d_inode(path->dentry); generic_fillattr(inode, stat); + return 0; +} + +int ext4_file_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) +{ + struct inode *inode = d_inode(path->dentry); + u64 delalloc_blocks; + + ext4_getattr(path, stat, request_mask, query_flags); /* * If there is inline data in the inode, the inode will normally not diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 6ad612c576fc..07e5e1405771 100644 --- a/fs/ext4/namei.c +++ 
b/fs/ext4/namei.c @@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = { .tmpfile = ext4_tmpfile, .rename = ext4_rename2, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, @@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = { const struct inode_operations ext4_special_inode_operations = { .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, .get_acl = ext4_get_acl, .set_acl = ext4_set_acl, diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 73b184d161fc..5c8fc53cb0e5 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -85,17 +85,20 @@ errout: const struct inode_operations ext4_encrypted_symlink_inode_operations = { .get_link = ext4_encrypted_get_link, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, }; const struct inode_operations ext4_symlink_inode_operations = { .get_link = page_get_link, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, }; const struct inode_operations ext4_fast_symlink_inode_operations = { .get_link = simple_get_link, .setattr = ext4_setattr, + .getattr = ext4_getattr, .listxattr = ext4_listxattr, }; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 7163fe014b57..dde861387a40 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; vma->vm_ops = &hugetlb_vm_ops; + /* + * Offset passed to mmap (before page shift) could have been + * negative when represented as a (l)off_t. + */ + if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0) + return -EINVAL; + if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; vma_len = (loff_t)(vma->vm_end - vma->vm_start); + len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); + /* check for overflow */ + if (len < vma_len) + return -EINVAL; inode_lock(inode); file_accessed(file); ret = -ENOMEM; - len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); - if (hugetlb_reserve_pages(inode, vma->vm_pgoff >> huge_page_order(h), len >> huge_page_shift(h), vma, @@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) ret = 0; if (vma->vm_flags & VM_WRITE && inode->i_size < len) - inode->i_size = len; + i_size_write(inode, len); out: inode_unlock(inode); diff --git a/fs/namei.c b/fs/namei.c index d41fab78798b..19dcf62133cc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags) int retval = 0; const char *s = nd->name->name; + if (!*s) + flags &= ~LOOKUP_RCU; + nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; nd->depth = 0; diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c index c4ab6fdf17a0..e1534c9bab16 100644 --- a/fs/orangefs/devorangefs-req.c +++ b/fs/orangefs/devorangefs-req.c @@ -208,14 +208,19 @@ restart: continue; /* * Skip ops whose filesystem we don't know about unless - * it is being mounted. + * it is being mounted or unmounted. It is possible for + * a filesystem we don't know about to be unmounted if + * it fails to mount in the kernel after userspace has + * been sent the mount request. */ /* XXX: is there a better way to detect this? 
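
The hugetlbfs_file_mmap() hunk above closes two integer hazards before any page reservation happens: a page offset that turns negative once widened and shifted into a signed loff_t, and the end-of-mapping sum vma_len + (pgoff << PAGE_SHIFT) wrapping around, caught by checking whether the sum came out smaller than one addend. Both checks in a self-contained sketch, assuming 4 KiB pages; the kernel does the arithmetic on signed loff_t (and is built with -fno-strict-overflow), while this version stays in unsigned space:

    #include <limits.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumed 4 KiB pages */

    /* Returns 1 if the requested range is representable, 0 otherwise. */
    static int mmap_range_ok(unsigned long long pgoff, unsigned long long vma_len)
    {
        unsigned long long off = pgoff << PAGE_SHIFT;
        unsigned long long len = off + vma_len;

        if (off > (unsigned long long)LLONG_MAX)
            return 0;  /* offset would be negative as a loff_t */
        if (len < vma_len)
            return 0;  /* off + vma_len wrapped past the top */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", mmap_range_ok(0x1000ULL, 1ULL << 20));        /* 1 */
        printf("%d\n", mmap_range_ok(0x000fffffffffffffULL, 4096));  /* 0 */
        return 0;
    }
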
*/ } else if (ret == -1 && !(op->upcall.type == ORANGEFS_VFS_OP_FS_MOUNT || op->upcall.type == - ORANGEFS_VFS_OP_GETATTR)) { + ORANGEFS_VFS_OP_GETATTR || + op->upcall.type == + ORANGEFS_VFS_OP_FS_UMOUNT)) { gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: skipping op tag %llu %s\n", llu(op->tag), get_opname_string(op)); diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 5e48a0be9761..8afac46fcc87 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -249,6 +249,7 @@ struct orangefs_sb_info_s { char devname[ORANGEFS_MAX_SERVER_ADDR_LEN]; struct super_block *sb; int mount_pending; + int no_list; struct list_head list; }; diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 67c24351a67f..629d8c917fa6 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb) if (!new_op) return -ENOMEM; new_op->upcall.req.features.features = 0; - ret = service_operation(new_op, "orangefs_features", 0); - orangefs_features = new_op->downcall.resp.features.features; + ret = service_operation(new_op, "orangefs_features", + ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); + if (!ret) + orangefs_features = + new_op->downcall.resp.features.features; + else + orangefs_features = 0; op_release(new_op); } else { orangefs_features = 0; @@ -488,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst, if (ret) { d = ERR_PTR(ret); - goto free_op; + goto free_sb_and_op; } /* @@ -514,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst, spin_unlock(&orangefs_superblocks_lock); op_release(new_op); + /* Must be removed from the list now. */ + ORANGEFS_SB(sb)->no_list = 0; + if (orangefs_userspace_version >= 20906) { new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); if (!new_op) @@ -528,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst, return dget(sb->s_root); +free_sb_and_op: + /* Will call orangefs_kill_sb with sb not in list. 
*/ + ORANGEFS_SB(sb)->no_list = 1; + deactivate_locked_super(sb); free_op: gossip_err("orangefs_mount: mount request failed with %d\n", ret); if (ret == -EINVAL) { @@ -553,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb) */ orangefs_unmount_sb(sb); - /* remove the sb from our list of orangefs specific sb's */ - - spin_lock(&orangefs_superblocks_lock); - __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */ - ORANGEFS_SB(sb)->list.prev = NULL; - spin_unlock(&orangefs_superblocks_lock); + if (!ORANGEFS_SB(sb)->no_list) { + /* remove the sb from our list of orangefs specific sb's */ + spin_lock(&orangefs_superblocks_lock); + /* not list_del_init */ + __list_del_entry(&ORANGEFS_SB(sb)->list); + ORANGEFS_SB(sb)->list.prev = NULL; + spin_unlock(&orangefs_superblocks_lock); + } /* * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 8f91ec66baa3..d04ea4349909 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table) if ((table->proc_handler == proc_dostring) || (table->proc_handler == proc_dointvec) || + (table->proc_handler == proc_douintvec) || (table->proc_handler == proc_dointvec_minmax) || (table->proc_handler == proc_dointvec_jiffies) || (table->proc_handler == proc_dointvec_userhz_jiffies) || diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f08bd31c1081..312578089544 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { - pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); + pmd_t pmd = *pmdp; + + /* See comment in change_huge_pmd() */ + pmdp_invalidate(vma, addr, pmdp); + if (pmd_dirty(*pmdp)) + pmd = pmd_mkdirty(pmd); + if (pmd_young(*pmdp)) + pmd = pmd_mkyoung(pmd); pmd = pmd_wrprotect(pmd); pmd = pmd_clear_soft_dirty(pmd); diff --git a/fs/stat.c b/fs/stat.c index fa0be59340cc..c6c963b2546b 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr); int vfs_statx_fd(unsigned int fd, struct kstat *stat, u32 request_mask, unsigned int query_flags) { - struct fd f = fdget_raw(fd); + struct fd f; int error = -EBADF; + if (query_flags & ~KSTAT_QUERY_FLAGS) + return -EINVAL; + + f = fdget_raw(fd); if (f.file) { error = vfs_getattr(&f.file->f_path, stat, request_mask, query_flags); @@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd); * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink * at the given name from being referenced. * - * The caller must have preset stat->request_mask as for vfs_getattr(). The - * flags are also used to load up stat->query_flags. - * * 0 will be returned on success, and a -ve error code if unsuccessful. */ int vfs_statx(int dfd, const char __user *filename, int flags, @@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ -static inline int __put_timestamp(struct timespec *kts, - struct statx_timestamp __user *uts) -{ - return (__put_user(kts->tv_sec, &uts->tv_sec ) || - __put_user(kts->tv_nsec, &uts->tv_nsec ) || - __put_user(0, &uts->__reserved )); -} - -/* - * Set the statx results. 
- */ -static long statx_set_result(struct kstat *stat, struct statx __user *buffer) +static noinline_for_stack int +cp_statx(const struct kstat *stat, struct statx __user *buffer) { - uid_t uid = from_kuid_munged(current_user_ns(), stat->uid); - gid_t gid = from_kgid_munged(current_user_ns(), stat->gid); - - if (__put_user(stat->result_mask, &buffer->stx_mask ) || - __put_user(stat->mode, &buffer->stx_mode ) || - __clear_user(&buffer->__spare0, sizeof(buffer->__spare0)) || - __put_user(stat->nlink, &buffer->stx_nlink ) || - __put_user(uid, &buffer->stx_uid ) || - __put_user(gid, &buffer->stx_gid ) || - __put_user(stat->attributes, &buffer->stx_attributes ) || - __put_user(stat->blksize, &buffer->stx_blksize ) || - __put_user(MAJOR(stat->rdev), &buffer->stx_rdev_major ) || - __put_user(MINOR(stat->rdev), &buffer->stx_rdev_minor ) || - __put_user(MAJOR(stat->dev), &buffer->stx_dev_major ) || - __put_user(MINOR(stat->dev), &buffer->stx_dev_minor ) || - __put_timestamp(&stat->atime, &buffer->stx_atime ) || - __put_timestamp(&stat->btime, &buffer->stx_btime ) || - __put_timestamp(&stat->ctime, &buffer->stx_ctime ) || - __put_timestamp(&stat->mtime, &buffer->stx_mtime ) || - __put_user(stat->ino, &buffer->stx_ino ) || - __put_user(stat->size, &buffer->stx_size ) || - __put_user(stat->blocks, &buffer->stx_blocks ) || - __clear_user(&buffer->__spare1, sizeof(buffer->__spare1)) || - __clear_user(&buffer->__spare2, sizeof(buffer->__spare2))) - return -EFAULT; - - return 0; + struct statx tmp; + + memset(&tmp, 0, sizeof(tmp)); + + tmp.stx_mask = stat->result_mask; + tmp.stx_blksize = stat->blksize; + tmp.stx_attributes = stat->attributes; + tmp.stx_nlink = stat->nlink; + tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid); + tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid); + tmp.stx_mode = stat->mode; + tmp.stx_ino = stat->ino; + tmp.stx_size = stat->size; + tmp.stx_blocks = stat->blocks; + tmp.stx_attributes_mask = stat->attributes_mask; + tmp.stx_atime.tv_sec = stat->atime.tv_sec; + tmp.stx_atime.tv_nsec = stat->atime.tv_nsec; + tmp.stx_btime.tv_sec = stat->btime.tv_sec; + tmp.stx_btime.tv_nsec = stat->btime.tv_nsec; + tmp.stx_ctime.tv_sec = stat->ctime.tv_sec; + tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec; + tmp.stx_mtime.tv_sec = stat->mtime.tv_sec; + tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec; + tmp.stx_rdev_major = MAJOR(stat->rdev); + tmp.stx_rdev_minor = MINOR(stat->rdev); + tmp.stx_dev_major = MAJOR(stat->dev); + tmp.stx_dev_minor = MINOR(stat->dev); + + return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } /** @@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx, struct kstat stat; int error; + if (mask & STATX__RESERVED) + return -EINVAL; if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) return -EINVAL; - if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer))) - return -EFAULT; if (filename) error = vfs_statx(dfd, filename, flags, &stat, mask); @@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx, error = vfs_statx_fd(dfd, &stat, mask, flags); if (error) return error; - return statx_set_result(&stat, buffer); + + return cp_statx(&stat, buffer); } /* Caller is here responsible for sufficient locking (ie. 
inode->i_lock) */ diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index b803213d1307..39c75a86c67f 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, { const struct sysfs_ops *ops = sysfs_file_ops(of->kn); struct kobject *kobj = of->kn->parent->priv; - size_t len; + ssize_t len; /* * If buf != of->prealloc_buf, we don't know how @@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf, if (WARN_ON_ONCE(buf != of->prealloc_buf)) return 0; len = ops->show(kobj, of->kn->priv, buf); + if (len < 0) + return len; if (pos) { if (len <= pos) return 0; len -= pos; memmove(buf, buf + pos, len); } - return min(count, len); + return min_t(ssize_t, count, len); } /* kernfs write callback for regular sysfs files */ diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1d227b0fcf49..f7555fc25877 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) * protocols: aa:... bb:... */ seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", - pending, total, UFFD_API, UFFD_API_FEATURES, + pending, total, UFFD_API, ctx->features, UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); } #endif diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h index eb00bc133bca..39f8604f764e 100644 --- a/fs/xfs/libxfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h @@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); extern int xfs_dir2_sf_removename(struct xfs_da_args *args); extern int xfs_dir2_sf_replace(struct xfs_da_args *args); -extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp, - int size); +extern int xfs_dir2_sf_verify(struct xfs_inode *ip); /* xfs_dir2_readdir.c */ extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 96b45cd6c63f..e84af093b2ab 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -632,36 +632,49 @@ xfs_dir2_sf_check( /* Verify the consistency of an inline directory. */ int xfs_dir2_sf_verify( - struct xfs_mount *mp, - struct xfs_dir2_sf_hdr *sfp, - int size) + struct xfs_inode *ip) { + struct xfs_mount *mp = ip->i_mount; + struct xfs_dir2_sf_hdr *sfp; struct xfs_dir2_sf_entry *sfep; struct xfs_dir2_sf_entry *next_sfep; char *endp; const struct xfs_dir_ops *dops; + struct xfs_ifork *ifp; xfs_ino_t ino; int i; int i8count; int offset; + int size; + int error; __uint8_t filetype; + ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL); + /* + * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops, + * so we can only trust the mountpoint to have the right pointer. + */ dops = xfs_dir_get_ops(mp, NULL); + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data; + size = ifp->if_bytes; + /* * Give up if the directory is way too short. */ - XFS_WANT_CORRUPTED_RETURN(mp, size > - offsetof(struct xfs_dir2_sf_hdr, parent)); - XFS_WANT_CORRUPTED_RETURN(mp, size >= - xfs_dir2_sf_hdr_size(sfp->i8count)); + if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) || + size < xfs_dir2_sf_hdr_size(sfp->i8count)) + return -EFSCORRUPTED; endp = (char *)sfp + size; /* Check .. 
entry */ ino = dops->sf_get_parent_ino(sfp); i8count = ino > XFS_DIR2_MAX_SHORT_INUM; - XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); + error = xfs_dir_ino_validate(mp, ino); + if (error) + return error; offset = dops->data_first_offset; /* Check all reported entries */ @@ -672,12 +685,12 @@ xfs_dir2_sf_verify( * Check the fixed-offset parts of the structure are * within the data buffer. */ - XFS_WANT_CORRUPTED_RETURN(mp, - ((char *)sfep + sizeof(*sfep)) < endp); + if (((char *)sfep + sizeof(*sfep)) >= endp) + return -EFSCORRUPTED; /* Don't allow names with known bad length. */ - XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0); - XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN); + if (sfep->namelen == 0) + return -EFSCORRUPTED; /* * Check that the variable-length part of the structure is @@ -685,33 +698,39 @@ xfs_dir2_sf_verify( * name component, so nextentry is an acceptable test. */ next_sfep = dops->sf_nextentry(sfp, sfep); - XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep); + if (endp < (char *)next_sfep) + return -EFSCORRUPTED; /* Check that the offsets always increase. */ - XFS_WANT_CORRUPTED_RETURN(mp, - xfs_dir2_sf_get_offset(sfep) >= offset); + if (xfs_dir2_sf_get_offset(sfep) < offset) + return -EFSCORRUPTED; /* Check the inode number. */ ino = dops->sf_get_ino(sfp, sfep); i8count += ino > XFS_DIR2_MAX_SHORT_INUM; - XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); + error = xfs_dir_ino_validate(mp, ino); + if (error) + return error; /* Check the file type. */ filetype = dops->sf_get_ftype(sfep); - XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX); + if (filetype >= XFS_DIR3_FT_MAX) + return -EFSCORRUPTED; offset = xfs_dir2_sf_get_offset(sfep) + dops->data_entsize(sfep->namelen); sfep = next_sfep; } - XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count); - XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp); + if (i8count != sfp->i8count) + return -EFSCORRUPTED; + if ((void *)sfep != (void *)endp) + return -EFSCORRUPTED; /* Make sure this whole thing ought to be in local format. */ - XFS_WANT_CORRUPTED_RETURN(mp, offset + - (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + - (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize); + if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize) + return -EFSCORRUPTED; return 0; } diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 9653e964eda4..8a37efe04de3 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -212,6 +212,16 @@ xfs_iformat_fork( if (error) return error; + /* Check inline dir contents. 
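
xfs_dir2_sf_verify() above now takes the inode itself, digs the shortform header and size out of the data fork, and replaces every XFS_WANT_CORRUPTED_RETURN() assertion with an explicit -EFSCORRUPTED (or xfs_dir_ino_validate()) return, so callers receive an ordinary error code. The walk is a classic bounded parse of variable-length records; a compact sketch with an invented record layout (a 1-byte name length, then the name), not the on-disk xfs_dir2 format:

    #include <stdint.h>
    #include <stdio.h>

    #define EFSCORRUPTED 117  /* same numeric value Linux uses */

    /*
     * Verify a buffer of [len][name...] records.  Mirrors the shape of
     * the xfs checks: lengths must be sane, every record must fit
     * entirely inside the buffer, and the walk must consume exactly
     * the advertised size.
     */
    static int verify_records(const uint8_t *buf, size_t size)
    {
        const uint8_t *p = buf, *endp = buf + size;

        while (p < endp) {
            uint8_t namelen = *p;

            if (namelen == 0)
                return -EFSCORRUPTED;  /* known-bad length */
            if ((size_t)(endp - p) < 1u + namelen)
                return -EFSCORRUPTED;  /* record overruns the buffer */
            p += 1 + namelen;
        }
        return 0;  /* walk landed exactly on endp */
    }

    int main(void)
    {
        uint8_t good[] = { 3, 'f', 'o', 'o', 2, 'h', 'i' };
        uint8_t bad[]  = { 9, 'f', 'o', 'o' };  /* claims bytes it lacks */

        printf("%d %d\n", verify_records(good, sizeof(good)),
                          verify_records(bad, sizeof(bad)));
        return 0;
    }
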
*/ + if (S_ISDIR(VFS_I(ip)->i_mode) && + dip->di_format == XFS_DINODE_FMT_LOCAL) { + error = xfs_dir2_sf_verify(ip); + if (error) { + xfs_idestroy_fork(ip, XFS_DATA_FORK); + return error; + } + } + if (xfs_is_reflink_inode(ip)) { ASSERT(ip->i_cowfp == NULL); xfs_ifork_init_cow(ip); @@ -322,8 +332,6 @@ xfs_iformat_local( int whichfork, int size) { - int error; - /* * If the size is unreasonable, then something * is wrong and we just bail out rather than crash in @@ -339,14 +347,6 @@ xfs_iformat_local( return -EFSCORRUPTED; } - if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { - error = xfs_dir2_sf_verify(ip->i_mount, - (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip), - size); - if (error) - return error; - } - xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); return 0; } @@ -867,7 +867,7 @@ xfs_iextents_copy( * In these cases, the format always takes precedence, because the * format indicates the current state of the fork. */ -int +void xfs_iflush_fork( xfs_inode_t *ip, xfs_dinode_t *dip, @@ -877,7 +877,6 @@ xfs_iflush_fork( char *cp; xfs_ifork_t *ifp; xfs_mount_t *mp; - int error; static const short brootflag[2] = { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; static const short dataflag[2] = @@ -886,7 +885,7 @@ xfs_iflush_fork( { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; if (!iip) - return 0; + return; ifp = XFS_IFORK_PTR(ip, whichfork); /* * This can happen if we gave up in iformat in an error path, @@ -894,19 +893,12 @@ xfs_iflush_fork( */ if (!ifp) { ASSERT(whichfork == XFS_ATTR_FORK); - return 0; + return; } cp = XFS_DFORK_PTR(dip, whichfork); mp = ip->i_mount; switch (XFS_IFORK_FORMAT(ip, whichfork)) { case XFS_DINODE_FMT_LOCAL: - if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { - error = xfs_dir2_sf_verify(mp, - (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data, - ifp->if_bytes); - if (error) - return error; - } if ((iip->ili_fields & dataflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(ifp->if_u1.if_data != NULL); @@ -959,7 +951,6 @@ xfs_iflush_fork( ASSERT(0); break; } - return 0; } /* diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 132dc59fdde6..7fb8365326d1 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h @@ -140,7 +140,7 @@ typedef struct xfs_ifork { struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); -int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, +void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, struct xfs_inode_log_item *, int); void xfs_idestroy_fork(struct xfs_inode *, int); void xfs_idata_realloc(struct xfs_inode *, int, int); diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 8b75dcea5966..828532ce0adc 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1311,8 +1311,16 @@ xfs_free_file_space( /* * Now that we've unmap all full blocks we'll have to zero out any * partial block at the beginning and/or end. xfs_zero_range is - * smart enough to skip any holes, including those we just created. + * smart enough to skip any holes, including those we just created, + * but we must take care not to zero beyond EOF and enlarge i_size. 
*/ + + if (offset >= XFS_ISIZE(ip)) + return 0; + + if (offset + len > XFS_ISIZE(ip)) + len = XFS_ISIZE(ip) - offset; + return xfs_zero_range(ip, offset, len, NULL); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c7fe2c2123ab..7605d8396596 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -50,6 +50,7 @@ #include "xfs_log.h" #include "xfs_bmap_btree.h" #include "xfs_reflink.h" +#include "xfs_dir2_priv.h" kmem_zone_t *xfs_inode_zone; @@ -3475,7 +3476,6 @@ xfs_iflush_int( struct xfs_inode_log_item *iip = ip->i_itemp; struct xfs_dinode *dip; struct xfs_mount *mp = ip->i_mount; - int error; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(xfs_isiflocked(ip)); @@ -3547,6 +3547,12 @@ xfs_iflush_int( if (ip->i_d.di_version < 3) ip->i_d.di_flushiter++; + /* Check the inline directory data. */ + if (S_ISDIR(VFS_I(ip)->i_mode) && + ip->i_d.di_format == XFS_DINODE_FMT_LOCAL && + xfs_dir2_sf_verify(ip)) + goto corrupt_out; + /* * Copy the dirty parts of the inode into the on-disk inode. We always * copy out the core of the inode, because if the inode is dirty at all @@ -3558,14 +3564,9 @@ xfs_iflush_int( if (ip->i_d.di_flushiter == DI_MAX_FLUSH) ip->i_d.di_flushiter = 0; - error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); - if (error) - return error; - if (XFS_IFORK_Q(ip)) { - error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); - if (error) - return error; - } + xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); + if (XFS_IFORK_Q(ip)) + xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); xfs_inobp_check(mp, bp); /* diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 229cc6a6d8ef..ebfc13350f9a 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -516,6 +516,20 @@ xfs_vn_getattr( stat->blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + if (ip->i_d.di_version == 3) { + if (request_mask & STATX_BTIME) { + stat->result_mask |= STATX_BTIME; + stat->btime.tv_sec = ip->i_d.di_crtime.t_sec; + stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec; + } + } + + if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) + stat->attributes |= STATX_ATTR_IMMUTABLE; + if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) + stat->attributes |= STATX_ATTR_APPEND; + if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP) + stat->attributes |= STATX_ATTR_NODUMP; switch (inode->i_mode & S_IFMT) { case S_IFBLK: diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 2a6d9b1558e0..26d67ce3c18d 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -583,7 +583,7 @@ xfs_inumbers( return error; bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); - buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); + buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP); do { struct xfs_inobt_rec_incore r; int stat; diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 7cdfe167074f..143db9c523e2 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -261,9 +261,9 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ - __start_ro_after_init = .; \ + VMLINUX_SYMBOL(__start_ro_after_init) = .; \ *(.data..ro_after_init) \ - __end_ro_after_init = .; + VMLINUX_SYMBOL(__end_ro_after_init) = .; #endif /* diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index e2ee74c20b8f..4eeda120e46d 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -225,6 +225,10 @@ struct drm_display_info { #define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2) /* drive data on neg. 
edge */ #define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3) +/* data is transmitted MSB to LSB on the bus */ +#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4) +/* data is transmitted LSB to MSB on the bus */ +#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5) /** * @bus_flags: Additional information (like pixel signal polarity) for diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0b1ce05e2c2e..fa07be197945 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -711,6 +711,17 @@ extern int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo); /** + * ttm_bo_default_iomem_pfn - get a pfn for a page offset + * + * @bo: the BO we need to look up the pfn for + * @page_offset: offset inside the BO to look up. + * + * Calculate the PFN for iomem based mappings during page fault + */ +unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo, + unsigned long page_offset); + +/** * ttm_bo_mmap - mmap out of the ttm device address space. * * @filp: filp as input from the mmap method. diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3641c6128ac2..6bbd34d25a8d 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -462,6 +462,15 @@ struct ttm_bo_driver { struct ttm_mem_reg *mem); void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); + + /** + * Return the pfn for a given page_offset inside the BO. + * + * @bo: the BO to look up the pfn for + * @page_offset: the offset to look up + */ + unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo, + unsigned long page_offset); }; /** diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h index ed953f98f0e1..1487011fe057 100644 --- a/include/drm/ttm/ttm_object.h +++ b/include/drm/ttm/ttm_object.h @@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); * @ref_type: The type of reference. * @existed: Upon completion, indicates that an identical reference object * already existed, and the refcount was upped on that object instead. + * @require_existed: Fail with -EPERM if an identical ref object didn't + * already exist. * * Checks that the base object is shareable and adds a ref object to it. 
* @@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base); */ extern int ttm_ref_object_add(struct ttm_object_file *tfile, struct ttm_base_object *base, - enum ttm_ref_type ref_type, bool *existed); + enum ttm_ref_type ref_type, bool *existed, + bool require_existed); extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, struct ttm_base_object *base); diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 932be0c8086e..e88a8e39767b 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -63,6 +63,7 @@ #define TTM_PL_FLAG_CACHED (1 << 16) #define TTM_PL_FLAG_UNCACHED (1 << 17) #define TTM_PL_FLAG_WC (1 << 18) +#define TTM_PL_FLAG_CONTIGUOUS (1 << 19) #define TTM_PL_FLAG_NO_EVICT (1 << 21) #define TTM_PL_FLAG_TOPDOWN (1 << 22) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index b72dd2ad5f44..c0b3d999c266 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu); void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); +void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, bool level); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b296a9006117..9382c5da7a2e 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -51,6 +51,7 @@ struct blk_mq_hw_ctx { atomic_t nr_active; + struct delayed_work delayed_run_work; struct delayed_work delay_work; struct hlist_node cpuhp_dead; @@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q); void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5a7da607ca04..01a696b0a4d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -610,7 +610,6 @@ struct request_queue { #define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */ #define QUEUE_FLAG_DAX 26 /* device supports DAX */ #define QUEUE_FLAG_STATS 27 /* track rq completion times */ -#define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -1673,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q, return true; } -static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, - struct bio *next) +static inline bool bio_will_gap(struct request_queue *q, + struct request *prev_rq, + struct bio *prev, + struct bio *next) { if (bio_has_data(prev) && queue_virt_boundary(q)) { struct bio_vec pb, nb; + /* + * don't merge if the 1st bio starts with non-zero + * offset, otherwise it is quite difficult to respect + * sg gap limit. We work hard to merge a huge number of small + * single bios in case of mkfs. 
+ */ + if (prev_rq) + bio_get_first_bvec(prev_rq->bio, &pb); + else + bio_get_first_bvec(prev, &pb); + if (pb.bv_offset) + return true; + + /* + * We don't need to worry about the situation that the + * merged segment ends in unaligned virt boundary: + * + * - if 'pb' ends aligned, the merged segment ends aligned + * - if 'pb' ends unaligned, the next bio must include + * one single bvec of 'nb', otherwise the 'nb' can't + * merge with 'pb' + */ bio_get_last_bvec(prev, &pb); bio_get_first_bvec(next, &nb); @@ -1691,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, static inline bool req_gap_back_merge(struct request *req, struct bio *bio) { - return bio_will_gap(req->q, req->biotail, bio); + return bio_will_gap(req->q, req, req->biotail, bio); } static inline bool req_gap_front_merge(struct request *req, struct bio *bio) { - return bio_will_gap(req->q, bio, req->bio); + return bio_will_gap(req->q, NULL, bio, req->bio); } int kblockd_schedule_work(struct work_struct *work); diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f6b43fbb141c..af9c86e958bd 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) pr_cont_kernfs_path(cgrp->kn); } +static inline void cgroup_init_kthreadd(void) +{ + /* + * kthreadd is inherited by all kthreads, keep it in the root so + * that the new kthreads are guaranteed to stay in the root until + * initialization is finished. + */ + current->no_cgroup_migration = 1; +} + +static inline void cgroup_kthread_ready(void) +{ + /* + * This kthread finished initialization. The creator should have + * set PF_NO_SETAFFINITY if this kthread should stay in the root. + */ + current->no_cgroup_migration = 0; +} + #else /* !CONFIG_CGROUPS */ struct cgroup_subsys_state; @@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {} static inline int cgroup_init_early(void) { return 0; } static inline int cgroup_init(void) { return 0; } +static inline void cgroup_init_kthreadd(void) {} +static inline void cgroup_kthread_ready(void) {} static inline bool task_under_cgroup_hierarchy(struct task_struct *task, struct cgroup *ancestor) diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index bfb3704fc6fc..79f27d60ec66 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,13 +39,13 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @kmap_atomic: maps a page from the buffer into kernel address - * space, users may not block until the subsequent unmap call. - * This callback must not sleep. - * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer. - * This Callback must not sleep. - * @kmap: maps a page from the buffer into kernel address space. - * @kunmap: [optional] unmaps a page from the buffer. + * @map_atomic: maps a page from the buffer into kernel address + * space, users may not block until the subsequent unmap call. + * This callback must not sleep. + * @unmap_atomic: [optional] unmaps a atomically mapped page from the buffer. + * This Callback must not sleep. + * @map: maps a page from the buffer into kernel address space. + * @unmap: [optional] unmaps a page from the buffer. * @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. 
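
The bio_will_gap() rework above declines a merge when the request's first bvec starts at a non-zero offset, since splitting such a request against the device's virtual boundary later is hard to honor; the pre-existing last/first-bvec test then rules on the seam between the two bios. That seam test is pure mask arithmetic: a gap exists unless the first segment ends on the boundary and the second starts on it. Roughly, with a hypothetical segment struct and the mask playing the role of queue_virt_boundary():

    #include <stdbool.h>
    #include <stdio.h>

    /* One memory segment of an I/O, like a struct bio_vec. */
    struct seg { unsigned long addr; unsigned int len; };

    /*
     * mask is boundary_size - 1, e.g. 0xfff for a 4 KiB virt boundary.
     * Any low bits left over from either the end of a or the start of b
     * mean the hardware would see a gap, so the segments cannot merge.
     */
    static bool segs_gap(struct seg a, struct seg b, unsigned long mask)
    {
        return ((a.addr + a.len) | b.addr) & mask;
    }

    int main(void)
    {
        struct seg a = { 0x10000, 0x1000 };  /* ends at 0x11000    */
        struct seg b = { 0x11000, 0x200 };   /* starts on boundary */
        struct seg c = { 0x12345, 0x200 };   /* misaligned start   */

        printf("%d %d\n", segs_gap(a, b, 0xfff),   /* 0: mergeable */
                          segs_gap(a, c, 0xfff));  /* 1: gap       */
        return 0;
    }
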
* @vunmap: [optional] unmaps a vmap from the buffer @@ -206,10 +206,10 @@ struct dma_buf_ops { * to be restarted. */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); - void *(*kmap_atomic)(struct dma_buf *, unsigned long); - void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); - void *(*kmap)(struct dma_buf *, unsigned long); - void (*kunmap)(struct dma_buf *, unsigned long, void *); + void *(*map_atomic)(struct dma_buf *, unsigned long); + void (*unmap_atomic)(struct dma_buf *, unsigned long, void *); + void *(*map)(struct dma_buf *, unsigned long); + void (*unmap)(struct dma_buf *, unsigned long, void *); /** * @mmap: diff --git a/include/linux/elevator.h b/include/linux/elevator.h index aebecc4ed088..22d39e8d4de1 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *); extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); extern int elevator_init(struct request_queue *, char *); -extern void elevator_exit(struct elevator_queue *); +extern void elevator_exit(struct request_queue *, struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); extern bool elv_bio_merge_ok(struct request *, struct bio *); extern struct elevator_queue *elevator_alloc(struct request_queue *, diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index eafc965b3eb8..dc30f3d057eb 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h @@ -96,6 +96,9 @@ #define GICH_MISR_EOI (1 << 0) #define GICH_MISR_U (1 << 1) +#define GICV_PMR_PRIORITY_SHIFT 3 +#define GICV_PMR_PRIORITY_MASK (0x1f << GICV_PMR_PRIORITY_SHIFT) + #ifndef __ASSEMBLY__ #include <linux/irqdomain.h> diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 7a01c94496f1..3eef9fb9968a 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h @@ -35,10 +35,11 @@ * Max bus-specific overhead incurred by request/responses. * I2C requires 1 additional byte for requests. * I2C requires 2 additional bytes for responses. + * SPI requires up to 32 additional bytes for responses. * */ #define EC_PROTO_VERSION_UNKNOWN 0 #define EC_MAX_REQUEST_OVERHEAD 1 -#define EC_MAX_RESPONSE_OVERHEAD 2 +#define EC_MAX_RESPONSE_OVERHEAD 32 /* * Command interface between EC and AP, for LPC, I2C and SPI interfaces. diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 51891fb0d3ce..c91b3bcd158f 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) ___pud; \ }) -#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \ -({ \ - unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \ - pmd_t ___pmd; \ - \ - ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \ - mmu_notifier_invalidate_range(__mm, ___haddr, \ - ___haddr + HPAGE_PMD_SIZE); \ - \ - ___pmd; \ -}) - /* * set_pte_at_notify() sets the pte _after_ running the notifier. 
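
Among the header additions above, arm-gic.h gains GICV_PMR_PRIORITY_SHIFT and GICV_PMR_PRIORITY_MASK, the standard shift-and-mask pair for a 5-bit priority field occupying bits 7:3. Reading and writing such a field is the usual clear-then-or sequence; an illustrative sketch on a plain variable, not a real register access:

    #include <stdint.h>
    #include <stdio.h>

    #define GICV_PMR_PRIORITY_SHIFT 3
    #define GICV_PMR_PRIORITY_MASK  (0x1f << GICV_PMR_PRIORITY_SHIFT)

    /* Extract the 5-bit priority field from a PMR value. */
    static uint32_t pmr_get_priority(uint32_t pmr)
    {
        return (pmr & GICV_PMR_PRIORITY_MASK) >> GICV_PMR_PRIORITY_SHIFT;
    }

    /* Replace the priority field, leaving all other bits untouched. */
    static uint32_t pmr_set_priority(uint32_t pmr, uint32_t prio)
    {
        return (pmr & ~(uint32_t)GICV_PMR_PRIORITY_MASK) |
               ((prio << GICV_PMR_PRIORITY_SHIFT) & GICV_PMR_PRIORITY_MASK);
    }

    int main(void)
    {
        uint32_t pmr = pmr_set_priority(0, 0x1c);

        printf("pmr=0x%02x priority=0x%02x\n", pmr, pmr_get_priority(pmr));
        return 0;
    }
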
* This is safe to start by updating the secondary MMUs, because the primary MMU @@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm) #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush #define pudp_huge_clear_flush_notify pudp_huge_clear_flush -#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear #define set_pte_at_notify set_pte_at #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c43d435d4225..9061780b141f 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -64,26 +64,26 @@ enum { * RDMA_QPTYPE field */ enum { - NVMF_RDMA_QPTYPE_CONNECTED = 0, /* Reliable Connected */ - NVMF_RDMA_QPTYPE_DATAGRAM = 1, /* Reliable Datagram */ + NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */ + NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */ }; /* RDMA QP Service Type codes for Discovery Log Page entry TSAS * RDMA_QPTYPE field */ enum { - NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 0, /* No Provider Specified */ - NVMF_RDMA_PRTYPE_IB = 1, /* InfiniBand */ - NVMF_RDMA_PRTYPE_ROCE = 2, /* InfiniBand RoCE */ - NVMF_RDMA_PRTYPE_ROCEV2 = 3, /* InfiniBand RoCEV2 */ - NVMF_RDMA_PRTYPE_IWARP = 4, /* IWARP */ + NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */ + NVMF_RDMA_PRTYPE_IB = 2, /* InfiniBand */ + NVMF_RDMA_PRTYPE_ROCE = 3, /* InfiniBand RoCE */ + NVMF_RDMA_PRTYPE_ROCEV2 = 4, /* InfiniBand RoCEV2 */ + NVMF_RDMA_PRTYPE_IWARP = 5, /* IWARP */ }; /* RDMA Connection Management Service Type codes for Discovery Log Page * entry TSAS RDMA_CMS field */ enum { - NVMF_RDMA_CMS_RDMA_CM = 0, /* Sockets based enpoint addressing */ + NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */ }; #define NVMF_AQ_DEPTH 32 diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 8ce2d87a238b..5e45385c5bdc 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -145,8 +145,9 @@ struct pinctrl_desc { extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev); +extern int pinctrl_enable(struct pinctrl_dev *pctldev); -/* Please use pinctrl_register_and_init() instead */ +/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); diff --git a/include/linux/reset.h b/include/linux/reset.h index 96fb139bdd08..13d8681210d5 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc); struct reset_control *__of_reset_control_get(struct device_node *node, const char *id, int index, bool shared, bool optional); +struct reset_control *__reset_control_get(struct device *dev, const char *id, + int index, bool shared, + bool optional); void reset_control_put(struct reset_control *rstc); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, @@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get( return optional ? NULL : ERR_PTR(-ENOTSUPP); } +static inline struct reset_control *__reset_control_get( + struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + return optional ? 
NULL : ERR_PTR(-ENOTSUPP); +} + static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional) @@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, - false); + return __reset_control_get(dev, id, 0, false, false); } /** @@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, - false); + return __reset_control_get(dev, id, 0, true, false); } static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, - true); + return __reset_control_get(dev, id, 0, false, true); } static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, - true); + return __reset_control_get(dev, id, 0, true, true); } /** diff --git a/include/linux/sched.h b/include/linux/sched.h index d67eee84fd43..4cf9a59a4d08 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -604,6 +604,10 @@ struct task_struct { #ifdef CONFIG_COMPAT_BRK unsigned brk_randomized:1; #endif +#ifdef CONFIG_CGROUPS + /* disallow userland-initiated cgroup migration */ + unsigned no_cgroup_migration:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ diff --git a/include/linux/stat.h b/include/linux/stat.h index c76e524fb34b..64b6b3aece21 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -26,6 +26,7 @@ struct kstat { unsigned int nlink; uint32_t blksize; /* Preferred I/O size */ u64 attributes; + u64 attributes_mask; #define KSTAT_ATTR_FS_IOC_FLAGS \ (STATX_ATTR_COMPRESSED | \ STATX_ATTR_IMMUTABLE | \ diff --git a/include/linux/uio.h b/include/linux/uio.h index 804e34c6f981..f2d36a3d3005 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -39,7 +39,10 @@ struct iov_iter { }; union { unsigned long nr_segs; - int idx; + struct { + int idx; + int start_idx; + }; }; }; @@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); size_t iov_iter_copy_from_user_atomic(struct page *page, struct iov_iter *i, unsigned long offset, size_t bytes); void iov_iter_advance(struct iov_iter *i, size_t bytes); +void iov_iter_revert(struct iov_iter *i, size_t bytes); int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 04b0d3f95043..7edfbdb55a99 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -167,6 +167,7 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; + int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); void (*remove)(struct virtio_device *dev); diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 1f71ee5ab518..069582ee5d7f 100644 --- a/include/net/sctp/sctp.h +++ 
b/include/net/sctp/sctp.h @@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu) return frag; } -static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc) +static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc) { - - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); asoc->pmtu_pending = 0; } @@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { - if (t->dst && (!dst_check(t->dst, t->dst_cookie) || - t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), - SCTP_DEFAULT_MINSEGMENT))) + if (t->dst && !dst_check(t->dst, t->dst_cookie)) sctp_transport_dst_release(t); return t->dst; } +static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) +{ + __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)), + SCTP_DEFAULT_MINSEGMENT); + + if (t->pathmtu == pmtu) + return true; + + t->pathmtu = pmtu; + + return false; +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 592decebac75..138f8615acf0 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -377,7 +377,8 @@ typedef struct sctp_sender_hb_info { __u64 hb_nonce; } sctp_sender_hb_info_t; -struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp); +int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp); +int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp); void sctp_stream_free(struct sctp_stream *stream); void sctp_stream_clear(struct sctp_stream *stream); @@ -499,7 +500,6 @@ struct sctp_datamsg { /* Did the messenge fail to send? */ int send_error; u8 send_failed:1, - force_delay:1, can_delay; /* should this message be Nagle delayed */ }; @@ -952,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t); void sctp_transport_burst_limited(struct sctp_transport *); void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); -void sctp_transport_reset(struct sctp_transport *); -void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32); +void sctp_transport_reset(struct sctp_transport *t); +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); @@ -1878,6 +1878,7 @@ struct sctp_association { __u8 need_ecne:1, /* Need to send an ECNE Chunk? */ temp:1, /* Is it a temporary association? 
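/*
 * [Editorial sketch -- not part of this patch] sctp_transport_dst_check()
 * above now only validates the cached route; the new
 * sctp_transport_pmtu_check() refreshes t->pathmtu from the route and
 * reports whether it was already current, so a transmit path can resync
 * only on change. Hypothetical caller:
 */
static void example_tx_config(struct sctp_transport *t)
{
	if (t->dst && !sctp_transport_pmtu_check(t))
		/* pathmtu just moved; propagate the new value */
		sctp_transport_update_pmtu(t, t->pathmtu);
}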
*/ + force_delay:1, prsctp_enable:1, reconf_enable:1; @@ -1953,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); -void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *); +void sctp_assoc_sync_pmtu(struct sctp_association *asoc); void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); void sctp_assoc_set_primary(struct sctp_association *, diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 4b784b6e21c0..ccfad0e9c2cd 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -117,6 +117,7 @@ enum transport_state_table { TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19, + TRANSPORT_COMPLETE_QF_ERR = 20, }; /* Used for struct se_cmd->se_cmd_flags */ @@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id; int tg_pt_gp_valid_id; int tg_pt_gp_alua_supported_states; - int tg_pt_gp_alua_pending_state; - int tg_pt_gp_alua_previous_state; int tg_pt_gp_alua_access_status; int tg_pt_gp_alua_access_type; int tg_pt_gp_nonop_delay_msecs; @@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp { int tg_pt_gp_pref; int tg_pt_gp_write_metadata; u32 tg_pt_gp_members; - atomic_t tg_pt_gp_alua_access_state; + int tg_pt_gp_alua_access_state; atomic_t tg_pt_gp_ref_cnt; spinlock_t tg_pt_gp_lock; - struct mutex tg_pt_gp_md_mutex; + struct mutex tg_pt_gp_transition_mutex; struct se_device *tg_pt_gp_dev; struct config_group tg_pt_gp_group; struct list_head tg_pt_gp_list; struct list_head tg_pt_gp_lun_list; struct se_lun *tg_pt_gp_alua_lun; struct se_node_acl *tg_pt_gp_alua_nacl; - struct work_struct tg_pt_gp_transition_work; - struct completion *tg_pt_gp_transition_complete; }; struct t10_vpd { @@ -705,6 +702,7 @@ struct se_lun { u64 unpacked_lun; #define SE_LUN_LINK_MAGIC 0xffff7771 u32 lun_link_magic; + bool lun_shutdown; bool lun_access_ro; u32 lun_index; diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 2584c1cca42f..76f6f78a352b 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo { * one or more cmdstream buffers. This allows for conditional execution * (context-restore), and IB buffers needed for per tile/bin draw cmds. 
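/*
 * [Editorial sketch -- not part of this patch] Userspace view of the
 * ETNA_SUBMIT_FENCE_FD_* flags and the in/out fence_fd field defined just
 * below; buffer and cmdstream setup are elided, so this is illustrative
 * rather than a complete submission.
 */
static int submit_with_fences(int drm_fd, int in_fence_fd)
{
	struct drm_etnaviv_gem_submit req = {
		.pipe     = ETNA_PIPE_3D,
		.flags    = ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_FENCE_FD_OUT,
		.fence_fd = in_fence_fd,  /* in: fence to wait on before execution */
		/* .bos/.relocs/.stream/... filled in as before */
	};

	if (ioctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req))
		return -1;
	return req.fence_fd;	/* out: fence fd signalled on completion */
}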
*/ +#define ETNA_SUBMIT_NO_IMPLICIT 0x0001 +#define ETNA_SUBMIT_FENCE_FD_IN 0x0002 +#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004 +#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \ + ETNA_SUBMIT_FENCE_FD_IN | \ + ETNA_SUBMIT_FENCE_FD_OUT) #define ETNA_PIPE_3D 0x00 #define ETNA_PIPE_2D 0x01 #define ETNA_PIPE_VG 0x02 @@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit { __u64 bos; /* in, ptr to array of submit_bo's */ __u64 relocs; /* in, ptr to array of submit_reloc's */ __u64 stream; /* in, ptr to cmdstream */ + __u32 flags; /* in, mask of ETNA_SUBMIT_x */ + __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */ }; /* The normal way to synchronize with the GPU is just to CPU_PREP on diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 4d5d6a2bc59e..a4a189a240d7 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -72,6 +72,7 @@ struct drm_msm_timespec { #define MSM_PARAM_CHIP_ID 0x03 #define MSM_PARAM_MAX_FREQ 0x04 #define MSM_PARAM_TIMESTAMP 0x05 +#define MSM_PARAM_GMEM_BASE 0x06 struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index dd9820b1c779..f8d9fed17ba9 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -445,6 +445,7 @@ header-y += unistd.h header-y += unix_diag.h header-y += usbdevice_fs.h header-y += usbip.h +header-y += userio.h header-y += utime.h header-y += utsname.h header-y += uuid.h diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 51a6b86e3700..d538897b8e08 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -114,7 +114,7 @@ struct statx { __u64 stx_ino; /* Inode number */ __u64 stx_size; /* File size */ __u64 stx_blocks; /* Number of 512-byte blocks allocated */ - __u64 __spare1[1]; + __u64 stx_attributes_mask; /* Mask to show what's supported in stx_attributes */ /* 0x40 */ struct statx_timestamp stx_atime; /* Last access time */ struct statx_timestamp stx_btime; /* File creation time */ @@ -152,9 +152,10 @@ struct statx { #define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ #define STATX_ALL 0x00000fffU /* All currently supported flags */ +#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ /* - * Attributes to be found in stx_attributes + * Attributes to be found in stx_attributes and masked in stx_attributes_mask. * * These give information about the features or the state of a file that might * be of use to ordinary userspace programs such as GUIs or ls rather than diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 15b4385a2be1..90007a1abcab 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -79,7 +79,7 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_OFF(msix_enabled) ((msix_enabled) ? 
24 : 20) /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */ -#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled) +#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled) /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/kernel/audit.c b/kernel/audit.c index 2f4964cfde0b..a871bf80fde1 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist); /* queue msgs to send via kauditd_task */ static struct sk_buff_head audit_queue; -static void kauditd_hold_skb(struct sk_buff *skb); /* queue msgs due to temporary unicast send problems */ static struct sk_buff_head audit_retry_queue; /* queue msgs waiting for new auditd connection */ @@ -454,30 +453,6 @@ static void auditd_set(int pid, u32 portid, struct net *net) } /** - * auditd_reset - Disconnect the auditd connection - * - * Description: - * Break the auditd/kauditd connection and move all the queued records into the - * hold queue in case auditd reconnects. - */ -static void auditd_reset(void) -{ - struct sk_buff *skb; - - /* if it isn't already broken, break the connection */ - rcu_read_lock(); - if (auditd_conn.pid) - auditd_set(0, 0, NULL); - rcu_read_unlock(); - - /* flush all of the main and retry queues to the hold queue */ - while ((skb = skb_dequeue(&audit_retry_queue))) - kauditd_hold_skb(skb); - while ((skb = skb_dequeue(&audit_queue))) - kauditd_hold_skb(skb); -} - -/** * kauditd_print_skb - Print the audit record to the ring buffer * @skb: audit record * @@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb) { /* put the record back in the queue at the same place */ skb_queue_head(&audit_hold_queue, skb); - - /* fail the auditd connection */ - auditd_reset(); } /** @@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb) /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); kfree_skb(skb); - - /* fail the auditd connection */ - auditd_reset(); } /** @@ -567,6 +536,30 @@ static void kauditd_retry_skb(struct sk_buff *skb) } /** + * auditd_reset - Disconnect the auditd connection + * + * Description: + * Break the auditd/kauditd connection and move all the queued records into the + * hold queue in case auditd reconnects. 
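/*
 * [Editorial sketch -- not part of this patch] auditd_reset() is being
 * moved below the skb hooks so its forward declaration can go away, and
 * the hold/rehold hooks no longer tear down the connection themselves;
 * kauditd_thread() decides that at the call sites reworked below, roughly:
 */
rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
			kauditd_send_multicast_skb, kauditd_retry_skb);
if (sk == NULL || rc < 0)
	auditd_reset();	/* single teardown point, not inside the hooks */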
+ */ +static void auditd_reset(void) +{ + struct sk_buff *skb; + + /* if it isn't already broken, break the connection */ + rcu_read_lock(); + if (auditd_conn.pid) + auditd_set(0, 0, NULL); + rcu_read_unlock(); + + /* flush all of the main and retry queues to the hold queue */ + while ((skb = skb_dequeue(&audit_retry_queue))) + kauditd_hold_skb(skb); + while ((skb = skb_dequeue(&audit_queue))) + kauditd_hold_skb(skb); +} + +/** * auditd_send_unicast_skb - Send a record via unicast to auditd * @skb: audit record * @@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy) NULL, kauditd_rehold_skb); if (rc < 0) { sk = NULL; + auditd_reset(); goto main_queue; } @@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy) NULL, kauditd_hold_skb); if (rc < 0) { sk = NULL; + auditd_reset(); goto main_queue; } @@ -775,16 +770,18 @@ main_queue: * unicast, dump failed record sends to the retry queue; if * sk == NULL due to previous failures we will just do the * multicast send and move the record to the retry queue */ - kauditd_send_queue(sk, portid, &audit_queue, 1, - kauditd_send_multicast_skb, - kauditd_retry_skb); + rc = kauditd_send_queue(sk, portid, &audit_queue, 1, + kauditd_send_multicast_skb, + kauditd_retry_skb); + if (sk == NULL || rc < 0) + auditd_reset(); + sk = NULL; /* drop our netns reference, no auditd sends past this line */ if (net) { put_net(net); net = NULL; } - sk = NULL; /* we have processed all the queues so wake everyone */ wake_up(&audit_backlog_wait); diff --git a/kernel/audit.h b/kernel/audit.h index 0f1cf6d1878a..0d87f8ab8778 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -333,13 +333,7 @@ extern u32 audit_sig_sid; extern int audit_filter(int msgtype, unsigned int listtype); #ifdef CONFIG_AUDITSYSCALL -extern int __audit_signal_info(int sig, struct task_struct *t); -static inline int audit_signal_info(int sig, struct task_struct *t) -{ - if (auditd_test_task(t) || (audit_signals && !audit_dummy_context())) - return __audit_signal_info(sig, t); - return 0; -} +extern int audit_signal_info(int sig, struct task_struct *t); extern void audit_filter_inodes(struct task_struct *, struct audit_context *); extern struct list_head *audit_killed_trees(void); #else diff --git a/kernel/auditsc.c b/kernel/auditsc.c index e59ffc7fc522..1c2333155893 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t) * If the audit subsystem is being terminated, record the task (pid) * and uid that is doing that. 
*/ -int __audit_signal_info(int sig, struct task_struct *t) +int audit_signal_info(int sig, struct task_struct *t) { struct audit_aux_data_pids *axp; struct task_struct *tsk = current; struct audit_context *ctx = tsk->audit_context; kuid_t uid = current_uid(), t_uid = task_uid(t); - if (auditd_test_task(t)) { - if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { - audit_sig_pid = task_tgid_nr(tsk); - if (uid_valid(tsk->loginuid)) - audit_sig_uid = tsk->loginuid; - else - audit_sig_uid = uid; - security_task_getsecid(tsk, &audit_sig_sid); - } - if (!audit_signals || audit_dummy_context()) - return 0; + if (auditd_test_task(t) && + (sig == SIGTERM || sig == SIGHUP || + sig == SIGUSR1 || sig == SIGUSR2)) { + audit_sig_pid = task_tgid_nr(tsk); + if (uid_valid(tsk->loginuid)) + audit_sig_uid = tsk->loginuid; + else + audit_sig_uid = uid; + security_task_getsecid(tsk, &audit_sig_sid); } + if (!audit_signals || audit_dummy_context()) + return 0; + /* optimize the common case by putting first signal recipient directly * in audit_context */ if (!ctx->target_pid) { diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f45827e205d3..b4f1cb0c5ac7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1162,12 +1162,12 @@ out: LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ off = IMM; load_word: - /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are - * only appearing in the programs where ctx == - * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] - * == BPF_R6, bpf_convert_filter() saves it in BPF_R6, - * internal BPF verifier will check that BPF_R6 == - * ctx. + /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only + * appearing in the programs where ctx == skb + * (see may_access_skb() in the verifier). All programs + * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6, + * bpf_convert_filter() saves it in BPF_R6, internal BPF + * verifier will check that BPF_R6 == ctx. * * BPF_ABS and BPF_IND are wrappers of function calls, * so they scratch BPF_R1-BPF_R5 registers, preserve diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 796b68d00119..a834068a400e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) } } -static int check_ptr_alignment(struct bpf_verifier_env *env, - struct bpf_reg_state *reg, int off, int size) +static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, + int off, int size) { - if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) { - if (off % size != 0) { - verbose("misaligned access off %d size %d\n", - off, size); - return -EACCES; - } else { - return 0; - } - } - - if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) - /* misaligned access to packet is ok on x86,arm,arm64 */ - return 0; - if (reg->id && size != 1) { - verbose("Unknown packet alignment. Only byte-sized access allowed\n"); + verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n"); return -EACCES; } /* skb->data is NET_IP_ALIGN-ed */ - if (reg->type == PTR_TO_PACKET && - (NET_IP_ALIGN + reg->off + off) % size != 0) { + if ((NET_IP_ALIGN + reg->off + off) % size != 0) { verbose("misaligned packet access off %d+%d+%d size %d\n", NET_IP_ALIGN, reg->off, off, size); return -EACCES; } + return 0; } +static int check_val_ptr_alignment(const struct bpf_reg_state *reg, + int size) +{ + if (size != 1) { + verbose("Unknown alignment. 
Only byte-sized access allowed in value access.\n"); + return -EACCES; + } + + return 0; +} + +static int check_ptr_alignment(const struct bpf_reg_state *reg, + int off, int size) +{ + switch (reg->type) { + case PTR_TO_PACKET: + return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : + check_pkt_ptr_alignment(reg, off, size); + case PTR_TO_MAP_VALUE_ADJ: + return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 : + check_val_ptr_alignment(reg, size); + default: + if (off % size != 0) { + verbose("misaligned access off %d size %d\n", + off, size); + return -EACCES; + } + + return 0; + } +} + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, if (size < 0) return size; - err = check_ptr_alignment(env, reg, off, size); + err = check_ptr_alignment(reg, off, size); if (err) return err; @@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) * register as unknown. */ if (env->allow_ptr_leaks && + BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD && (dst_reg->type == PTR_TO_MAP_VALUE || dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) dst_reg->type = PTR_TO_MAP_VALUE_ADJ; @@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) - regs[i].range = dst_reg->off; + /* keep the maximum range already checked */ + regs[i].range = max(regs[i].range, dst_reg->off); for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] != STACK_SPILL) continue; reg = &state->spilled_regs[i / BPF_REG_SIZE]; if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) - reg->range = dst_reg->off; + reg->range = max(reg->range, dst_reg->off); } } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 48851327a15e..687f5e0194ef 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, tsk = tsk->group_leader; /* - * Workqueue threads may acquire PF_NO_SETAFFINITY and become - * trapped in a cpuset, or RT worker may be born in a cgroup - * with no rt_runtime allocated. Just say no. + * kthreads may acquire PF_NO_SETAFFINITY during initialization. + * If userland migrates such a kthread to a non-root cgroup, it can + * become trapped in a cpuset, or RT kthread may be born in a + * cgroup with no rt_runtime allocated. Just say no. 
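/*
 * [Editorial sketch -- not part of this patch] Why find_good_pkt_pointers()
 * above must max() the range instead of overwriting it: a later, narrower
 * bounds check on the same packet pointer used to shrink an already-proven
 * range. Illustrative XDP-style program:
 */
static int xdp_example(struct xdp_md *ctx)
{
	unsigned char *data = (void *)(long)ctx->data;
	unsigned char *data_end = (void *)(long)ctx->data_end;

	if (data + 14 > data_end)	/* proves range = 14 */
		return XDP_DROP;
	if (data + 4 > data_end)	/* redundant; previously reset range to 4 */
		return XDP_DROP;
	/* byte 12 needs range >= 13: rejected before the max(), accepted now */
	return data[12] ? XDP_PASS : XDP_DROP;
}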
*/ - if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { + if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) { ret = -EINVAL; goto out_unlock_rcu; } diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 4544b115f5eb..d052947fe785 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk) struct cpumask * irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) { - int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec; + int n, nodes, cpus_per_vec, extra_vecs, curvec; int affv = nvecs - affd->pre_vectors - affd->post_vectors; int last_affv = affv + affd->pre_vectors; nodemask_t nodemsk = NODE_MASK_NONE; @@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) goto done; } - /* Spread the vectors per node */ - vecs_per_node = affv / nodes; - /* Account for rounding errors */ - extra_vecs = affv - (nodes * vecs_per_node); - for_each_node_mask(n, nodemsk) { - int ncpus, v, vecs_to_assign = vecs_per_node; + int ncpus, v, vecs_to_assign, vecs_per_node; + + /* Spread the vectors per node */ + vecs_per_node = (affv - curvec) / nodes; /* Get the cpus on this node which are in the mask */ cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n)); /* Calculate the number of cpus per vector */ ncpus = cpumask_weight(nmsk); + vecs_to_assign = min(vecs_per_node, ncpus); + + /* Account for rounding errors */ + extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign); for (v = 0; curvec < last_affv && v < vecs_to_assign; curvec++, v++) { @@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) /* Account for extra vectors to compensate rounding errors */ if (extra_vecs) { cpus_per_vec++; - if (!--extra_vecs) - vecs_per_node++; + --extra_vecs; } irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); } if (curvec >= last_affv) break; + --nodes; } done: diff --git a/kernel/kthread.c b/kernel/kthread.c index 2f26adea0f84..26db528c1d88 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -20,6 +20,7 @@ #include <linux/freezer.h> #include <linux/ptrace.h> #include <linux/uaccess.h> +#include <linux/cgroup.h> #include <trace/events/sched.h> static DEFINE_SPINLOCK(kthread_create_lock); @@ -225,6 +226,7 @@ static int kthread(void *_create) ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { + cgroup_kthread_ready(); __kthread_parkme(self); ret = threadfn(data); } @@ -538,6 +540,7 @@ int kthreadd(void *unused) set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; + cgroup_init_kthreadd(); for (;;) { set_current_state(TASK_INTERRUPTIBLE); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 0af928712174..266ddcc1d8bb 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task) WARN_ON(!task->ptrace || task->parent != current); + /* + * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely. + * Recheck state under the lock to close this race. 
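/*
 * [Editorial sketch -- not part of this patch] The irq_create_affinity_masks()
 * rework above recomputes the per-node share on every iteration instead of
 * once up front. Worked example, affv = 7 vectors over 3 nodes:
 *
 *   node 1: vecs_per_node = (7 - 0) / 3 = 2  ->  curvec = 2, nodes = 2
 *   node 2: vecs_per_node = (7 - 2) / 2 = 2  ->  curvec = 4, nodes = 1
 *   node 3: vecs_per_node = (7 - 4) / 1 = 3  ->  curvec = 7
 *
 * so rounding leftovers flow to later nodes, and each share is further
 * clamped by vecs_to_assign = min(vecs_per_node, ncpus) so a node never
 * receives more vectors than it has online CPUs.
 */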
+ */ spin_lock_irq(&task->sighand->siglock); - if (__fatal_signal_pending(task)) - wake_up_state(task, __TASK_TRACED); - else - task->state = TASK_TRACED; + if (task->state == __TASK_TRACED) { + if (__fatal_signal_pending(task)) + wake_up_state(task, __TASK_TRACED); + else + task->state = TASK_TRACED; + } spin_unlock_irq(&task->sighand->siglock); } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index acf0a5a06da7..8c8714fcb53c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp, if (write) { if (*negp) return -EINVAL; + if (*lvalp > UINT_MAX) + return -EINVAL; *valp = *lvalp; } else { unsigned int val = *valp; + *negp = false; *lvalp = (unsigned long)val; } return 0; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b9691ee8f6c1..27bb2e61276e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) ftrace_probe_registered = 1; } -static void __disable_ftrace_function_probe(void) +static bool __disable_ftrace_function_probe(void) { int i; if (!ftrace_probe_registered) - return; + return false; for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { struct hlist_head *hhd = &ftrace_func_hash[i]; if (hhd->first) - return; + return false; } /* no more funcs left */ ftrace_shutdown(&trace_probe_ops, 0); ftrace_probe_registered = 0; + return true; } @@ -3901,6 +3902,7 @@ static void __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, void *data, int flags) { + struct ftrace_ops_hash old_hash_ops; struct ftrace_func_entry *rec_entry; struct ftrace_func_probe *entry; struct ftrace_func_probe *p; @@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, struct hlist_node *tmp; char str[KSYM_SYMBOL_LEN]; int i, ret; + bool disabled; if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) func_g.search = NULL; @@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, mutex_lock(&trace_probe_ops.func_hash->regex_lock); + old_hash_ops.filter_hash = old_hash; + /* Probes only have filters */ + old_hash_ops.notrace_hash = NULL; + hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); if (!hash) /* Hmm, should report this somehow */ @@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, } } mutex_lock(&ftrace_lock); - __disable_ftrace_function_probe(); + disabled = __disable_ftrace_function_probe(); /* * Remove after the disable is called. Otherwise, if the last * probe is removed, a null hash means *all enabled*. 
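/*
 * [Editorial sketch -- not part of this patch] The new *lvalp > UINT_MAX
 * test in do_proc_douintvec_conv() above matters on 64-bit, where the
 * plain assignment silently truncated out-of-range sysctl writes.
 * Standalone demonstration:
 */
#include <stdio.h>

int main(void)
{
	unsigned long lval = 0x100000000UL;	/* 2^32, a valid unsigned long */
	unsigned int val = lval;		/* truncates to 0 */

	printf("%u\n", val);	/* prints 0; the sysctl now returns -EINVAL instead */
	return 0;
}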
*/ ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); + + /* still need to update the function call sites */ + if (ftrace_enabled && !disabled) + ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, + &old_hash_ops); synchronize_sched(); if (!ret) free_ftrace_hash_rcu(old_hash); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 96fc3c043ad6..54e7a90db848 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -4826,9 +4826,9 @@ static __init int test_ringbuffer(void) rb_data[cpu].cnt = cpu; rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], "rbtester/%d", cpu); - if (WARN_ON(!rb_threads[cpu])) { + if (WARN_ON(IS_ERR(rb_threads[cpu]))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_threads[cpu]); goto out_free; } @@ -4838,9 +4838,9 @@ static __init int test_ringbuffer(void) /* Now create the rb hammer! */ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); - if (WARN_ON(!rb_hammer)) { + if (WARN_ON(IS_ERR(rb_hammer))) { pr_cont("FAILED\n"); - ret = -1; + ret = PTR_ERR(rb_hammer); goto out_free; } diff --git a/lib/iov_iter.c b/lib/iov_iter.c index e68604ae3ced..60abc44385b7 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size) } EXPORT_SYMBOL(iov_iter_advance); +void iov_iter_revert(struct iov_iter *i, size_t unroll) +{ + if (!unroll) + return; + i->count += unroll; + if (unlikely(i->type & ITER_PIPE)) { + struct pipe_inode_info *pipe = i->pipe; + int idx = i->idx; + size_t off = i->iov_offset; + while (1) { + size_t n = off - pipe->bufs[idx].offset; + if (unroll < n) { + off -= (n - unroll); + break; + } + unroll -= n; + if (!unroll && idx == i->start_idx) { + off = 0; + break; + } + if (!idx--) + idx = pipe->buffers - 1; + off = pipe->bufs[idx].offset + pipe->bufs[idx].len; + } + i->iov_offset = off; + i->idx = idx; + pipe_truncate(i); + return; + } + if (unroll <= i->iov_offset) { + i->iov_offset -= unroll; + return; + } + unroll -= i->iov_offset; + if (i->type & ITER_BVEC) { + const struct bio_vec *bvec = i->bvec; + while (1) { + size_t n = (--bvec)->bv_len; + i->nr_segs++; + if (unroll <= n) { + i->bvec = bvec; + i->iov_offset = n - unroll; + return; + } + unroll -= n; + } + } else { /* same logics for iovec and kvec */ + const struct iovec *iov = i->iov; + while (1) { + size_t n = (--iov)->iov_len; + i->nr_segs++; + if (unroll <= n) { + i->iov = iov; + i->iov_offset = n - unroll; + return; + } + unroll -= n; + } + } +} +EXPORT_SYMBOL(iov_iter_revert); + /* * Return the count of just the current iov_iter segment. 
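/*
 * [Editorial sketch -- not part of this patch] Typical pairing for the new
 * iov_iter_revert() above, which the datagram code later in this series
 * adopts: advance by however much was actually copied, and on failure
 * rewind so the iterator is restored for the caller.
 */
static ssize_t copy_all_or_rewind(void *kbuf, size_t len, struct iov_iter *to)
{
	size_t n = copy_to_iter(kbuf, len, to);	/* advances the iterator by n */

	if (n != len) {
		iov_iter_revert(to, n);	/* undo the partial advance */
		return -EFAULT;
	}
	return n;
}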
*/ @@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction, i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); i->iov_offset = 0; i->count = count; + i->start_idx = i->idx; } EXPORT_SYMBOL(iov_iter_pipe); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1ebc93e179f3..f3c4f9d22821 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj, clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); - } else if (!memcmp("defer", buf, - min(sizeof("defer")-1, count))) { - clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); - clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); - clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); - set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); } else if (!memcmp("defer+madvise", buf, min(sizeof("defer+madvise")-1, count))) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + } else if (!memcmp("defer", buf, + min(sizeof("defer")-1, count))) { + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags); + clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags); + set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags); } else if (!memcmp("madvise", buf, min(sizeof("madvise")-1, count))) { clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags); @@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, deactivate_page(page); if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { - orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, - tlb->fullmm); + pmdp_invalidate(vma, addr, pmd); orig_pmd = pmd_mkold(orig_pmd); orig_pmd = pmd_mkclean(orig_pmd); @@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, { struct mm_struct *mm = vma->vm_mm; spinlock_t *ptl; - int ret = 0; + pmd_t entry; + bool preserve_write; + int ret; ptl = __pmd_trans_huge_lock(pmd, vma); - if (ptl) { - pmd_t entry; - bool preserve_write = prot_numa && pmd_write(*pmd); - ret = 1; + if (!ptl) + return 0; - /* - * Avoid trapping faults against the zero page. The read-only - * data is likely to be read-cached on the local CPU and - * local/remote hits to the zero page are not interesting. - */ - if (prot_numa && is_huge_zero_pmd(*pmd)) { - spin_unlock(ptl); - return ret; - } + preserve_write = prot_numa && pmd_write(*pmd); + ret = 1; - if (!prot_numa || !pmd_protnone(*pmd)) { - entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); - entry = pmd_modify(entry, newprot); - if (preserve_write) - entry = pmd_mk_savedwrite(entry); - ret = HPAGE_PMD_NR; - set_pmd_at(mm, addr, pmd, entry); - BUG_ON(vma_is_anonymous(vma) && !preserve_write && - pmd_write(entry)); - } - spin_unlock(ptl); - } + /* + * Avoid trapping faults against the zero page. 
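/*
 * [Editorial sketch -- not part of this patch] Why defrag_store() above now
 * tests "defer+madvise" before "defer": the memcmp over
 * min(sizeof("defer")-1, count) bytes is a prefix match, so the shorter
 * literal also matched the longer input and "defer+madvise" could never be
 * selected. Standalone demonstration:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "defer+madvise";
	size_t count = strlen(buf);
	size_t n = count < sizeof("defer") - 1 ? count : sizeof("defer") - 1;

	printf("%d\n", !memcmp("defer", buf, n));	/* prints 1: prefix matches */
	return 0;
}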
The read-only + * data is likely to be read-cached on the local CPU and + * local/remote hits to the zero page are not interesting. + */ + if (prot_numa && is_huge_zero_pmd(*pmd)) + goto unlock; + + if (prot_numa && pmd_protnone(*pmd)) + goto unlock; + + /* + * In case prot_numa, we are under down_read(mmap_sem). It's critical + * to not clear pmd intermittently to avoid race with MADV_DONTNEED + * which is also under down_read(mmap_sem): + * + * CPU0: CPU1: + * change_huge_pmd(prot_numa=1) + * pmdp_huge_get_and_clear_notify() + * madvise_dontneed() + * zap_pmd_range() + * pmd_trans_huge(*pmd) == 0 (without ptl) + * // skip the pmd + * set_pmd_at(); + * // pmd is re-established + * + * The race makes MADV_DONTNEED miss the huge pmd and don't clear it + * which may break userspace. + * + * pmdp_invalidate() is required to make sure we don't miss + * dirty/young flags set by hardware. + */ + entry = *pmd; + pmdp_invalidate(vma, addr, pmd); + /* + * Recover dirty/young flags. It relies on pmdp_invalidate to not + * corrupt them. + */ + if (pmd_dirty(*pmd)) + entry = pmd_mkdirty(entry); + if (pmd_young(*pmd)) + entry = pmd_mkyoung(entry); + + entry = pmd_modify(entry, newprot); + if (preserve_write) + entry = pmd_mk_savedwrite(entry); + ret = HPAGE_PMD_NR; + set_pmd_at(mm, addr, pmd, entry); + BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); +unlock: + spin_unlock(ptl); return ret; } diff --git a/mm/internal.h b/mm/internal.h index ccfc2a2969f4..266efaeaa370 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, enum ttu_flags; struct tlbflush_unmap_batch; + +/* + * only for MM internal work items which do not depend on + * any allocations or locks which might depend on allocations + */ +extern struct workqueue_struct *mm_percpu_wq; + #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH void try_to_unmap_flush(void); void try_to_unmap_flush_dirty(void); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 75b2745bac41..37d0b334bfe9 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode) { - long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; DECLARE_BITMAP(bm, MAX_NUMNODES); @@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask, alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { - err = compat_get_bitmap(bm, nmask, nr_bits); + if (compat_get_bitmap(bm, nmask, nr_bits)) + return -EFAULT; nm = compat_alloc_user_space(alloc_size); - err |= copy_to_user(nm, bm, alloc_size); + if (copy_to_user(nm, bm, alloc_size)) + return -EFAULT; } - if (err) - return -EFAULT; - return sys_set_mempolicy(mode, nm, nr_bits+1); } @@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, compat_ulong_t, mode, compat_ulong_t __user *, nmask, compat_ulong_t, maxnode, compat_ulong_t, flags) { - long err = 0; unsigned long __user *nm = NULL; unsigned long nr_bits, alloc_size; nodemask_t bm; @@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; if (nmask) { - err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits); + if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits)) + return -EFAULT; nm = compat_alloc_user_space(alloc_size); - err |= 
copy_to_user(nm, nodes_addr(bm), alloc_size); + if (copy_to_user(nm, nodes_addr(bm), alloc_size)) + return -EFAULT; } - if (err) - return -EFAULT; - return sys_mbind(start, len, mode, nm, nr_bits+1, flags); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6cbde310abed..f3d603cef2c0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone) */ static cpumask_t cpus_with_pcps; + /* + * Make sure nobody triggers this path before mm_percpu_wq is fully + * initialized. + */ + if (WARN_ON_ONCE(!mm_percpu_wq)) + return; + /* Workqueues cannot recurse */ if (current->flags & PF_WQ_WORKER) return; @@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone) for_each_cpu(cpu, &cpus_with_pcps) { struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); INIT_WORK(work, drain_local_pages_wq); - schedule_work_on(cpu, work); + queue_work_on(cpu, mm_percpu_wq, work); } for_each_cpu(cpu, &cpus_with_pcps) flush_work(per_cpu_ptr(&pcpu_drain, cpu)); @@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask) K(node_page_state(pgdat, NR_FILE_MAPPED)), K(node_page_state(pgdat, NR_FILE_DIRTY)), K(node_page_state(pgdat, NR_WRITEBACK)), + K(node_page_state(pgdat, NR_SHMEM)), #ifdef CONFIG_TRANSPARENT_HUGEPAGE K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR), K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), #endif - K(node_page_state(pgdat, NR_SHMEM)), K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), K(node_page_state(pgdat, NR_UNSTABLE_NFS)), node_page_state(pgdat, NR_PAGES_SCANNED), diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index c4c9def8ffea..de9c40d7304a 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) if (pvmw->pmd && !pvmw->pte) return not_found(pvmw); - /* Only for THP, seek to next pte entry makes sense */ - if (pvmw->pte) { - if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page)) - return not_found(pvmw); + if (pvmw->pte) goto next_pte; - } if (unlikely(PageHuge(pvmw->page))) { /* when pud is not present, pte will be NULL */ @@ -165,9 +161,14 @@ restart: while (1) { if (check_pte(pvmw)) return true; -next_pte: do { +next_pte: + /* Seek to next pte only makes sense for THP */ + if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page)) + return not_found(pvmw); + do { pvmw->address += PAGE_SIZE; - if (pvmw->address >= + if (pvmw->address >= pvmw->vma->vm_end || + pvmw->address >= __vma_address(pvmw->page, pvmw->vma) + hpage_nr_pages(pvmw->page) * PAGE_SIZE) return not_found(pvmw); diff --git a/mm/swap.c b/mm/swap.c index c4910f14f957..5dabf444d724 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); -/* - * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM - * workqueue, aiding in getting memory freed. 
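/*
 * [Editorial sketch -- not part of this patch] The single mm_percpu_wq
 * declared above (allocated with WQ_MEM_RECLAIM in init_mm_internals(),
 * shown further below) replaces the private lru-add-drain and vmstat
 * workqueues; every user first checks it exists, since these drain paths
 * can be reached before the allocation has run. Usage pattern:
 */
static void example_drain_on(int cpu, struct work_struct *work)
{
	if (WARN_ON_ONCE(!mm_percpu_wq))	/* called too early in boot */
		return;
	queue_work_on(cpu, mm_percpu_wq, work);
	flush_work(work);			/* wait for the remote drain */
}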
- */ -static struct workqueue_struct *lru_add_drain_wq; - -static int __init lru_init(void) -{ - lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0); - - if (WARN(!lru_add_drain_wq, - "Failed to create workqueue lru_add_drain_wq")) - return -ENOMEM; - - return 0; -} -early_initcall(lru_init); - void lru_add_drain_all(void) { static DEFINE_MUTEX(lock); static struct cpumask has_work; int cpu; + /* + * Make sure nobody triggers this path before mm_percpu_wq is fully + * initialized. + */ + if (WARN_ON(!mm_percpu_wq)) + return; + mutex_lock(&lock); get_online_cpus(); cpumask_clear(&has_work); @@ -707,7 +696,7 @@ void lru_add_drain_all(void) pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || need_activate_page_drain(cpu)) { INIT_WORK(work, lru_add_drain_per_cpu); - queue_work_on(cpu, lru_add_drain_wq, work); + queue_work_on(cpu, mm_percpu_wq, work); cpumask_set_cpu(cpu, &has_work); } } diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 310ac0b8f974..ac6318a064d3 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type) struct page *page = map[i]; if (page) __free_page(page); + if (!(i % SWAP_CLUSTER_MAX)) + cond_resched(); } vfree(map); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 89f95396ec46..809025ed97ea 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = { #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SMP -static struct workqueue_struct *vmstat_wq; static DEFINE_PER_CPU(struct delayed_work, vmstat_work); int sysctl_stat_interval __read_mostly = HZ; @@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w) * to occur in the future. Keep on running the * update worker thread. */ - queue_delayed_work_on(smp_processor_id(), vmstat_wq, + queue_delayed_work_on(smp_processor_id(), mm_percpu_wq, this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); } @@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w) struct delayed_work *dw = &per_cpu(vmstat_work, cpu); if (!delayed_work_pending(dw) && need_update(cpu)) - queue_delayed_work_on(cpu, vmstat_wq, dw, 0); + queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); } put_online_cpus(); @@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void) INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), vmstat_update); - vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); schedule_delayed_work(&shepherd, round_jiffies_relative(sysctl_stat_interval)); } @@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu) #endif +struct workqueue_struct *mm_percpu_wq; + void __init init_mm_internals(void) { -#ifdef CONFIG_SMP - int ret; + int ret __maybe_unused; + mm_percpu_wq = alloc_workqueue("mm_percpu_wq", + WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); + +#ifdef CONFIG_SMP ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead", NULL, vmstat_cpu_dead); if (ret < 0) diff --git a/mm/z3fold.c b/mm/z3fold.c index f9492bccfd79..54f63c4a809a 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr) spin_lock(&zhdr->page_lock); } +/* Try to lock a z3fold page */ +static inline int z3fold_page_trylock(struct z3fold_header *zhdr) +{ + return spin_trylock(&zhdr->page_lock); +} + /* Unlock a z3fold page */ static inline void z3fold_page_unlock(struct z3fold_header *zhdr) { @@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, 
spin_lock(&pool->lock); zhdr = list_first_entry_or_null(&pool->unbuddied[i], struct z3fold_header, buddy); - if (!zhdr) { + if (!zhdr || !z3fold_page_trylock(zhdr)) { spin_unlock(&pool->lock); continue; } @@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, spin_unlock(&pool->lock); page = virt_to_page(zhdr); - z3fold_page_lock(zhdr); if (zhdr->first_chunks == 0) { if (zhdr->middle_chunks != 0 && chunks >= zhdr->start_middle) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b7ee9c34dbd6..d41edd28298b 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -276,7 +276,7 @@ struct zs_pool { struct zspage { struct { unsigned int fullness:FULLNESS_BITS; - unsigned int class:CLASS_BITS; + unsigned int class:CLASS_BITS + 1; unsigned int isolated:ISOLATED_BITS; unsigned int magic:MAGIC_VAL_BITS; }; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index ea71513fca21..90f49a194249 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev) return err; } +static void br_dev_uninit(struct net_device *dev) +{ + struct net_bridge *br = netdev_priv(dev); + + br_multicast_uninit_stats(br); + br_vlan_flush(br); + free_percpu(br->stats); +} + static int br_dev_open(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); @@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = { .ndo_open = br_dev_open, .ndo_stop = br_dev_stop, .ndo_init = br_dev_init, + .ndo_uninit = br_dev_uninit, .ndo_start_xmit = br_dev_xmit, .ndo_get_stats64 = br_get_stats64, .ndo_set_mac_address = br_set_mac_address, @@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = { .ndo_features_check = passthru_features_check, }; -static void br_dev_free(struct net_device *dev) -{ - struct net_bridge *br = netdev_priv(dev); - - free_percpu(br->stats); - free_netdev(dev); -} - static struct device_type br_type = { .name = "bridge", }; @@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev) ether_setup(dev); dev->netdev_ops = &br_netdev_ops; - dev->destructor = br_dev_free; + dev->destructor = free_netdev; dev->ethtool_ops = &br_ethtool_ops; SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 8ac1770aa222..56a2a72e7738 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head) br_fdb_delete_by_port(br, NULL, 0, 1); - br_vlan_flush(br); br_multicast_dev_del(br); cancel_delayed_work_sync(&br->gc_work); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index b760f2620abf..faa7261a992f 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br) out: spin_unlock_bh(&br->multicast_lock); - - free_percpu(br->mcast_stats); } int br_multicast_set_router(struct net_bridge *br, unsigned long val) @@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br) return 0; } +void br_multicast_uninit_stats(struct net_bridge *br) +{ + free_percpu(br->mcast_stats); +} + static void mcast_stats_add_dir(u64 *dst, u64 *src) { dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index a8f6acd23e30..225ef7d53701 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device 
*dev, spin_unlock_bh(&br->lock); } - err = br_changelink(dev, tb, data); + err = register_netdevice(dev); if (err) return err; - return register_netdevice(dev); + err = br_changelink(dev, tb, data); + if (err) + unregister_netdevice(dev); + return err; } static size_t br_get_size(const struct net_device *brdev) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 61368186edea..0d177280aa84 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p, const struct sk_buff *skb, u8 type, u8 dir); int br_multicast_init_stats(struct net_bridge *br); +void br_multicast_uninit_stats(struct net_bridge *br); void br_multicast_get_stats(const struct net_bridge *br, const struct net_bridge_port *p, struct br_mcast_stats *dest); @@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br) return 0; } +static inline void br_multicast_uninit_stats(struct net_bridge *br) +{ +} + static inline int br_multicast_igmp_type(const struct sk_buff *skb) { return 0; diff --git a/net/core/datagram.c b/net/core/datagram.c index ea633342ab0d..f4947e737f34 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, struct iov_iter *to, int len) { int start = skb_headlen(skb); - int i, copy = start - offset; + int i, copy = start - offset, start_off = offset, n; struct sk_buff *frag_iter; trace_skb_copy_datagram_iovec(skb, len); @@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, if (copy > 0) { if (copy > len) copy = len; - if (copy_to_iter(skb->data + offset, copy, to) != copy) + n = copy_to_iter(skb->data + offset, copy, to); + offset += n; + if (n != copy) goto short_copy; if ((len -= copy) == 0) return 0; - offset += copy; } /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ @@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (copy_page_to_iter(skb_frag_page(frag), + n = copy_page_to_iter(skb_frag_page(frag), frag->page_offset + offset - - start, copy, to) != copy) + start, copy, to); + offset += n; + if (n != copy) goto short_copy; if (!(len -= copy)) return 0; - offset += copy; } start = end; } @@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, */ fault: + iov_iter_revert(to, offset - start_off); return -EFAULT; short_copy: @@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, __wsum *csump) { int start = skb_headlen(skb); - int i, copy = start - offset; + int i, copy = start - offset, start_off = offset; struct sk_buff *frag_iter; int pos = 0; int n; @@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, if (copy > len) copy = len; n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to); + offset += n; if (n != copy) goto fault; if ((len -= copy) == 0) return 0; - offset += copy; pos = copy; } @@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, offset - start, copy, &csum2, to); kunmap(page); + offset += n; if (n != copy) goto fault; *csump = csum_block_add(*csump, csum2, pos); if (!(len -= copy)) return 0; - offset += copy; pos += copy; } start = end; @@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, return 0; fault: + iov_iter_revert(to, offset - start_off); return -EFAULT; } @@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, } return 0; csum_error: + iov_iter_revert(&msg->msg_iter, chunk); return -EINVAL; fault: return -EFAULT; diff --git a/net/core/dev.c b/net/core/dev.c index 7869ae3837ca..533a6d6f6092 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6757,7 +6757,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags) return err; } -EXPORT_SYMBOL(dev_change_xdp_fd); /** * dev_new_index - allocate an ifindex diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index c35aae13c8d2..d98d4998213d 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -390,7 +390,7 @@ mpls: unsigned char ar_tip[4]; } *arp_eth, _arp_eth; const struct arphdr *arp; - struct arphdr *_arp; + struct arphdr _arp; arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data, hlen, &_arp); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index e7c12caa20c8..4526cbd7e28a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh) if (skb) skb = skb_clone(skb, GFP_ATOMIC); write_unlock(&neigh->lock); - neigh->ops->solicit(neigh, skb); + if (neigh->ops->solicit) + neigh->ops->solicit(neigh, skb); atomic_inc(&neigh->probes); kfree_skb(skb); } diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 758f140b6bed..d28da7d363f1 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -20,9 +20,11 @@ #include <net/tcp.h> static siphash_key_t net_secret __read_mostly; +static siphash_key_t ts_secret __read_mostly; static __always_inline void net_secret_init(void) { + net_get_random_once(&ts_secret, sizeof(ts_secret)); net_get_random_once(&net_secret, sizeof(net_secret)); } #endif @@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq) #endif #if IS_ENABLED(CONFIG_IPV6) +static u32 secure_tcpv6_ts_off(const __be32 
*saddr, const __be32 *daddr) +{ + const struct { + struct in6_addr saddr; + struct in6_addr daddr; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .saddr = *(struct in6_addr *)saddr, + .daddr = *(struct in6_addr *)daddr, + }; + + if (sysctl_tcp_timestamps != 1) + return 0; + + return siphash(&combined, offsetofend(typeof(combined), daddr), + &ts_secret); +} + u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, __be16 sport, __be16 dport, u32 *tsoff) { @@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, net_secret_init(); hash = siphash(&combined, offsetofend(typeof(combined), dport), &net_secret); - *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; + *tsoff = secure_tcpv6_ts_off(saddr, daddr); return seq_scale(hash); } EXPORT_SYMBOL(secure_tcpv6_sequence_number); @@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #endif #ifdef CONFIG_INET +static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr) +{ + if (sysctl_tcp_timestamps != 1) + return 0; + + return siphash_2u32((__force u32)saddr, (__force u32)daddr, + &ts_secret); +} /* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d), * but fortunately, `sport' cannot be 0 in any circumstances. If this changes, @@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, (__force u32)sport << 16 | (__force u32)dport, &net_secret); - *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0; + *tsoff = secure_tcp_ts_off(saddr, daddr); return seq_scale(hash); } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 4ead336e14ea..7f9cc400eca0 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = { .data = &sysctl_net_busy_poll, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, { .procname = "busy_read", .data = &sysctl_net_busy_read, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, }, #endif #ifdef CONFIG_NET_SCHED diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index fd9f34bbd740..dfb2ab2dd3c8 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -306,7 +306,7 @@ static void __init ic_close_devs(void) while ((d = next)) { next = d->next; dev = d->dev; - if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) { + if (d != ic_dev && !netdev_uses_dsa(dev)) { pr_debug("IP-Config: Downing %s\n", dev->name); dev_change_flags(dev, d->flags); } diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 52f26459efc3..9b8841316e7b 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par) clusterip_config_put(cipinfo->config); - nf_ct_netns_get(par->net, par->family); + nf_ct_netns_put(par->net, par->family); } #ifdef CONFIG_COMPAT diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index c9b52c361da2..53e49f5011d3 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = { .timeout = 180, }; -static struct nf_conntrack_helper snmp_helper __read_mostly = { - .me = 
THIS_MODULE, - .help = help, - .expect_policy = &snmp_exp_policy, - .name = "snmp", - .tuple.src.l3num = AF_INET, - .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT), - .tuple.dst.protonum = IPPROTO_UDP, -}; - static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { .me = THIS_MODULE, .help = help, @@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = { static int __init nf_nat_snmp_basic_init(void) { - int ret = 0; - BUG_ON(nf_nat_snmp_hook != NULL); RCU_INIT_POINTER(nf_nat_snmp_hook, help); - ret = nf_conntrack_helper_register(&snmp_trap_helper); - if (ret < 0) { - nf_conntrack_helper_unregister(&snmp_helper); - return ret; - } - return ret; + return nf_conntrack_helper_register(&snmp_trap_helper); } static void __exit nf_nat_snmp_basic_fini(void) { RCU_INIT_POINTER(nf_nat_snmp_hook, NULL); + synchronize_rcu(); nf_conntrack_helper_unregister(&snmp_trap_helper); } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 2af6244b83e2..ccfbce13a633 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -156,17 +156,18 @@ int ping_hash(struct sock *sk) void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); + pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); + write_lock_bh(&ping_table.lock); if (sk_hashed(sk)) { - write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sk_nulls_node_init(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - write_unlock_bh(&ping_table.lock); } + write_unlock_bh(&ping_table.lock); } EXPORT_SYMBOL_GPL(ping_unhash); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8471dd116771..acd69cfe2951 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2620,7 +2620,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) skb_reset_network_header(skb); /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */ - ip_hdr(skb)->protocol = IPPROTO_ICMP; + ip_hdr(skb)->protocol = IPPROTO_UDP; skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1e319a525d51..40ba4249a586 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_init_send_head(sk); memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); + tcp_saved_syn_free(tp); /* Clean up fastopen related fields */ tcp_free_fastopen_req(tp); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c43119726a62..659d1baefb2b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; #define REXMIT_LOST 1 /* retransmit packets marked lost */ #define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */ -static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) +static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb, + unsigned int len) { static bool __once __read_mostly; @@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb) rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); - pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", - dev ? dev->name : "Unknown driver"); + if (!dev || len >= dev->mtu) + pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", + dev ? 
dev->name : "Unknown driver"); rcu_read_unlock(); } } @@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, tcp_sk(sk)->advmss); - if (unlikely(icsk->icsk_ack.rcv_mss != len)) - tcp_gro_dev_warn(sk, skb); + /* Account for possibly-removed options */ + if (unlikely(len > icsk->icsk_ack.rcv_mss + + MAX_TCP_OPTION_SPACE)) + tcp_gro_dev_warn(sk, skb, len); } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. @@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric, const int ts) { struct tcp_sock *tp = tcp_sk(sk); - if (metric > tp->reordering) { - int mib_idx; + int mib_idx; + if (metric > tp->reordering) { tp->reordering = min(sysctl_tcp_max_reordering, metric); - /* This exciting event is worth to be remembered. 8) */ - if (ts) - mib_idx = LINUX_MIB_TCPTSREORDER; - else if (tcp_is_reno(tp)) - mib_idx = LINUX_MIB_TCPRENOREORDER; - else if (tcp_is_fack(tp)) - mib_idx = LINUX_MIB_TCPFACKREORDER; - else - mib_idx = LINUX_MIB_TCPSACKREORDER; - - NET_INC_STATS(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, @@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric, } tp->rack.reord = 1; + + /* This exciting event is worth to be remembered. 8) */ + if (ts) + mib_idx = LINUX_MIB_TCPTSREORDER; + else if (tcp_is_reno(tp)) + mib_idx = LINUX_MIB_TCPRENOREORDER; + else if (tcp_is_fack(tp)) + mib_idx = LINUX_MIB_TCPFACKREORDER; + else + mib_idx = LINUX_MIB_TCPSACKREORDER; + + NET_INC_STATS(sock_net(sk), mib_idx); } /* This must be called before lost_out is incremented */ @@ -1930,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct sk_buff *skb; + bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? */ bool mark_lost; @@ -1989,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk) tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); - /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO - * if a previous recovery is underway, otherwise it may incorrectly - * call a timeout spurious if some previously retransmitted packets - * are s/acked (sec 3.2). We do not apply that retriction since - * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS - * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO - * on PTMU discovery to avoid sending new data. + /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous + * loss recovery is underway except recurring timeout(s) on + * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing + * + * In theory F-RTO can be used repeatedly during loss recovery. + * In practice this interacts badly with broken middle-boxes that + * falsely raise the receive window, which results in repeated + * timeouts and stop-and-go behavior. 
*/ - tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size; + tp->frto = sysctl_tcp_frto && + (new_recovery || icsk->icsk_retransmits) && + !inet_csk(sk)->icsk_mtup.probe_size; } /* If ACK arrived pointing to a remembered SACK, it means that our diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 22548b5f05cb..c3c082ed3879 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2999,6 +2999,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) { struct sk_buff *skb; + TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); + /* NOTE: No TCP options attached and we never retransmit this. */ skb = alloc_skb(MAX_TCP_HEADER, priority); if (!skb) { @@ -3014,8 +3016,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) /* Send it off. */ if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); - - TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); } /* Send a crossed SYN-ACK during socket establishment. diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index 4ecb38ae8504..d8acbd9f477a 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb) /* Account for retransmits that are lost again */ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT, + tcp_skb_pcount(skb)); } } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 363172527e43..80ce478c4851 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3626,14 +3626,19 @@ restart: INIT_LIST_HEAD(&del_list); list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { struct rt6_info *rt = NULL; + bool keep; addrconf_del_dad_work(ifa); + keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) && + !addr_is_local(&ifa->addr); + if (!keep) + list_move(&ifa->if_list, &del_list); + write_unlock_bh(&idev->lock); spin_lock_bh(&ifa->lock); - if (keep_addr && (ifa->flags & IFA_F_PERMANENT) && - !addr_is_local(&ifa->addr)) { + if (keep) { /* set state to skip the notifier below */ state = INET6_IFADDR_STATE_DEAD; ifa->state = 0; @@ -3645,8 +3650,6 @@ restart: } else { state = ifa->state; ifa->state = INET6_IFADDR_STATE_DEAD; - - list_move(&ifa->if_list, &del_list); } spin_unlock_bh(&ifa->lock); diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 309062f3debe..31762f76cdb5 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct kcm_attach info; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - err = -EFAULT; + return -EFAULT; err = kcm_attach_ioctl(sock, &info); @@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct kcm_unattach info; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - err = -EFAULT; + return -EFAULT; err = kcm_unattach_ioctl(sock, &info); @@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct socket *newsock = NULL; if (copy_from_user(&info, (void __user *)arg, sizeof(info))) - err = -EFAULT; + return -EFAULT; err = kcm_clone(sock, &info, &newsock); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 8adab6335ced..e37d9554da7b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -278,7 +278,57 @@ struct l2tp_session 
*l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn } EXPORT_SYMBOL_GPL(l2tp_session_find); -struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) +/* Like l2tp_session_find() but takes a reference on the returned session. + * Optionally calls session->ref() too if do_ref is true. + */ +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref) +{ + struct hlist_head *session_list; + struct l2tp_session *session; + + if (!tunnel) { + struct l2tp_net *pn = l2tp_pernet(net); + + session_list = l2tp_session_id_hash_2(pn, session_id); + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(session, session_list, global_hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + rcu_read_unlock_bh(); + + return session; + } + } + rcu_read_unlock_bh(); + + return NULL; + } + + session_list = l2tp_session_id_hash(tunnel, session_id); + read_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session, session_list, hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + read_unlock_bh(&tunnel->hlist_lock); + + return session; + } + } + read_unlock_bh(&tunnel->hlist_lock); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_session_get); + +struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref) { int hash; struct l2tp_session *session; @@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) { if (++count > nth) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); read_unlock_bh(&tunnel->hlist_lock); return session; } @@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_nth); +EXPORT_SYMBOL_GPL(l2tp_session_get_nth); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. 
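
The new l2tp_session_get() above takes its reference while the hash-list lock (or RCU read section) is still held, so the session cannot be freed between lookup and use. A userspace analogue of that pattern, assuming a mutex-protected list and an atomic refcount; all names here are invented for the sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct session {
	unsigned int id;
	atomic_int refcnt;           /* object freed when this hits zero */
	struct session *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct session *sessions;     /* lookup list */

/* Take the reference before dropping the list lock -- the window that
 * a bare find()/use() pairing leaves open. */
static struct session *session_get(unsigned int id)
{
	struct session *s;

	pthread_mutex_lock(&list_lock);
	for (s = sessions; s; s = s->next)
		if (s->id == id) {
			atomic_fetch_add(&s->refcnt, 1);
			break;
		}
	pthread_mutex_unlock(&list_lock);
	return s;
}

/* Unlinking from the list on last put is elided for brevity; a real
 * implementation must do it before the free. */
static void session_put(struct session *s)
{
	if (atomic_fetch_sub(&s->refcnt, 1) == 1)
		free(s);
}

int main(void)
{
	struct session *s = calloc(1, sizeof(*s));

	s->id = 1;
	atomic_init(&s->refcnt, 1);  /* creation reference */
	sessions = s;

	s = session_get(1);
	session_put(s);              /* drop lookup reference */
	sessions = NULL;
	session_put(s);              /* drop creation reference, frees */
	return 0;
}
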
*/ -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref) { struct l2tp_net *pn = l2tp_pernet(net); int hash; @@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { if (!strcmp(session->ifname, ifname)) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); rcu_read_unlock_bh(); + return session; } } @@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); + +static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, + struct l2tp_session *session) +{ + struct l2tp_session *session_walk; + struct hlist_head *g_head; + struct hlist_head *head; + struct l2tp_net *pn; + + head = l2tp_session_id_hash(tunnel, session->session_id); + + write_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session_walk, head, hlist) + if (session_walk->session_id == session->session_id) + goto exist; + + if (tunnel->version == L2TP_HDR_VER_3) { + pn = l2tp_pernet(tunnel->l2tp_net); + g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net), + session->session_id); + + spin_lock_bh(&pn->l2tp_session_hlist_lock); + hlist_for_each_entry(session_walk, g_head, global_hlist) + if (session_walk->session_id == session->session_id) + goto exist_glob; + + hlist_add_head_rcu(&session->global_hlist, g_head); + spin_unlock_bh(&pn->l2tp_session_hlist_lock); + } + + hlist_add_head(&session->hlist, head); + write_unlock_bh(&tunnel->hlist_lock); + + return 0; + +exist_glob: + spin_unlock_bh(&pn->l2tp_session_hlist_lock); +exist: + write_unlock_bh(&tunnel->hlist_lock); + + return -EEXIST; +} /* Lookup a tunnel by id */ @@ -633,6 +733,9 @@ discard: * a data (not control) frame before coming here. Fields up to the * session-id have already been parsed and ptr points to the data * after the session-id. + * + * session->ref() must have been called prior to l2tp_recv_common(). + * session->deref() will be called automatically after skb is processed. */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, @@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, int offset; u32 ns, nr; - /* The ref count is increased since we now hold a pointer to - * the session. Take care to decrement the refcnt when exiting - * this function from now on... - */ - l2tp_session_inc_refcount(session); - if (session->ref) - (*session->ref)(session); - /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { @@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, /* Try to dequeue as many skbs from reorder_q as we can. 
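
l2tp_session_add_to_tunnel() above folds the duplicate check and the hash insertion into one locked section; callers no longer "find nothing, then insert" across separate lock acquisitions. The race it closes, reduced to a userspace sketch with placeholder types:

#include <errno.h>
#include <pthread.h>

struct node { int key; struct node *next; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table;

/* Atomically "insert unless a node with the same key exists".
 * Checking in one lock section and inserting in another is exactly
 * the window the l2tp patch removes. */
static int insert_unique(struct node *n)
{
	struct node *walk;
	int err = 0;

	pthread_mutex_lock(&table_lock);
	for (walk = table; walk; walk = walk->next) {
		if (walk->key == n->key) {
			err = -EEXIST;
			goto out;
		}
	}
	n->next = table;
	table = n;
out:
	pthread_mutex_unlock(&table_lock);
	return err;
}

int main(void)
{
	struct node a = { .key = 1 }, b = { .key = 1 };

	return insert_unique(&a) || insert_unique(&b) != -EEXIST;
}
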
*/ l2tp_recv_dequeue(session); - l2tp_session_dec_refcount(session); - return; discard: @@ -812,8 +905,6 @@ discard: if (session->deref) (*session->deref)(session); - - l2tp_session_dec_refcount(session); } EXPORT_SYMBOL(l2tp_recv_common); @@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } /* Find the session context */ - session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); + session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true); if (!session || !session->recv_skb) { + if (session) { + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } + /* Not found? Pass to userspace to deal with */ l2tp_info(tunnel, L2TP_MSG_DATA, "%s: no session found (%u/%u). Passing up.\n", @@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); + l2tp_session_dec_refcount(session); return 0; @@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; + int err; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); if (session != NULL) { @@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn l2tp_session_set_header_len(session, tunnel->version); + err = l2tp_session_add_to_tunnel(tunnel, session); + if (err) { + kfree(session); + + return ERR_PTR(err); + } + /* Bump the reference count. The session context is deleted * only when this drops to zero. */ @@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn /* Ensure tunnel socket isn't deleted */ sock_hold(tunnel->sock); - /* Add session to the tunnel's hash list */ - write_lock_bh(&tunnel->hlist_lock); - hlist_add_head(&session->hlist, - l2tp_session_id_hash(tunnel, session_id)); - write_unlock_bh(&tunnel->hlist_lock); - - /* And to the global session list if L2TPv3 */ - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_add_head_rcu(&session->global_hlist, - l2tp_session_id_hash_2(pn, session_id)); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); - } - /* Ignore management session in session count value */ if (session->session_id != 0) atomic_inc(&l2tp_session_count); + + return session; } - return session; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(l2tp_session_create); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index aebf281d09ee..8ce7818c7a9d 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -230,11 +230,16 @@ out: return tunnel; } +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref); struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); -struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); +struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, + bool do_ref); +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref); struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); struct l2tp_tunnel 
*l2tp_tunnel_find_nth(struct net *net, int nth); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 2d6760a2ae34..d100aed3d06f 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) { - pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); + pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); pd->session_idx++; if (pd->session == NULL) { @@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v) } /* Show the tunnel or session context */ - if (pd->session == NULL) + if (!pd->session) { l2tp_dfs_seq_tunnel_show(m, pd->tunnel); - else + } else { l2tp_dfs_seq_session_show(m, pd->session); + if (pd->session->deref) + pd->session->deref(pd->session); + l2tp_session_dec_refcount(pd->session); + } out: return 0; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 8bf18a5f66e0..6fd41d7afe1e 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p goto out; } - session = l2tp_session_find(net, tunnel, session_id); - if (session) { - rc = -EEXIST; - goto out; - } - if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { @@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); - if (!session) { - rc = -ENOMEM; + if (IS_ERR(session)) { + rc = PTR_ERR(session); goto out; } diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index d25038cfd64e..4d322c1b7233 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. */ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); return 0; @@ -178,9 +179,10 @@ pass_up: tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel != NULL) + if (tunnel) { sk = tunnel->sock; - else { + sock_hold(sk); + } else { struct iphdr *iph = (struct iphdr *) skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); @@ -202,6 +204,12 @@ pass_up: return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a4abcbc4c09a..88b397c30d86 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
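
Once the l2tp receive paths hold a counted reference, every early exit has to drop it, which is what the new discard_sess labels centralize. The idiom, reduced to a userspace sketch with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

/* Stacked error labels: each label releases exactly the resources
 * acquired before the failing step, in reverse order. */
static int process(int fail_late)
{
	FILE *log;
	char *buf = malloc(64);

	if (!buf)
		return -1;

	log = fopen("/dev/null", "w");
	if (!log)
		goto err_buf;

	if (fail_late)
		goto err_log;            /* both resources unwound */

	fprintf(log, "ok\n");
	fclose(log);
	free(buf);
	return 0;

err_log:
	fclose(log);
err_buf:
	free(buf);
	return -1;
}

int main(void)
{
	return process(0);
}
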
*/ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); + return 0; pass_up: @@ -191,9 +193,10 @@ pass_up: tunnel_id = ntohl(*(__be32 *) &skb->data[4]); tunnel = l2tp_tunnel_find(net, tunnel_id); - if (tunnel != NULL) + if (tunnel) { sk = tunnel->sock; - else { + sock_hold(sk); + } else { struct ipv6hdr *iph = ipv6_hdr(skb); read_lock_bh(&l2tp_ip6_lock); @@ -215,6 +218,12 @@ pass_up: return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 3620fba31786..7e3e669baac4 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, /* Accessed under genl lock */ static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; -static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) +static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, + bool do_ref) { u32 tunnel_id; u32 session_id; @@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) if (info->attrs[L2TP_ATTR_IFNAME]) { ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); - session = l2tp_session_find_by_ifname(net, ifname); + session = l2tp_session_get_by_ifname(net, ifname, do_ref); } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel) - session = l2tp_session_find(net, tunnel, session_id); + session = l2tp_session_get(net, tunnel, session_id, + do_ref); } return session; @@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf session_id, peer_session_id, &cfg); if (ret >= 0) { - session = l2tp_session_find(net, tunnel, session_id); - if (session) + session = l2tp_session_get(net, tunnel, session_id, false); + if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); + l2tp_session_dec_refcount(session); + } } out: @@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf struct l2tp_session *session; u16 pw_type; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, true); if (session == NULL) { ret = -ENODEV; goto out; @@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -681,7 +689,7 @@ static int 
l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf int ret = 0; struct l2tp_session *session; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; goto out; @@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; int ret; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; - goto out; + goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err_ref; } ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session, L2TP_CMD_SESSION_GET); if (ret < 0) - goto err_out; + goto err_ref_msg; - return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); + ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); -err_out: - nlmsg_free(msg); + l2tp_session_dec_refcount(session); -out: + return ret; + +err_ref_msg: + nlmsg_free(msg); +err_ref: + l2tp_session_dec_refcount(session); +err: return ret; } @@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback goto out; } - session = l2tp_session_find_nth(tunnel, si); + session = l2tp_session_get_nth(tunnel, si, false); if (session == NULL) { ti++; tunnel = NULL; @@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - session, L2TP_CMD_SESSION_GET) < 0) + session, L2TP_CMD_SESSION_GET) < 0) { + l2tp_session_dec_refcount(session); break; + } + l2tp_session_dec_refcount(session); si++; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 36cc56fd0418..32ea0f3d868c 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session) static void pppol2tp_session_destruct(struct sock *sk) { struct l2tp_session *session = sk->sk_user_data; + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + if (session) { sk->sk_user_data = NULL; BUG_ON(session->magic != L2TP_SESSION_MAGIC); @@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock) l2tp_session_queue_purge(session); sock_put(sk); } - skb_queue_purge(&sk->sk_receive_queue); - skb_queue_purge(&sk->sk_write_queue); - release_sock(sk); /* This will delete the session context via @@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int error = 0; u32 tunnel_id, peer_tunnel_id; u32 session_id, peer_session_id; + bool drop_refcnt = false; int ver = 2; int fd; @@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = peer_tunnel_id; - /* Create session if it doesn't already exist. We handle the - * case where a session was previously created by the netlink - * interface by checking that the session doesn't already have - * a socket and its tunnel socket are what we expect. If any - * of those checks fail, return EEXIST to the caller. 
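
The pppol2tp_connect() rework below replaces "find, and fail if found" with "get, adopt if unconnected, otherwise -EEXIST", so a session created first over netlink can still be attached to a socket. The decision tree in a userspace sketch (single-session table, all names invented):

#include <stdio.h>
#include <stdlib.h>

#define MYEEXIST 17

struct session { int id; int connected; };

static struct session *existing;     /* at most one, for the sketch */

static struct session *session_create(int id)
{
	struct session *s = calloc(1, sizeof(*s));

	if (s)
		s->id = id;
	existing = s;
	return s;
}

/* Mirror of the reworked connect decision:
 * - no session yet        -> create one
 * - session, unconnected  -> adopt it (e.g. netlink created it first)
 * - session, connected    -> -EEXIST
 */
static struct session *session_get_or_create(int id, int *err)
{
	struct session *s = existing && existing->id == id ? existing : NULL;

	*err = 0;
	if (!s)
		return session_create(id);
	if (s->connected) {
		*err = -MYEEXIST;
		return NULL;
	}
	return s;
}

int main(void)
{
	int err;
	struct session *s = session_get_or_create(7, &err);

	s->connected = 1;
	printf("second connect: %d\n",
	       session_get_or_create(7, &err) ? 0 : err); /* -17 */
	return 0;
}
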
- */ - session = l2tp_session_find(sock_net(sk), tunnel, session_id); - if (session == NULL) { - /* Default MTU must allow space for UDP/L2TP/PPP - * headers. + session = l2tp_session_get(sock_net(sk), tunnel, session_id, false); + if (session) { + drop_refcnt = true; + ps = l2tp_session_priv(session); + + /* Using a pre-existing session is fine as long as it hasn't + * been connected yet. */ - cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; + if (ps->sock) { + error = -EEXIST; + goto end; + } - /* Allocate and initialize a new session context. */ - session = l2tp_session_create(sizeof(struct pppol2tp_session), - tunnel, session_id, - peer_session_id, &cfg); - if (session == NULL) { - error = -ENOMEM; + /* consistency checks */ + if (ps->tunnel_sock != tunnel->sock) { + error = -EEXIST; goto end; } } else { - ps = l2tp_session_priv(session); - error = -EEXIST; - if (ps->sock != NULL) - goto end; + /* Default MTU must allow space for UDP/L2TP/PPP headers */ + cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; + cfg.mru = cfg.mtu; - /* consistency checks */ - if (ps->tunnel_sock != tunnel->sock) + session = l2tp_session_create(sizeof(struct pppol2tp_session), + tunnel, session_id, + peer_session_id, &cfg); + if (IS_ERR(session)) { + error = PTR_ERR(session); goto end; + } } /* Associate session with its PPPoL2TP socket */ @@ -777,6 +779,8 @@ out_no_ppp: session->name); end: + if (drop_refcnt) + l2tp_session_dec_refcount(session); release_sock(sk); return error; @@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i if (tunnel->sock == NULL) goto out; - /* Check that this session doesn't already exist */ - error = -EEXIST; - session = l2tp_session_find(net, tunnel, session_id); - if (session != NULL) - goto out; - /* Default MTU values. */ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; @@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. 
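
l2tp_session_create() now reports why it failed by encoding the errno in the returned pointer, and callers switch from NULL checks to IS_ERR()/PTR_ERR(), as the hunk below shows. A userspace re-creation of that kernel idiom, for readers who have not met it; the encoding relies on the top 4095 addresses never being valid allocations:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)(uintptr_t)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical constructor standing in for l2tp_session_create(). */
static void *make_session(int fail)
{
	if (fail)
		return ERR_PTR(-EEXIST);  /* duplicate id, say */
	return malloc(32);
}

int main(void)
{
	void *s = make_session(1);

	if (IS_ERR(s)) {
		printf("create failed: %ld\n", PTR_ERR(s));
		return 1;
	}
	free(s);
	return 0;
}
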
*/ - error = -ENOMEM; session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, cfg); - if (session == NULL) + if (IS_ERR(session)) { + error = PTR_ERR(session); goto out; + } ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; @@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, if (stats.session_id != 0) { /* resend to session ioctl handler */ struct l2tp_session *session = - l2tp_session_find(sock_net(sk), tunnel, stats.session_id); - if (session != NULL) - err = pppol2tp_session_ioctl(session, cmd, arg); - else + l2tp_session_get(sock_net(sk), tunnel, + stats.session_id, true); + + if (session) { + err = pppol2tp_session_ioctl(session, cmd, + arg); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } else { err = -EBADR; + } break; } #ifdef CONFIG_XFRM @@ -1377,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, } else err = pppol2tp_session_setsockopt(sk, session, optname, val); - err = 0; - end_put_sess: sock_put(sk); end: @@ -1501,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); sock_put(ps->tunnel_sock); - } else + if (err) + goto end_put_sess; + } else { err = pppol2tp_session_getsockopt(sk, session, optname, &val); + if (err) + goto end_put_sess; + } err = -EFAULT; if (put_user(len, optlen)) @@ -1554,7 +1563,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) { - pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx); + pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true); pd->session_idx++; if (pd->session == NULL) { @@ -1681,10 +1690,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v) /* Show the tunnel or session context. */ - if (pd->session == NULL) + if (!pd->session) { pppol2tp_seq_tunnel_show(m, pd->tunnel); - else + } else { pppol2tp_seq_session_show(m, pd->session); + if (pd->session->deref) + pd->session->deref(pd->session); + l2tp_session_dec_refcount(pd->session); + } out: return 0; @@ -1843,4 +1856,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP"); MODULE_LICENSE("GPL"); MODULE_VERSION(PPPOL2TP_DRV_VERSION); MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP); -MODULE_ALIAS_L2TP_PWTYPE(11); +MODULE_ALIAS_L2TP_PWTYPE(7); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 40813dd3301c..5bb0c5012819 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) ieee80211_recalc_ps(local); if (sdata->vif.type == NL80211_IFTYPE_MONITOR || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + local->ops->wake_tx_queue) { /* XXX: for AP_VLAN, actually track AP queues */ netif_tx_start_all_queues(dev); } else if (dev) { diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c index da9df2d56e66..22fc32143e9c 100644 --- a/net/netfilter/nf_conntrack_ecache.c +++ b/net/netfilter/nf_conntrack_ecache.c @@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net, BUG_ON(notify != new); RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); + /* synchronize_rcu() is called from ctnetlink_exit. 
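
Several exit paths in this series (nf_nat_snmp_basic_fini() earlier, and cttimeout, ctnetlink and nf_nat below) add synchronize_rcu() between clearing a hook pointer and tearing the rest down: a reader may have fetched the old pointer just before it was cleared and still be executing. A single-threaded userspace analogue, assuming liburcu (classic flavor, link with -lurcu); the hook structure is invented:

#include <urcu.h>
#include <stdio.h>
#include <stdlib.h>

struct hook { void (*fn)(void); };

static struct hook *active_hook;

static void call_hook(void)
{
	struct hook *h;

	rcu_read_lock();
	h = rcu_dereference(active_hook);
	if (h)
		h->fn();
	rcu_read_unlock();
}

static void unregister_hook(void)
{
	struct hook *old = active_hook;

	rcu_assign_pointer(active_hook, NULL);
	synchronize_rcu();   /* all readers of the old pointer are done */
	free(old);           /* freeing before the wait is use-after-free */
}

static void hello(void) { puts("hook"); }

int main(void)
{
	rcu_register_thread();

	active_hook = malloc(sizeof(*active_hook));
	active_hook->fn = hello;

	call_hook();
	unregister_hook();
	call_hook();         /* hook gone, nothing runs */

	rcu_unregister_thread();
	return 0;
}
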
*/ } EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); @@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net, BUG_ON(notify != new); RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); mutex_unlock(&nf_ct_ecache_mutex); + /* synchronize_rcu() is called from ctnetlink_exit. */ } EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 4b2e1fb28bb4..d80073037856 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, hlist_del_rcu(&exp->hnode); net->ct.expect_count--; - hlist_del(&exp->lnode); + hlist_del_rcu(&exp->lnode); master_help->expecting[exp->class]--; nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report); @@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) /* two references : one for hash insert, one for the timer */ atomic_add(2, &exp->use); - hlist_add_head(&exp->lnode, &master_help->expectations); + hlist_add_head_rcu(&exp->lnode, &master_help->expectations); master_help->expecting[exp->class]++; hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 02bcf00c2492..008299b7f78f 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); - BUG_ON(t == NULL); + if (!t) { + rcu_read_unlock(); + return NULL; + } + off = ALIGN(sizeof(struct nf_ct_ext), t->align); len = off + t->len + var_alloc_len; alloc_size = t->alloc_size + var_alloc_len; @@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id, rcu_read_lock(); t = rcu_dereference(nf_ct_ext_types[id]); - BUG_ON(t == NULL); + if (!t) { + rcu_read_unlock(); + return NULL; + } newoff = ALIGN(old->len, t->align); newlen = newoff + t->len + var_alloc_len; @@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type) RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL); update_alloc_size(type); mutex_unlock(&nf_ct_ext_type_mutex); - rcu_barrier(); /* Wait for completion of call_rcu()'s */ + synchronize_rcu(); } EXPORT_SYMBOL_GPL(nf_ct_extend_unregister); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 6dc44d9b4190..4eeb3418366a 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum) { struct nf_conntrack_helper *h; + rcu_read_lock(); + h = __nf_conntrack_helper_find(name, l3num, protonum); #ifdef CONFIG_MODULES if (h == NULL) { - if (request_module("nfct-helper-%s", name) == 0) + rcu_read_unlock(); + if (request_module("nfct-helper-%s", name) == 0) { + rcu_read_lock(); h = __nf_conntrack_helper_find(name, l3num, protonum); + } else { + return h; + } } #endif if (h != NULL && !try_module_get(h->me)) h = NULL; + rcu_read_unlock(); + return h; } EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); @@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n) } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister); +/* Caller should hold the rcu lock */ struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_name(const char *name) { struct 
nf_ct_helper_expectfn *cur; bool found = false; - rcu_read_lock(); list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (!strcmp(cur->name, name)) { found = true; break; } } - rcu_read_unlock(); return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name); +/* Caller should hold the rcu lock */ struct nf_ct_helper_expectfn * nf_ct_helper_expectfn_find_by_symbol(const void *symbol) { struct nf_ct_helper_expectfn *cur; bool found = false; - rcu_read_lock(); list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) { if (cur->expectfn == symbol) { found = true; break; } } - rcu_read_unlock(); return found ? cur : NULL; } EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 6806b5e73567..dc7dfd68fafe 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct, * treat the second attempt as a no-op instead of returning * an error. */ - if (help && help->helper && - !strcmp(help->helper->name, helpname)) - return 0; - else - return -EBUSY; + err = -EBUSY; + if (help) { + rcu_read_lock(); + helper = rcu_dereference(help->helper); + if (helper && !strcmp(helper->name, helpname)) + err = 0; + rcu_read_unlock(); + } + + return err; } if (!strcmp(helpname, "")) { @@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl, err = 0; if (test_bit(IPS_EXPECTED_BIT, &ct->status)) - events = IPCT_RELATED; + events = 1 << IPCT_RELATED; else - events = IPCT_NEW; + events = 1 << IPCT_NEW; if (cda[CTA_LABELS] && ctnetlink_attach_labels(ct, cda) == 0) @@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) last = (struct nf_conntrack_expect *)cb->args[1]; for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { restart: - hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]], - hnode) { + hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], + hnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; @@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); last = (struct nf_conntrack_expect *)cb->args[1]; restart: - hlist_for_each_entry(exp, &help->expectations, lnode) { + hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; if (cb->args[1]) { @@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); + /* No expectation linked to this connection tracking. 
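
A few hunks above, ctnetlink_new_conntrack() switches from `events = IPCT_RELATED` to `events = 1 << IPCT_RELATED`: the notifier mask is tested bit-wise, so passing the raw enum value either set the wrong bit or, for IPCT_NEW (value 0), no bit at all. The bug class in miniature; the enum values here mirror the upstream ordering but are illustrative:

#include <assert.h>

enum ct_event { IPCT_NEW = 0, IPCT_RELATED = 1, IPCT_DESTROY = 2 };

/* The notifier filter tests bits, not enum values. */
static int wants(unsigned int mask, enum ct_event ev)
{
	return mask & (1u << ev);
}

int main(void)
{
	/* Buggy: events = IPCT_NEW (== 0) sets no bits at all. */
	unsigned int buggy = IPCT_NEW;
	/* Fixed: events = 1 << IPCT_NEW. */
	unsigned int fixed = 1u << IPCT_NEW;

	assert(!wants(buggy, IPCT_NEW));
	assert(wants(fixed, IPCT_NEW));
	return 0;
}
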
*/ + if (!nfct_help(ct)) { + nf_ct_put(ct); + return 0; + } + c.data = ct; err = netlink_dump_start(ctnl, skb, nlh, &c); @@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net, return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); + rcu_read_lock(); if (cda[CTA_EXPECT_HELP_NAME]) { const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); helper = __nf_conntrack_helper_find(helpname, u3, nf_ct_protonum(ct)); if (helper == NULL) { + rcu_read_unlock(); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { err = -EOPNOTSUPP; goto err_ct; } + rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, u3, nf_ct_protonum(ct)); if (helper) { err = -EAGAIN; - goto err_ct; + goto err_rcu; } + rcu_read_unlock(); #endif err = -EOPNOTSUPP; goto err_ct; @@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net, exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); if (IS_ERR(exp)) { err = PTR_ERR(exp); - goto err_ct; + goto err_rcu; } err = nf_ct_expect_related_report(exp, portid, report); nf_ct_expect_put(exp); +err_rcu: + rcu_read_unlock(); err_ct: nf_ct_put(ct); return err; @@ -3442,6 +3459,7 @@ static void __exit ctnetlink_exit(void) #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT RCU_INIT_POINTER(nfnl_ct_hook, NULL); #endif + synchronize_rcu(); } module_init(ctnetlink_init); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 94b14c5a8b17..82802e4a6640 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void) #ifdef CONFIG_XFRM RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL); #endif + synchronize_rcu(); + for (i = 0; i < NFPROTO_NUMPROTO; i++) kfree(nf_nat_l4protos[i]); diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c index d43869879fcf..86067560a318 100644 --- a/net/netfilter/nf_nat_redirect.c +++ b/net/netfilter/nf_nat_redirect.c @@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, rcu_read_lock(); idev = __in6_dev_get(skb->dev); if (idev != NULL) { + read_lock_bh(&idev->lock); list_for_each_entry(ifa, &idev->addr_list, if_list) { newdst = ifa->addr; addr = true; break; } + read_unlock_bh(&idev->lock); } rcu_read_unlock(); diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index de8782345c86..d45558178da5 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c @@ -32,6 +32,13 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers"); +struct nfnl_cthelper { + struct list_head list; + struct nf_conntrack_helper helper; +}; + +static LIST_HEAD(nfnl_cthelper_list); + static int nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) @@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, int i, ret; struct nf_conntrack_expect_policy *expect_policy; struct nlattr *tb[NFCTH_POLICY_SET_MAX+1]; + unsigned int class_max; ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, nfnl_cthelper_expect_policy_set); @@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, if (!tb[NFCTH_POLICY_SET_NUM]) return -EINVAL; - helper->expect_class_max = - ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); - - if (helper->expect_class_max != 0 && - helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES) + class_max 
= ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); + if (class_max == 0) + return -EINVAL; + if (class_max > NF_CT_MAX_EXPECT_CLASSES) return -EOVERFLOW; expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) * - helper->expect_class_max, GFP_KERNEL); + class_max, GFP_KERNEL); if (expect_policy == NULL) return -ENOMEM; - for (i=0; i<helper->expect_class_max; i++) { + for (i = 0; i < class_max; i++) { if (!tb[NFCTH_POLICY_SET+i]) goto err; @@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper, if (ret < 0) goto err; } + + helper->expect_class_max = class_max - 1; helper->expect_policy = expect_policy; return 0; err: @@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[], struct nf_conntrack_tuple *tuple) { struct nf_conntrack_helper *helper; + struct nfnl_cthelper *nfcth; int ret; if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN]) return -EINVAL; - helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL); - if (helper == NULL) + nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL); + if (nfcth == NULL) return -ENOMEM; + helper = &nfcth->helper; ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) - goto err; + goto err1; strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN); helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); @@ -245,15 +256,101 @@ nfnl_cthelper_create(const struct nlattr * const tb[], ret = nf_conntrack_helper_register(helper); if (ret < 0) - goto err; + goto err2; + list_add_tail(&nfcth->list, &nfnl_cthelper_list); return 0; -err: - kfree(helper); +err2: + kfree(helper->expect_policy); +err1: + kfree(nfcth); return ret; } static int +nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy, + struct nf_conntrack_expect_policy *new_policy, + const struct nlattr *attr) +{ + struct nlattr *tb[NFCTH_POLICY_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, + nfnl_cthelper_expect_pol); + if (err < 0) + return err; + + if (!tb[NFCTH_POLICY_NAME] || + !tb[NFCTH_POLICY_EXPECT_MAX] || + !tb[NFCTH_POLICY_EXPECT_TIMEOUT]) + return -EINVAL; + + if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name)) + return -EBUSY; + + new_policy->max_expected = + ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX])); + new_policy->timeout = + ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT])); + + return 0; +} + +static int nfnl_cthelper_update_policy_all(struct nlattr *tb[], + struct nf_conntrack_helper *helper) +{ + struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1]; + struct nf_conntrack_expect_policy *policy; + int i, err; + + /* Check first that all policy attributes are well-formed, so we don't + * leave things in inconsistent state on errors. + */ + for (i = 0; i < helper->expect_class_max + 1; i++) { + + if (!tb[NFCTH_POLICY_SET + i]) + return -EINVAL; + + err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i], + &new_policy[i], + tb[NFCTH_POLICY_SET + i]); + if (err < 0) + return err; + } + /* Now we can safely update them. 
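
As the comment in the hunk says, nfnl_cthelper_update_policy_all() validates every policy attribute into scratch storage first and only then commits, so a malformed attribute cannot leave the live table half-updated. The two-phase shape, as a userspace sketch with placeholder types:

#include <errno.h>
#include <string.h>

#define NPOL 4

struct policy { unsigned int max_expected, timeout; };

/* Phase 1: parse every update into scratch storage; phase 2: copy.
 * A failure in phase 1 leaves the live table untouched. */
static int update_all(struct policy *live, const struct policy *in,
		      const int *valid)
{
	struct policy scratch[NPOL];
	int i;

	for (i = 0; i < NPOL; i++) {
		if (!valid[i])
			return -EINVAL;   /* nothing committed yet */
		scratch[i] = in[i];
	}
	memcpy(live, scratch, sizeof(scratch));
	return 0;
}

int main(void)
{
	struct policy live[NPOL] = { { 1, 10 }, { 1, 10 }, { 1, 10 }, { 1, 10 } };
	struct policy in[NPOL]   = { { 8, 60 }, { 8, 60 }, { 8, 60 }, { 8, 60 } };
	int ok[NPOL]  = { 1, 1, 1, 1 };
	int bad[NPOL] = { 1, 0, 1, 1 };

	update_all(live, in, bad);   /* rejected: live[] unchanged */
	return update_all(live, in, ok);
}
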
*/ + for (i = 0; i < helper->expect_class_max + 1; i++) { + policy = (struct nf_conntrack_expect_policy *) + &helper->expect_policy[i]; + policy->max_expected = new_policy->max_expected; + policy->timeout = new_policy->timeout; + } + + return 0; +} + +static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper, + const struct nlattr *attr) +{ + struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1]; + unsigned int class_max; + int err; + + err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr, + nfnl_cthelper_expect_policy_set); + if (err < 0) + return err; + + if (!tb[NFCTH_POLICY_SET_NUM]) + return -EINVAL; + + class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM])); + if (helper->expect_class_max + 1 != class_max) + return -EBUSY; + + return nfnl_cthelper_update_policy_all(tb, helper); +} + +static int nfnl_cthelper_update(const struct nlattr * const tb[], struct nf_conntrack_helper *helper) { @@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[], return -EBUSY; if (tb[NFCTH_POLICY]) { - ret = nfnl_cthelper_parse_expect_policy(helper, - tb[NFCTH_POLICY]); + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); if (ret < 0) return ret; } @@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, const char *helper_name; struct nf_conntrack_helper *cur, *helper = NULL; struct nf_conntrack_tuple tuple; - int ret = 0, i; + struct nfnl_cthelper *nlcth; + int ret = 0; if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) return -EINVAL; @@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, if (ret < 0) return ret; - rcu_read_lock(); - for (i = 0; i < nf_ct_helper_hsize && !helper; i++) { - hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { + list_for_each_entry(nlcth, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; - /* skip non-userspace conntrack helpers. 
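
nfnl_cthelper_update_policy() above checks `helper->expect_class_max + 1 != class_max` because the netlink attribute carries a count while the struct field, after this patch, stores the highest valid index. Keeping the two conventions straight, in a small sketch with invented names:

#include <assert.h>
#include <stdlib.h>

struct helper {
	unsigned int expect_class_max;   /* highest valid index, not count */
	int *policies;
};

/* Netlink gives us a count; store count - 1 per the field's meaning. */
static int set_policies(struct helper *h, unsigned int count)
{
	if (count == 0 || count > 16)
		return -1;
	h->policies = calloc(count, sizeof(*h->policies));
	if (!h->policies)
		return -1;
	h->expect_class_max = count - 1;
	return 0;
}

/* The dump path converts back to a count for userspace. */
static unsigned int dump_count(const struct helper *h)
{
	return h->expect_class_max + 1;
}

int main(void)
{
	struct helper h;

	assert(set_policies(&h, 3) == 0);
	assert(h.expect_class_max == 2);
	assert(dump_count(&h) == 3);
	free(h.policies);
	return 0;
}
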
*/ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) + continue; - if (strncmp(cur->name, helper_name, - NF_CT_HELPER_NAME_LEN) != 0) - continue; + if ((tuple.src.l3num != cur->tuple.src.l3num || + tuple.dst.protonum != cur->tuple.dst.protonum)) + continue; - if ((tuple.src.l3num != cur->tuple.src.l3num || - tuple.dst.protonum != cur->tuple.dst.protonum)) - continue; + if (nlh->nlmsg_flags & NLM_F_EXCL) + return -EEXIST; - if (nlh->nlmsg_flags & NLM_F_EXCL) { - ret = -EEXIST; - goto err; - } - helper = cur; - break; - } + helper = cur; + break; } - rcu_read_unlock(); if (helper == NULL) ret = nfnl_cthelper_create(tb, &tuple); @@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, ret = nfnl_cthelper_update(tb, helper); return ret; -err: - rcu_read_unlock(); - return ret; } static int @@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb, goto nla_put_failure; if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM, - htonl(helper->expect_class_max))) + htonl(helper->expect_class_max + 1))) goto nla_put_failure; - for (i=0; i<helper->expect_class_max; i++) { + for (i = 0; i < helper->expect_class_max + 1; i++) { nest_parms2 = nla_nest_start(skb, (NFCTH_POLICY_SET+i) | NLA_F_NESTED); if (nest_parms2 == NULL) @@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) { - int ret = -ENOENT, i; + int ret = -ENOENT; struct nf_conntrack_helper *cur; struct sk_buff *skb2; char *helper_name = NULL; struct nf_conntrack_tuple tuple; + struct nfnl_cthelper *nlcth; bool tuple_set = false; if (nlh->nlmsg_flags & NLM_F_DUMP) { @@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl, tuple_set = true; } - for (i = 0; i < nf_ct_helper_hsize; i++) { - hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) { + list_for_each_entry(nlcth, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; + if (helper_name && + strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) + continue; - /* skip non-userspace conntrack helpers. */ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + if (tuple_set && + (tuple.src.l3num != cur->tuple.src.l3num || + tuple.dst.protonum != cur->tuple.dst.protonum)) + continue; - if (helper_name && strncmp(cur->name, helper_name, - NF_CT_HELPER_NAME_LEN) != 0) { - continue; - } - if (tuple_set && - (tuple.src.l3num != cur->tuple.src.l3num || - tuple.dst.protonum != cur->tuple.dst.protonum)) - continue; - - skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (skb2 == NULL) { - ret = -ENOMEM; - break; - } + skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (skb2 == NULL) { + ret = -ENOMEM; + break; + } - ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, - nlh->nlmsg_seq, - NFNL_MSG_TYPE(nlh->nlmsg_type), - NFNL_MSG_CTHELPER_NEW, cur); - if (ret <= 0) { - kfree_skb(skb2); - break; - } + ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid, + nlh->nlmsg_seq, + NFNL_MSG_TYPE(nlh->nlmsg_type), + NFNL_MSG_CTHELPER_NEW, cur); + if (ret <= 0) { + kfree_skb(skb2); + break; + } - ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, - MSG_DONTWAIT); - if (ret > 0) - ret = 0; + ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid, + MSG_DONTWAIT); + if (ret > 0) + ret = 0; - /* this avoids a loop in nfnetlink. */ - return ret == -EAGAIN ? -ENOBUFS : ret; - } + /* this avoids a loop in nfnetlink. 
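
The larger theme of the nfnl_cthelper changes is visible here: instead of walking the shared nf_ct_helper_hash and filtering on NF_CT_HELPER_F_USERSPACE, the module keeps a private nfnl_cthelper_list of everything it registered, so get/del/exit only ever touch its own helpers. The ownership-list pattern, sketched in userspace with invented types:

#include <stdlib.h>

/* Object living in a registry shared with other subsystems
 * (stand-in for struct nf_conntrack_helper). */
struct helper { int flags; };

/* Private ownership record, as in struct nfnl_cthelper. */
struct owned {
	struct owned *next;
	struct helper helper;
};

static struct owned *my_helpers;     /* only what *we* created */

static struct helper *create_helper(void)
{
	struct owned *o = calloc(1, sizeof(*o));

	if (!o)
		return NULL;
	o->next = my_helpers;
	my_helpers = o;
	return &o->helper;   /* also registered globally in the real code */
}

/* Module exit: walk the private list -- no flag checks, no risk of
 * unregistering helpers some other module owns. */
static void destroy_all(void)
{
	struct owned *o, *n;

	for (o = my_helpers; o; o = n) {
		n = o->next;
		free(o);
	}
	my_helpers = NULL;
}

int main(void)
{
	create_helper();
	create_helper();
	destroy_all();
	return 0;
}
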
*/ + return ret == -EAGAIN ? -ENOBUFS : ret; } return ret; } @@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, { char *helper_name = NULL; struct nf_conntrack_helper *cur; - struct hlist_node *tmp; struct nf_conntrack_tuple tuple; bool tuple_set = false, found = false; - int i, j = 0, ret; + struct nfnl_cthelper *nlcth, *n; + int j = 0, ret; if (tb[NFCTH_NAME]) helper_name = nla_data(tb[NFCTH_NAME]); @@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, tuple_set = true; } - for (i = 0; i < nf_ct_helper_hsize; i++) { - hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], - hnode) { - /* skip non-userspace conntrack helpers. */ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; + j++; - j++; + if (helper_name && + strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN)) + continue; - if (helper_name && strncmp(cur->name, helper_name, - NF_CT_HELPER_NAME_LEN) != 0) { - continue; - } - if (tuple_set && - (tuple.src.l3num != cur->tuple.src.l3num || - tuple.dst.protonum != cur->tuple.dst.protonum)) - continue; + if (tuple_set && + (tuple.src.l3num != cur->tuple.src.l3num || + tuple.dst.protonum != cur->tuple.dst.protonum)) + continue; - found = true; - nf_conntrack_helper_unregister(cur); - } + found = true; + nf_conntrack_helper_unregister(cur); + kfree(cur->expect_policy); + + list_del(&nlcth->list); + kfree(nlcth); } + /* Make sure we return success if we flush and there is no helpers */ return (found || j == 0) ? 0 : -ENOENT; } @@ -662,20 +741,16 @@ err_out: static void __exit nfnl_cthelper_exit(void) { struct nf_conntrack_helper *cur; - struct hlist_node *tmp; - int i; + struct nfnl_cthelper *nlcth, *n; nfnetlink_subsys_unregister(&nfnl_cthelper_subsys); - for (i=0; i<nf_ct_helper_hsize; i++) { - hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i], - hnode) { - /* skip non-userspace conntrack helpers. 
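
nfnl_cthelper_del() above keeps one subtlety worth spelling out: a delete whose filter matches nothing fails with -ENOENT, unless there was nothing to examine at all, so flushing an already-empty list succeeds (the `found || j == 0` test). As a sketch:

#include <assert.h>
#include <errno.h>

/* Return convention from nfnl_cthelper_del(): 'examined' counts the
 * entries walked, 'found' records whether any matched the filter. */
static int del_result(int examined, int found)
{
	return (found || examined == 0) ? 0 : -ENOENT;
}

int main(void)
{
	assert(del_result(3, 1) == 0);        /* deleted a match   */
	assert(del_result(0, 0) == 0);        /* flush, empty list */
	assert(del_result(3, 0) == -ENOENT);  /* filter missed     */
	return 0;
}
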
*/ - if (!(cur->flags & NF_CT_HELPER_F_USERSPACE)) - continue; + list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { + cur = &nlcth->helper; - nf_conntrack_helper_unregister(cur); - } + nf_conntrack_helper_unregister(cur); + kfree(cur->expect_policy); + kfree(nlcth); } } diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 139e0867e56e..47d6656c9119 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void) #ifdef CONFIG_NF_CONNTRACK_TIMEOUT RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL); RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL); + synchronize_rcu(); #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ - rcu_barrier(); } module_init(cttimeout_init); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 3ee0b8a000a4..933509ebf3d3 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { skb_tx_error(entskb); - return NULL; + goto nlmsg_failure; } nlh = nlmsg_put(skb, 0, 0, @@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, if (!nlh) { skb_tx_error(entskb); kfree_skb(skb); - return NULL; + goto nlmsg_failure; } nfmsg = nlmsg_data(nlh); nfmsg->nfgen_family = entry->state.pf; @@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, } nlh->nlmsg_len = skb->len; + if (seclen) + security_release_secctx(secdata, seclen); return skb; nla_put_failure: skb_tx_error(entskb); kfree_skb(skb); net_err_ratelimited("nf_queue: error creating packet message\n"); +nlmsg_failure: + if (seclen) + security_release_secctx(secdata, seclen); return NULL; } diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index eb2721af898d..c4dad1254ead 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -21,6 +21,7 @@ struct nft_hash { enum nft_registers sreg:8; enum nft_registers dreg:8; u8 len; + bool autogen_seed:1; u32 modulus; u32 seed; u32 offset; @@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx, if (priv->offset + priv->modulus - 1 < priv->offset) return -EOVERFLOW; - if (tb[NFTA_HASH_SEED]) + if (tb[NFTA_HASH_SEED]) { priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED])); - else + } else { + priv->autogen_seed = true; get_random_bytes(&priv->seed, sizeof(priv->seed)); + } return nft_validate_register_load(priv->sreg, len) && nft_validate_register_store(ctx, priv->dreg, NULL, @@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb, goto nla_put_failure; if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus))) goto nla_put_failure; - if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed))) + if (!priv->autogen_seed && + nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed))) goto nla_put_failure; if (priv->offset != 0) if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 27241a767f17..c64aca611ac5 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; - if (len < tcp_hdrlen) + if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ 
-152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (len > tcp_hdrlen) return 0; + /* tcph->doff has 4 bits, do not wrap it to 0 */ + if (tcp_hdrlen >= 15 * 4) + return 0; + /* * MSS Option not found ?! add it.. */ diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 80cb7babeb64..df7f1df00330 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, rcu_read_lock(); indev = __in6_dev_get(skb->dev); - if (indev) + if (indev) { + read_lock_bh(&indev->lock); list_for_each_entry(ifa, &indev->addr_list, if_list) { if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED)) continue; @@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, laddr = &ifa->addr; break; } + read_unlock_bh(&indev->lock); + } rcu_read_unlock(); return laddr ? laddr : daddr; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index e0a87776a010..7b2c2fce408a 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net, */ if (nf_ct_is_confirmed(ct)) nf_ct_delete(ct, 0, 0); - else - nf_conntrack_put(&ct->ct_general); + + nf_conntrack_put(&ct->ct_general); nf_ct_set(skb, NULL, 0); return false; } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 9d4bb8eb63f2..3f76cb765e5b 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) /* Link layer. */ clear_vlan(key); - if (key->mac_proto == MAC_PROTO_NONE) { + if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { if (unlikely(eth_type_vlan(skb->protocol))) return -EINVAL; @@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) { - return key_extract(skb, key); + int res; + + res = key_extract(skb, key); + if (!res) + key->mac_proto &= ~SW_FLOW_KEY_INVALID; + + return res; } static int key_extract_mac_proto(struct sk_buff *skb) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a0dbe7ca8f72..8489beff5c25 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; + if (val > INT_MAX) + return -EINVAL; po->tp_reserve = val; return 0; } @@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, if (unlikely(!PAGE_ALIGNED(req->tp_block_size))) goto out; if (po->tp_version >= TPACKET_V3 && - (int)(req->tp_block_size - - BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0) + req->tp_block_size <= + BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv)) goto out; if (unlikely(req->tp_frame_size < po->tp_hdrlen + po->tp_reserve)) @@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, rb->frames_per_block = req->tp_block_size / req->tp_frame_size; if (unlikely(rb->frames_per_block == 0)) goto out; + if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) + goto out; if (unlikely((rb->frames_per_block * req->tp_block_nr) != req->tp_frame_nr)) goto out; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index b052b27a984e..1a2f9e964330 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -794,7 +794,7 @@ static void 
attach_default_qdiscs(struct net_device *dev) } } #ifdef CONFIG_NET_SCHED - if (dev->qdisc) + if (dev->qdisc != &noop_qdisc) qdisc_hash_add(dev->qdisc); #endif } diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 0439a1a68367..a9708da28eb5 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -246,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; + if (sctp_stream_new(asoc, gfp)) + goto fail_init; + /* Assume that peer would support both address types unless we are * told otherwise. */ @@ -264,7 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* AUTH related initializations */ INIT_LIST_HEAD(&asoc->endpoint_shared_keys); if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) - goto fail_init; + goto stream_free; asoc->active_key_id = ep->active_key_id; asoc->prsctp_enable = ep->prsctp_enable; @@ -287,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a return asoc; +stream_free: + sctp_stream_free(asoc->stream); fail_init: sock_put(asoc->base.sk); sctp_endpoint_put(asoc->ep); @@ -1407,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc, /* Update the association's pmtu and frag_point by going through all the * transports. This routine is called when a transport's PMTU has changed. */ -void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) +void sctp_assoc_sync_pmtu(struct sctp_association *asoc) { struct sctp_transport *t; __u32 pmtu = 0; @@ -1419,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc) list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { if (t->pmtu_pending && t->dst) { - sctp_transport_update_pmtu(sk, t, - SCTP_TRUNC4(dst_mtu(t->dst))); + sctp_transport_update_pmtu( + t, SCTP_TRUNC4(dst_mtu(t->dst))); t->pmtu_pending = 0; } if (!pmtu || (t->pathmtu < pmtu)) diff --git a/net/sctp/input.c b/net/sctp/input.c index 2a28ab20487f..0e06a278d2a9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, if (t->param_flags & SPP_PMTUD_ENABLE) { /* Update transports view of the MTU */ - sctp_transport_update_pmtu(sk, t, pmtu); + sctp_transport_update_pmtu(t, pmtu); /* Update association pmtu. */ - sctp_assoc_sync_pmtu(sk, asoc); + sctp_assoc_sync_pmtu(asoc); } /* Retransmit with the new pmtu setting. 
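The associola.c hunk above wires sctp_stream_new() into association init and adds a stream_free unwind label, so that a failure in a later step (the auth shared-key copy) also releases the freshly allocated stream. A minimal, self-contained sketch of that goto-unwind idiom follows; the names here are hypothetical stand-ins, not the kernel's allocators:

    /*
     * Sketch of the reverse-order goto unwind used in
     * sctp_association_init() above: every allocation that can be
     * followed by a failure gets a label, and each label frees
     * exactly the resources acquired before it.
     */
    #include <stdlib.h>

    struct assoc { void *ulpq; void *stream; void *keys; };

    static struct assoc *assoc_init(void)
    {
        struct assoc *a = calloc(1, sizeof(*a));

        if (!a)
            return NULL;
        if (!(a->ulpq = malloc(32)))
            goto fail_init;
        if (!(a->stream = malloc(32)))
            goto fail_ulpq;
        if (!(a->keys = malloc(32)))
            goto fail_stream;    /* mirrors the new "goto stream_free" */
        return a;

    fail_stream:
        free(a->stream);
    fail_ulpq:
        free(a->ulpq);
    fail_init:
        free(a);
        return NULL;
    }

Keeping the labels in reverse allocation order is what makes the patch small: adding the stream allocation only required one new label and one new free, without touching the earlier error paths.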
diff --git a/net/sctp/output.c b/net/sctp/output.c index 1224421036b3..1409a875ad8e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; + struct sock *sk; pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); - packet->vtag = vtag; - if (asoc && tp->dst) { - struct sock *sk = asoc->base.sk; - - rcu_read_lock(); - if (__sk_dst_get(sk) != tp->dst) { - dst_hold(tp->dst); - sk_setup_caps(sk, tp->dst); - } - - if (sk_can_gso(sk)) { - struct net_device *dev = tp->dst->dev; + /* do the following jobs only once for a flush schedule */ + if (!sctp_packet_empty(packet)) + return; - packet->max_size = dev->gso_max_size; - } else { - packet->max_size = asoc->pathmtu; - } rcu_read_unlock(); + /* set packet max_size with pathmtu */ + packet->max_size = tp->pathmtu; + if (!asoc) + return; - } else { - packet->max_size = tp->pathmtu; + /* update dst or transport pathmtu if needed */ + sk = asoc->base.sk; + if (!sctp_transport_dst_check(tp)) { + sctp_transport_route(tp, NULL, sctp_sk(sk)); + if (asoc->param_flags & SPP_PMTUD_ENABLE) + sctp_assoc_sync_pmtu(asoc); + } else if (!sctp_transport_pmtu_check(tp)) { + if (asoc->param_flags & SPP_PMTUD_ENABLE) + sctp_assoc_sync_pmtu(asoc); } - if (ecn_capable && sctp_packet_empty(packet)) { - struct sctp_chunk *chunk; + /* If there is a prepend chunk, stick it on the list before + * any other chunks get appended. + */ + if (ecn_capable) { + struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); - /* If there a is a prepend chunk stick it on the list before - * any other chunks get appended. - */ - chunk = sctp_get_ecne_prepend(asoc); if (chunk) sctp_packet_append_chunk(packet, chunk); } + + if (!tp->dst) + return; + + /* set packet max_size with gso_max_size if gso is enabled */ + rcu_read_lock(); + if (__sk_dst_get(sk) != tp->dst) { + dst_hold(tp->dst); + sk_setup_caps(sk, tp->dst); + } + packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size + : asoc->pathmtu; + rcu_read_unlock(); } /* Initialize the packet structure. */ @@ -582,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) sh->vtag = htonl(packet->vtag); sh->checksum = 0; - /* update dst if in need */ - if (!sctp_transport_dst_check(tp)) { - sctp_transport_route(tp, NULL, sctp_sk(sk)); - if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE) - sctp_assoc_sync_pmtu(sk, asoc); - } + /* drop packet if no dst */ dst = dst_clone(tp->dst); if (!dst) { IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); @@ -704,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, */ if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && - !chunk->msg->force_delay) + !asoc->force_delay) /* Nothing unacked */ return SCTP_XMIT_OK; diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 025ccff67072..8081476ed313 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1026,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) /* RFC 2960 6.5 Every DATA chunk MUST carry a valid * stream identifier. */ - if (chunk->sinfo.sinfo_stream >= - asoc->c.sinit_num_ostreams) { + if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) { /* Mark as failed send.
*/ sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 206377fe91ec..a0b29d43627f 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) sctp_seq_dump_remote_addrs(seq, assoc); seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " "%8d %8d %8d %8d", - assoc->hbinterval, assoc->c.sinit_max_instreams, - assoc->c.sinit_num_ostreams, assoc->max_retrans, + assoc->hbinterval, assoc->stream->incnt, + assoc->stream->outcnt, assoc->max_retrans, assoc->init_retries, assoc->shutdown_retries, assoc->rtx_data_chunks, atomic_read(&sk->sk_wmem_alloc), diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 969a30c7bb54..118faff6a332 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, * association. */ if (!asoc->temp) { - int error; - - asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams, - asoc->c.sinit_num_ostreams, gfp); - if (!asoc->stream) + if (sctp_stream_init(asoc, gfp)) goto clean_up; - error = sctp_assoc_set_id(asoc, gfp); - if (error) + if (sctp_assoc_set_id(asoc, gfp)) goto clean_up; } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e03bb1aab4d0..24c6ccce7539 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { - if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) + if (ntohs(skip->stream) >= asoc->stream->incnt) goto discard_noforce; } @@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( /* Silently discard the chunk if stream-id is not valid */ sctp_walk_fwdtsn(skip, chunk) { - if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) + if (ntohs(skip->stream) >= asoc->stream->incnt) goto gen_shutdown; } @@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, * and discard the DATA chunk. */ sid = ntohs(data_hdr->stream); - if (sid >= asoc->c.sinit_max_instreams) { + if (sid >= asoc->stream->incnt) { /* Mark tsn as received even though we drop it */ sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0f378ea2ae38..d9d4c92e06b3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) } if (asoc->pmtu_pending) - sctp_assoc_pending_pmtu(sk, asoc); + sctp_assoc_pending_pmtu(asoc); /* If fragmentation is disabled and the message length exceeds the * association fragmentation point, return EMSGSIZE. The I-D @@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) } /* Check for invalid stream. */ - if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { + if (sinfo->sinfo_stream >= asoc->stream->outcnt) { err = -EINVAL; goto out_free; } @@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) err = PTR_ERR(datamsg); goto out_free; } - datamsg->force_delay = !!(msg->msg_flags & MSG_MORE); + asoc->force_delay = !!(msg->msg_flags & MSG_MORE); /* Now send the (possibly) fragmented message. 
*/ list_for_each_entry(chunk, &datamsg->chunks, frag_list) { @@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { if (trans) { trans->pathmtu = params->spp_pathmtu; - sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + sctp_assoc_sync_pmtu(asoc); } else if (asoc) { asoc->pathmtu = params->spp_pathmtu; } else { @@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, (trans->param_flags & ~SPP_PMTUD) | pmtud_change; if (update) { sctp_transport_pmtu(trans, sctp_opt2sk(sp)); - sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + sctp_assoc_sync_pmtu(asoc); } } else if (asoc) { asoc->param_flags = @@ -4461,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - info->sctpi_instrms = asoc->c.sinit_max_instreams; - info->sctpi_outstrms = asoc->c.sinit_num_ostreams; + info->sctpi_instrms = asoc->stream->incnt; + info->sctpi_outstrms = asoc->stream->outcnt; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) @@ -4691,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, status.sstat_unackdata = asoc->unack_data; status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - status.sstat_instrms = asoc->c.sinit_max_instreams; - status.sstat_outstrms = asoc->c.sinit_num_ostreams; + status.sstat_instrms = asoc->stream->incnt; + status.sstat_outstrms = asoc->stream->outcnt; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, @@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog) if (sock->state != SS_UNCONNECTED) goto out; + if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED)) + goto out; + /* If backlog is zero, disable listening. */ if (!backlog) { if (sctp_sstate(sk, CLOSED)) diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 1c6cc04fa3a4..bbed997e1c5f 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -35,33 +35,60 @@ #include <net/sctp/sctp.h> #include <net/sctp/sm.h> -struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp) +int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp) { struct sctp_stream *stream; int i; stream = kzalloc(sizeof(*stream), gfp); if (!stream) - return NULL; + return -ENOMEM; - stream->outcnt = outcnt; + stream->outcnt = asoc->c.sinit_num_ostreams; stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); if (!stream->out) { kfree(stream); - return NULL; + return -ENOMEM; } for (i = 0; i < stream->outcnt; i++) stream->out[i].state = SCTP_STREAM_OPEN; - stream->incnt = incnt; + asoc->stream = stream; + + return 0; +} + +int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_stream *stream = asoc->stream; + int i; + + /* Initial stream->out size may be very big, so free it and alloc + * a new one with new outcnt to save memory. 
+ */ + kfree(stream->out); + stream->outcnt = asoc->c.sinit_num_ostreams; + stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp); + if (!stream->out) + goto nomem; + + for (i = 0; i < stream->outcnt; i++) + stream->out[i].state = SCTP_STREAM_OPEN; + + stream->incnt = asoc->c.sinit_max_instreams; stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp); if (!stream->in) { kfree(stream->out); - kfree(stream); - return NULL; + goto nomem; } - return stream; + return 0; + +nomem: + asoc->stream = NULL; + kfree(stream); + + return -ENOMEM; } void sctp_stream_free(struct sctp_stream *stream) diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3379668af368..721eeebfcd8a 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) +void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { - struct dst_entry *dst; + struct dst_entry *dst = sctp_transport_dst_check(t); if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", - __func__, pmtu, - SCTP_DEFAULT_MINSEGMENT); + __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); /* Use default minimum segment size and disable * pmtu discovery on this transport. */ @@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p t->pathmtu = pmtu; } - dst = sctp_transport_dst_check(t); - if (!dst) - t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); - if (dst) { - dst->ops->update_pmtu(dst, sk, NULL, pmtu); - + dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); dst = sctp_transport_dst_check(t); - if (!dst) - t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); } + + if (!dst) + t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); } /* Caches the dst entry and source address for a transport's destination diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 16b6b5988be9..570a2b67ca10 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev) /* Age scan results with time spent in suspend */ cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at); - if (rdev->ops->resume) { - rtnl_lock(); - if (rdev->wiphy.registered) - ret = rdev_resume(rdev); - rtnl_unlock(); - } + rtnl_lock(); + if (rdev->wiphy.registered && rdev->ops->resume) + ret = rdev_resume(rdev); + rtnl_unlock(); return ret; } diff --git a/samples/statx/test-statx.c b/samples/statx/test-statx.c index 8571d766331d..d4d77b09412c 100644 --- a/samples/statx/test-statx.c +++ b/samples/statx/test-statx.c @@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx) if (stx->stx_mask & STATX_BTIME) print_time(" Birth: ", &stx->stx_btime); - if (stx->stx_attributes) { - unsigned char bits; + if (stx->stx_attributes_mask) { + unsigned char bits, mbits; int loop, byte; static char attr_representation[64 + 1] = @@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx) printf("Attributes: %016llx (", stx->stx_attributes); for (byte = 64 - 8; byte >= 0; byte -= 8) { bits = stx->stx_attributes >> byte; + mbits = stx->stx_attributes_mask >> byte; for (loop = 7; loop >= 0; loop--) { int bit = byte + loop; - if (bits & 0x80) + if (!(mbits & 0x80)) + putchar('.'); /* Not supported */ + else if (bits & 0x80) putchar(attr_representation[63 - bit]); else - 
putchar('-'); + putchar('-'); /* Not set */ bits <<= 1; + mbits <<= 1; } if (byte) putchar(' '); diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 0a07f9014944..7234e61e7ce3 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -155,7 +155,7 @@ else # $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files # and locates generated .h files # FIXME: Replace both with specific CFLAGS* statements in the makefiles -__c_flags = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \ +__c_flags = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \ $(call flags,_c_flags) __a_flags = $(call flags,_a_flags) __cpp_flags = $(call flags,_cpp_flags) diff --git a/scripts/kconfig/gconf.c b/scripts/kconfig/gconf.c index 26d208b435a0..cfddddb9c9d7 100644 --- a/scripts/kconfig/gconf.c +++ b/scripts/kconfig/gconf.c @@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget, current = menu; display_tree_part(); gtk_widget_set_sensitive(back_btn, TRUE); - } else if ((col == COL_OPTION)) { + } else if (col == COL_OPTION) { toggle_sym_value(menu); gtk_tree_view_expand_row(view, path, TRUE); } diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h index 122153b16ea4..390d7c9685fd 100644 --- a/tools/include/linux/filter.h +++ b/tools/include/linux/filter.h @@ -168,6 +168,16 @@ .off = OFF, \ .imm = 0 }) +/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */ + +#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 273f21fa32b5..7aa57225cbf7 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -130,6 +130,12 @@ static struct arch architectures[] = { .name = "powerpc", .init = powerpc__annotate_init, }, + { + .name = "s390", + .objdump = { + .comment_char = '#', + }, + }, }; static void ins__delete(struct ins_operands *ops) diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c index 93b0aa74ca03..39c2c7d067bb 100644 --- a/tools/power/cpupower/utils/helpers/cpuid.c +++ b/tools/power/cpupower/utils/helpers/cpuid.c @@ -156,6 +156,7 @@ out: */ case 0x2C: /* Westmere EP - Gulftown */ cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO; + break; case 0x2A: /* SNB */ case 0x2D: /* SNB Xeon */ case 0x3A: /* IVB */ diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index fedca3285326..ccf2a69365cc 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states. These numbers are from hardware residency counters. \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor. \fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor. +\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms. +\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz. 
\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states. These numbers are from hardware residency counters. \fBPkgWatt\fP Watts consumed by the whole package. \fBCorWatt\fP Watts consumed by the core part of the package. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 828dccd3f01e..b11294730771 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old, * it is possible for mperf's non-halted cycles + idle states * to exceed TSC's all cycles: show c1 = 0% in that case. */ - if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc) + if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak)) old->c1 = 0; else { /* normal case, derive c1 */ @@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void) if (fp == NULL) fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r"); - else + else { rewind(fp); + fflush(fp); + } retval = fscanf(fp, "%d", &gfx_cur_mhz); if (retval != 1) @@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx " - "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n", + "(high %d guar %d eff %d low %d)\n", cpu, msr, (unsigned int)HWP_HIGHEST_PERF(msr), (unsigned int)HWP_GUARANTEED_PERF(msr), @@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx " - "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n", + "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n", cpu, msr, (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), @@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) return 0; fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx " - "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n", + "(min %d max %d des %d epp 0x%x window 0x%x)\n", cpu, msr, (unsigned int)(((msr) >> 0) & 0xff), (unsigned int)(((msr) >> 8) & 0xff), @@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model) case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */ case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */ case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */ - do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; + do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO; BIC_PRESENT(BIC_PKG__); BIC_PRESENT(BIC_RAM__); if (rapl_joules) { BIC_PRESENT(BIC_Pkg_J); BIC_PRESENT(BIC_Cor_J); BIC_PRESENT(BIC_RAM_J); + BIC_PRESENT(BIC_GFX_J); } else { BIC_PRESENT(BIC_PkgWatt); BIC_PRESENT(BIC_CorWatt); BIC_PRESENT(BIC_RAMWatt); + BIC_PRESENT(BIC_GFXWatt); } break; case INTEL_FAM6_HASWELL_X: /* HSX */ @@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p) { unsigned long long msr; - unsigned int dts; + unsigned int dts, dts2; int cpu; if (!(do_dts || do_ptm)) @@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n", cpu, msr, tcc_activation_temp - dts); -#ifdef THERM_DEBUG if (get_msr(cpu, 
MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr)) return 0; @@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); -#endif } - if (do_dts) { + if (do_dts && debug) { unsigned int resolution; if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr)) @@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n", cpu, msr, tcc_activation_temp - dts, resolution); -#ifdef THERM_DEBUG if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr)) return 0; @@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p dts2 = (msr >> 8) & 0x7F; fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n", cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2); -#endif } return 0; @@ -4578,7 +4578,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 17.02.24" + fprintf(outf, "turbostat version 17.04.12" " - Len Brown <lenb@kernel.org>\n"); } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 6a1ad58cb66f..9af09e8099c0 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -1,7 +1,14 @@ LIBDIR := ../../../lib BPFDIR := $(LIBDIR)/bpf +APIDIR := ../../../include/uapi +GENDIR := ../../../../include/generated +GENHDR := $(GENDIR)/autoconf.h -CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR) +ifneq ($(wildcard $(GENHDR)),) + GENFLAGS := -DHAVE_GENHDR +endif + +CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) LDLIBS += -lcap TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index d1555e4240c0..c848e90b6421 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -30,6 +30,14 @@ #include <bpf/bpf.h> +#ifdef HAVE_GENHDR +# include "autoconf.h" +#else +# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__) +# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 +# endif +#endif + #include "../../../include/linux/filter.h" #ifndef ARRAY_SIZE @@ -39,6 +47,8 @@ #define MAX_INSNS 512 #define MAX_FIXUPS 8 +#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) + struct bpf_test { const char *descr; struct bpf_insn insns[MAX_INSNS]; @@ -53,6 +63,7 @@ struct bpf_test { REJECT } result, result_unpriv; enum bpf_prog_type prog_type; + uint8_t flags; }; /* Note we want this to be 64 bit aligned so that the end of our array is @@ -2432,6 +2443,30 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "direct packet access: test15 (spill with xadd)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_5, 4096), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_DW, 
BPF_REG_2, BPF_REG_4, 0), + BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R2 invalid mem access 'inv'", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { "helper access to packet: test1, valid packet_ptr range", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -2934,6 +2969,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "valid map access into an array with a variable", @@ -2957,6 +2993,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "valid map access into an array with a signed variable", @@ -2984,6 +3021,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a constant", @@ -3025,6 +3063,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is outside of the array range", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a variable", @@ -3048,6 +3087,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with no floor check", @@ -3074,6 +3114,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a invalid max check", @@ -3100,6 +3141,7 @@ static struct bpf_test tests[] = { .errstr = "invalid access to map value, value_size=48 off=44 size=8", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid map access into an array with a invalid max check", @@ -3129,6 +3171,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result_unpriv = REJECT, .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "multiple registers share map_lookup_elem result", @@ -3252,6 +3295,7 @@ static struct bpf_test tests[] = { .result = REJECT, .errstr_unpriv = "R0 pointer arithmetic prohibited", .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "constant register |= constant should keep constant type", @@ -3418,6 +3462,26 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_LWT_XMIT, }, { + "overlapping checks for direct packet access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + 
.prog_type = BPF_PROG_TYPE_LWT_XMIT, + }, + { "invalid access of tc_classid for LWT_IN", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, @@ -3961,7 +4025,208 @@ static struct bpf_test tests[] = { .result_unpriv = REJECT, }, { - "map element value (adjusted) is preserved across register spilling", + "map element value or null is marked on register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + }, + { + "map element value store of cleared call register", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R1 !read_ok", + .errstr = "R1 !read_ok", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value with unaligned store", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), + BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), + BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), + BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), + BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), + BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { + "map element value with unaligned load", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic 
prohibited", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { + "map element value illegal alu op, 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 3", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 4", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value illegal alu op, 5", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_IMM(BPF_REG_3, 4096), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + }, + { + "map element value is preserved across register spilling", .insns = { BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), @@ -3983,6 +4248,7 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R0 pointer arithmetic prohibited", .result = ACCEPT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "helper access to variable 
memory: stack, bitwise AND + JMP, correct bounds", @@ -4421,6 +4687,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { "invalid range check", @@ -4452,6 +4719,7 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, } }; @@ -4530,11 +4798,11 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, static void do_test_single(struct bpf_test *test, bool unpriv, int *passes, int *errors) { + int fd_prog, expected_ret, reject_from_alignment; struct bpf_insn *prog = test->insns; int prog_len = probe_filter_length(prog); int prog_type = test->prog_type; int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; - int fd_prog, expected_ret; const char *expected_err; do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); @@ -4547,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv, test->result_unpriv : test->result; expected_err = unpriv && test->errstr_unpriv ? test->errstr_unpriv : test->errstr; + + reject_from_alignment = fd_prog < 0 && + (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) && + strstr(bpf_vlog, "Unknown alignment."); +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (reject_from_alignment) { + printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n", + strerror(errno)); + goto fail_log; + } +#endif if (expected_ret == ACCEPT) { - if (fd_prog < 0) { + if (fd_prog < 0 && !reject_from_alignment) { printf("FAIL\nFailed to load prog '%s'!\n", strerror(errno)); goto fail_log; @@ -4558,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv, printf("FAIL\nUnexpected success to load!\n"); goto fail_log; } - if (!strstr(bpf_vlog, expected_err)) { + if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) { printf("FAIL\nUnexpected error message!\n"); goto fail_log; } } (*passes)++; - printf("OK\n"); + printf("OK%s\n", reject_from_alignment ? 
+ " (NOTE: reject due to unknown alignment)" : ""); close_fds: close(fd_prog); close(fd_f1); diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index 1c5d0575802e..bf13fc2297aa 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -34,34 +34,34 @@ endif all: $(SUB_DIRS) $(SUB_DIRS): - BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all + BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all include ../lib.mk override define RUN_TESTS @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\ done; endef override define INSTALL_RULE @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\ done; endef override define EMIT_TESTS @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\ done; endef clean: @for TARGET in $(SUB_DIRS); do \ - BUILD_TARGET=$$OUTPUT/$$TARGET; \ + BUILD_TARGET=$(OUTPUT)/$$TARGET; \ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \ done; rm -f tags diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 276139a24e6f..702f8108608d 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -392,6 +392,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data) } /** + * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware + * + * For a specific CPU, initialize the GIC VE hardware. + */ +void kvm_vgic_init_cpu_hardware(void) +{ + BUG_ON(preemptible()); + + /* + * We want to make sure the list registers start out clear so that we + * only have the program the used registers. + */ + if (kvm_vgic_global_state.type == VGIC_V2) + vgic_v2_init_lrs(); + else + kvm_call_hyp(__vgic_v3_init_lrs); +} + +/** * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable * according to the host GIC model. Accordingly calls either * vgic_v2/v3_probe which registers the KVM_DEVICE that can be diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index a3ad7ff95c9b..0a4283ed9aa7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu, val = vmcr.ctlr; break; case GIC_CPU_PRIMASK: - val = vmcr.pmr; + /* + * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the + * the PMR field as GICH_VMCR.VMPriMask rather than + * GICC_PMR.Priority, so we expose the upper five bits of + * priority mask to userspace using the lower bits in the + * unsigned long. + */ + val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >> + GICV_PMR_PRIORITY_SHIFT; break; case GIC_CPU_BINPOINT: val = vmcr.bpr; @@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu, vmcr.ctlr = val; break; case GIC_CPU_PRIMASK: - vmcr.pmr = val; + /* + * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the + * the PMR field as GICH_VMCR.VMPriMask rather than + * GICC_PMR.Priority, so we expose the upper five bits of + * priority mask to userspace using the lower bits in the + * unsigned long. 
+ */ + vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) & + GICV_PMR_PRIORITY_MASK; break; case GIC_CPU_BINPOINT: vmcr.bpr = val; diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index b834ecdf3225..b637d9c7afe3 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val) return (unsigned long *)val; } +static inline void vgic_v2_write_lr(int lr, u32 val) +{ + void __iomem *base = kvm_vgic_global_state.vctrl_base; + + writel_relaxed(val, base + GICH_LR0 + (lr * 4)); +} + +void vgic_v2_init_lrs(void) +{ + int i; + + for (i = 0; i < kvm_vgic_global_state.nr_lr; i++) + vgic_v2_write_lr(i, 0); +} + void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) { struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2; @@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_ALIAS_BINPOINT_MASK; vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; - vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & - GICH_VMCR_PRIMASK_MASK; + vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) << + GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; } @@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) GICH_VMCR_ALIAS_BINPOINT_SHIFT; vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; - vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> - GICH_VMCR_PRIMASK_SHIFT; + vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >> + GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT; } void vgic_v2_enable(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index db28f7cadab2..6cf557e9f718 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq) return irq->pending_latch || irq->line_level; } +/* + * This struct provides an intermediate representation of the fields contained + * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC + * state to userspace can generate either GICv2 or GICv3 CPU interface + * registers regardless of the hardware-backed GIC used. + */ struct vgic_vmcr { u32 ctlr; u32 abpr; u32 bpr; - u32 pmr; + u32 pmr; /* Priority mask field in the GICC_PMR and + * ICC_PMR_EL1 priority field format */ /* Below member variables are valid only for GICv3 */ u32 grpen0; u32 grpen1; @@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm); int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type); +void vgic_v2_init_lrs(void); + static inline void vgic_get_irq_kref(struct vgic_irq *irq) { if (irq->intid < VGIC_MIN_LPI)
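The GIC_CPU_PRIMASK and vgic-v2.c hunks above convert between the 8-bit GICC_PMR.Priority format seen by userspace and the 5-bit GICH_VMCR.VMPriMask field held in hardware. A minimal sketch of that round trip follows; the shift and mask values are assumptions matching the usual GICv2 layout (five implemented priority bits in the top of an 8-bit field), the authoritative definitions live in the kernel's GIC headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed values; the kernel defines these in its irqchip headers. */
    #define GICV_PMR_PRIORITY_SHIFT 3
    #define GICV_PMR_PRIORITY_MASK  (0x1f << GICV_PMR_PRIORITY_SHIFT)

    /* 8-bit GICC_PMR priority -> 5-bit VMPriMask field value */
    static uint32_t pmr_to_vmcr(uint32_t pmr)
    {
        return (pmr & GICV_PMR_PRIORITY_MASK) >> GICV_PMR_PRIORITY_SHIFT;
    }

    /* 5-bit VMPriMask field value -> 8-bit GICC_PMR priority */
    static uint32_t vmcr_to_pmr(uint32_t vmpr)
    {
        return vmpr << GICV_PMR_PRIORITY_SHIFT;
    }

    int main(void)
    {
        uint32_t pmr = 0xf8; /* low three bits are not implemented in HW */

        printf("PMR 0x%02x -> VMPriMask 0x%02x -> PMR 0x%02x\n",
               pmr, pmr_to_vmcr(pmr), vmcr_to_pmr(pmr_to_vmcr(pmr)));
        return 0;
    }

The conversion is lossless for priorities whose low three bits are zero, which is exactly the "upper five bits" property the comments in the hunks above rely on.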