author     Dave Airlie <airlied@redhat.com>  2018-03-21 07:06:00 +0300
committer  Dave Airlie <airlied@redhat.com>  2018-03-21 07:06:00 +0300
commit     b65bd40311565a58b12f400e95e8aa0a1e3a8878
tree       6f884d510e7cc49bbe3cf456c9a36117c5bb1c7b
parent     19c800caa682d9e20087d7b98af49301cf1ab039
parent     288e5c8898c488298c39ff4bbf58928d30fbf99f
Merge tag 'drm-msm-next-2018-03-20' of git://people.freedesktop.org/~robclark/linux into drm-next
Updates for 4.17. Sorry, running a bit late on this; I didn't have a
chance to send the pull request before heading to Linaro. But it has all
been in linux-next for a while. Main updates:
+ DSI updates for 10nm / SDM845
+ fix for a race condition with the a3xx/a4xx fence completion irq
+ some refactoring/prep work for eventual a6xx support (i.e. when we have
  a userspace)
+ a5xx debugfs enhancements
+ some mdp5 fixes/cleanups to prepare for eventually merging writeback
  support (i.e. when we have a userspace)
* tag 'drm-msm-next-2018-03-20' of git://people.freedesktop.org/~robclark/linux: (36 commits)
drm/msm: fix building without debugfs
drm/msm/mdp5: don't pre-reserve LM's if no dual-dsi
drm/msm/mdp5: add missing LM flush bits
drm/msm/mdp5: print a bit more of the atomic state
drm/msm/mdp5: rework CTL START signal handling
drm/msm: Trigger fence completion from GPU
drm/msm/dsi: fix direct caller of msm_gem_free_object()
drm/msm: strip out msm_fence_cb
drm/msm: rename mdp->disp
drm/msm/dsi: Fix potential NULL pointer dereference in msm_dsi_modeset_init
drm/msm/adreno/a5xx_debugfs: fix potential NULL pointer dereference
drm/msm/dsi: Get byte_intf_clk only for versions that need it
drm/msm/adreno: Use generic function to load firmware to a buffer object
drm/msm/adreno: Define a list of firmware files to load per target
drm/msm/adreno: Rename gpmufw to powerfw
drm/msm: Pass the correct aperture end to drm_mm_init
drm/msm/gpu: Set number of clocks to 0 if the list allocation fails
drm/msm: Replace gem_object deprecated functions
drm/msm/hdmi: fix semicolon.cocci warnings
drm/msm/mdp5: Fix trailing semicolon
...
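The adreno refactoring in this series (see "Define a list of firmware files to load per target" and "Use generic function to load firmware to a buffer object" in the shortlog above, and the adreno_gpu.c / adreno_device.c hunks further down) replaces the separate pm4fw/pfpfw/gpmufw strings with a firmware table indexed by ADRENO_FW_* that one generic loop walks. The following is a minimal standalone sketch of that pattern, not the driver code itself: struct gpu_state and the printf() stand in for the kernel's adreno_gpu and request_firmware() handling.

/*
 * Sketch of the per-target firmware table: only the slots a target fills
 * in are loaded, and already-loaded slots are skipped, exactly like the
 * new adreno_load_fw() loop in the diff below.
 */
#include <stddef.h>
#include <stdio.h>

enum adreno_fw_id {
	ADRENO_FW_PM4,
	ADRENO_FW_PFP,
	ADRENO_FW_GPMU,
	ADRENO_FW_MAX,
};

struct adreno_info {
	const char *name;
	const char *fw[ADRENO_FW_MAX];	/* only the needed slots are set */
};

struct gpu_state {
	const struct adreno_info *info;
	const char *fw[ADRENO_FW_MAX];	/* "loaded" blobs (names only here) */
};

static const struct adreno_info a530_info = {
	.name = "A530",
	.fw = {
		[ADRENO_FW_PM4]  = "a530_pm4.fw",
		[ADRENO_FW_PFP]  = "a530_pfp.fw",
		[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
	},
};

static int load_fw(struct gpu_state *gpu)
{
	size_t i;

	for (i = 0; i < ADRENO_FW_MAX; i++) {
		if (!gpu->info->fw[i])
			continue;	/* this target has no such firmware */
		if (gpu->fw[i])
			continue;	/* already loaded */
		gpu->fw[i] = gpu->info->fw[i];	/* request_firmware() here in the kernel */
		printf("%s: loaded %s into slot %zu\n",
		       gpu->info->name, gpu->fw[i], i);
	}
	return 0;
}

int main(void)
{
	struct gpu_state gpu = { .info = &a530_info };

	return load_fw(&gpu);
}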
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dsi.txt | 26
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 20
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 50
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 187
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 99
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 26
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 52
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 70
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c) | 6
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c) | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h) | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c) | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c) | 13
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c) | 60
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h) | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c) | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c) | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h) | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c) | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_common.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp_common.xml.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_format.c (renamed from drivers/gpu/drm/msm/mdp/mdp_format.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.c) | 0
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.h) | 0
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 25
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 187
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 19
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 47
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 6
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 251
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 9
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 822
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 2
-rw-r--r--  include/uapi/drm/msm_drm.h | 2
73 files changed, 1880 insertions(+), 247 deletions(-)
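The mdp5 "rework CTL START signal handling" change further down threads a start flag through mdp5_ctl_commit(): while the crtc state has the new defer_start flag set, flush bits are only accumulated in ctl->flush_mask, and the combined FLUSH (followed by the START signal) is written once the encoder enables and commits with start = true. Below is a condensed sketch of that accumulate-then-flush logic only; it is not the driver function, and ctl_write_flush() plus the bare struct stand in for the driver's locked ctl_write() of REG_MDP5_CTL_FLUSH and struct mdp5_ctl.

#include <stdint.h>
#include <stdio.h>

struct mdp5_ctl_sketch {
	int id;
	uint32_t flush_mask;	/* pending flush bits, as added by the rework */
};

/* Placeholder for the driver's register write, done under a spinlock there. */
static void ctl_write_flush(struct mdp5_ctl_sketch *ctl, uint32_t mask)
{
	printf("CTL%d FLUSH <= 0x%08x\n", ctl->id, (unsigned int)mask);
}

/*
 * Commit a flush: with start == 0 (crtc flush while defer_start is set)
 * the bits are only accumulated; the first commit with start == 1 writes
 * the union of everything accumulated in a single FLUSH.
 */
static uint32_t ctl_commit(struct mdp5_ctl_sketch *ctl, uint32_t flush_mask, int start)
{
	if (!start) {
		ctl->flush_mask |= flush_mask;
		return ctl->flush_mask;
	}

	flush_mask |= ctl->flush_mask;
	ctl->flush_mask = 0;

	if (flush_mask)
		ctl_write_flush(ctl, flush_mask);

	return flush_mask;
}

int main(void)
{
	struct mdp5_ctl_sketch ctl = { .id = 0, .flush_mask = 0 };

	ctl_commit(&ctl, 0x00000001, 0);	/* LM flush deferred: encoder not enabled yet */
	ctl_commit(&ctl, 0x00010000, 1);	/* encoder enable: one combined flush */
	return 0;
}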
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt index a6671bd2c85a..518e9cdf0d4b 100644 --- a/Documentation/devicetree/bindings/display/msm/dsi.txt +++ b/Documentation/devicetree/bindings/display/msm/dsi.txt @@ -7,8 +7,6 @@ Required properties: - reg: Physical base address and length of the registers of controller - reg-names: The names of register regions. The following regions are required: * "dsi_ctrl" -- qcom,dsi-host-index: The ID of DSI controller hardware instance. This should - be 0 or 1, since we have 2 DSI controllers at most for now. - interrupts: The interrupt signal from the DSI block. - power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: Phandles to device clocks. @@ -22,6 +20,8 @@ Required properties: * "core" For DSIv2, we need an additional clock: * "src" + For DSI6G v2.0 onwards, we need also need the clock: + * "byte_intf" - assigned-clocks: Parents of "byte" and "pixel" for the given platform. - assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block. See [1] for details on clock bindings. @@ -88,21 +88,35 @@ Required properties: * "qcom,dsi-phy-28nm-lp" * "qcom,dsi-phy-20nm" * "qcom,dsi-phy-28nm-8960" -- reg: Physical base address and length of the registers of PLL, PHY and PHY - regulator + * "qcom,dsi-phy-14nm" + * "qcom,dsi-phy-10nm" +- reg: Physical base address and length of the registers of PLL, PHY. Some + revisions require the PHY regulator base address, whereas others require the + PHY lane base address. See below for each PHY revision. - reg-names: The names of register regions. The following regions are required: + For DSI 28nm HPM/LP/8960 PHYs and 20nm PHY: * "dsi_pll" * "dsi_phy" * "dsi_phy_regulator" + For DSI 14nm and 10nm PHYs: + * "dsi_pll" + * "dsi_phy" + * "dsi_phy_lane" - clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating 2 clocks: A byte clock (index 0), and a pixel clock (index 1). -- qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should - be 0 or 1, since we have 2 DSI PHYs at most for now. - power-domains: Should be <&mmcc MDSS_GDSC>. - clocks: Phandles to device clocks. See [1] for details on clock bindings. - clock-names: the following clocks are required: * "iface" + For 28nm HPM/LP, 28nm 8960 PHYs: +- vddio-supply: phandle to vdd-io regulator device node + For 20nm PHY: - vddio-supply: phandle to vdd-io regulator device node +- vcca-supply: phandle to vcca regulator device node + For 14nm PHY: +- vcca-supply: phandle to vcca regulator device node + For 10nm PHY: +- vdds-supply: phandle to vdds regulator device node Optional properties: - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 99d39b2aefa6..38cbde971b48 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -28,6 +28,19 @@ config DRM_MSM_REGISTER_LOGGING that can be parsed by envytools demsm tool. If enabled, register logging can be switched on via msm.reglog=y module param. +config DRM_MSM_GPU_SUDO + bool "Enable SUDO flag on submits" + depends on DRM_MSM && EXPERT + default n + help + Enable userspace that has CAP_SYS_RAWIO to submit GPU commands + that are run from RB instead of IB1. This essentially gives + userspace kernel level access, but is useful for firmware + debugging. + + Only use this if you are a driver developer. This should *not* + be enabled for production kernels. If unsure, say N. 
+ config DRM_MSM_HDMI_HDCP bool "Enable HDMI HDCP support in MSM DRM driver" depends on DRM_MSM && QCOM_SCM @@ -81,3 +94,10 @@ config DRM_MSM_DSI_14NM_PHY default y help Choose this option if DSI PHY on 8996 is used on the platform. + +config DRM_MSM_DSI_10NM_PHY + bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on SDM845 is used on the platform. diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 92b3844202d2..cd40c050b2d7 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -25,26 +25,26 @@ msm-y := \ edp/edp_connector.o \ edp/edp_ctrl.o \ edp/edp_phy.o \ - mdp/mdp_format.o \ - mdp/mdp_kms.o \ - mdp/mdp4/mdp4_crtc.o \ - mdp/mdp4/mdp4_dtv_encoder.o \ - mdp/mdp4/mdp4_lcdc_encoder.o \ - mdp/mdp4/mdp4_lvds_connector.o \ - mdp/mdp4/mdp4_irq.o \ - mdp/mdp4/mdp4_kms.o \ - mdp/mdp4/mdp4_plane.o \ - mdp/mdp5/mdp5_cfg.o \ - mdp/mdp5/mdp5_ctl.o \ - mdp/mdp5/mdp5_crtc.o \ - mdp/mdp5/mdp5_encoder.o \ - mdp/mdp5/mdp5_irq.o \ - mdp/mdp5/mdp5_mdss.o \ - mdp/mdp5/mdp5_kms.o \ - mdp/mdp5/mdp5_pipe.o \ - mdp/mdp5/mdp5_mixer.o \ - mdp/mdp5/mdp5_plane.o \ - mdp/mdp5/mdp5_smp.o \ + disp/mdp_format.o \ + disp/mdp_kms.o \ + disp/mdp4/mdp4_crtc.o \ + disp/mdp4/mdp4_dtv_encoder.o \ + disp/mdp4/mdp4_lcdc_encoder.o \ + disp/mdp4/mdp4_lvds_connector.o \ + disp/mdp4/mdp4_irq.o \ + disp/mdp4/mdp4_kms.o \ + disp/mdp4/mdp4_plane.o \ + disp/mdp5/mdp5_cfg.o \ + disp/mdp5/mdp5_ctl.o \ + disp/mdp5/mdp5_crtc.o \ + disp/mdp5/mdp5_encoder.o \ + disp/mdp5/mdp5_irq.o \ + disp/mdp5/mdp5_mdss.o \ + disp/mdp5/mdp5_kms.o \ + disp/mdp5/mdp5_pipe.o \ + disp/mdp5/mdp5_mixer.o \ + disp/mdp5/mdp5_plane.o \ + disp/mdp5/mdp5_smp.o \ msm_atomic.o \ msm_debugfs.o \ msm_drv.o \ @@ -62,31 +62,35 @@ msm-y := \ msm_ringbuffer.o \ msm_submitqueue.o +msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o + msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o -msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o +msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ - mdp/mdp4/mdp4_dsi_encoder.o \ + disp/mdp4/mdp4_dsi_encoder.o \ dsi/dsi_cfg.o \ dsi/dsi_host.o \ dsi/dsi_manager.o \ dsi/phy/dsi_phy.o \ - mdp/mdp5/mdp5_cmd_encoder.o + disp/mdp5/mdp5_cmd_encoder.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o +msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o +msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 4baef2738178..3ebbeb3a9b68 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -35,6 +35,7 @@ A3XX_INT0_CP_RB_INT | \ A3XX_INT0_CP_REG_PROTECT_FAULT | \ A3XX_INT0_CP_AHB_ERROR_HALT | \ + A3XX_INT0_CACHE_FLUSH_TS | \ A3XX_INT0_UCHE_OOB_ACCESS) extern bool hang_debug; @@ -256,8 
+257,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu) */ /* Load PM4: */ - ptr = (uint32_t *)(adreno_gpu->pm4->data); - len = adreno_gpu->pm4->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; DBG("loading PM4 ucode version: %x", ptr[1]); gpu_write(gpu, REG_AXXX_CP_DEBUG, @@ -268,8 +269,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]); /* Load PFP: */ - ptr = (uint32_t *)(adreno_gpu->pfp->data); - len = adreno_gpu->pfp->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); + len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; DBG("loading PFP ucode version: %x", ptr[5]); gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 8199a4b9f2fa..16d3d596638e 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -27,6 +27,7 @@ A4XX_INT0_CP_RB_INT | \ A4XX_INT0_CP_REG_PROTECT_FAULT | \ A4XX_INT0_CP_AHB_ERROR_HALT | \ + A4XX_INT0_CACHE_FLUSH_TS | \ A4XX_INT0_UCHE_OOB_ACCESS) extern bool hang_debug; @@ -274,16 +275,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu) return ret; /* Load PM4: */ - ptr = (uint32_t *)(adreno_gpu->pm4->data); - len = adreno_gpu->pm4->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data); + len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4; DBG("loading PM4 ucode version: %u", ptr[0]); gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0); for (i = 1; i < len; i++) gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]); /* Load PFP: */ - ptr = (uint32_t *)(adreno_gpu->pfp->data); - len = adreno_gpu->pfp->size / 4; + ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data); + len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4; DBG("loading PFP ucode version: %u", ptr[0]); gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c new file mode 100644 index 000000000000..059ec7d394d0 --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c @@ -0,0 +1,187 @@ +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + + +#include <linux/types.h> +#include <linux/debugfs.h> +#include <drm/drm_print.h> + +#include "a5xx_gpu.h" + +static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "PFP state:\n"); + + for (i = 0; i < 36; i++) { + gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i); + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA)); + } + + return 0; +} + +static int me_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "ME state:\n"); + + for (i = 0; i < 29; i++) { + gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i); + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA)); + } + + return 0; +} + +static int meq_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "MEQ state:\n"); + gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0); + + for (i = 0; i < 64; i++) { + drm_printf(p, " %02x: %08x\n", i, + gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA)); + } + + return 0; +} + +static int roq_print(struct msm_gpu *gpu, struct drm_printer *p) +{ + int i; + + drm_printf(p, "ROQ state:\n"); + gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0); + + for (i = 0; i < 512 / 4; i++) { + uint32_t val[4]; + int j; + for (j = 0; j < 4; j++) + val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA); + drm_printf(p, " %02x: %08x %08x %08x %08x\n", i, + val[0], val[1], val[2], val[3]); + } + + return 0; +} + +static int show(struct seq_file *m, void *arg) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct msm_drm_private *priv = dev->dev_private; + struct drm_printer p = drm_seq_file_printer(m); + int (*show)(struct msm_gpu *gpu, struct drm_printer *p) = + node->info_ent->data; + + return show(priv->gpu, &p); +} + +#define ENT(n) { .name = #n, .show = show, .data = n ##_print } +static struct drm_info_list a5xx_debugfs_list[] = { + ENT(pfp), + ENT(me), + ENT(meq), + ENT(roq), +}; + +/* for debugfs files that can be written to, we can't use drm helper: */ +static int +reset_set(void *data, u64 val) +{ + struct drm_device *dev = data; + struct msm_drm_private *priv = dev->dev_private; + struct msm_gpu *gpu = priv->gpu; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (!capable(CAP_SYS_ADMIN)) + return -EINVAL; + + /* TODO do we care about trying to make sure the GPU is idle? + * Since this is just a debug feature limited to CAP_SYS_ADMIN, + * maybe it is fine to let the user keep both pieces if they + * try to reset an active GPU. 
+ */ + + mutex_lock(&dev->struct_mutex); + + release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]); + adreno_gpu->fw[ADRENO_FW_PM4] = NULL; + + release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]); + adreno_gpu->fw[ADRENO_FW_PFP] = NULL; + + if (a5xx_gpu->pm4_bo) { + if (a5xx_gpu->pm4_iova) + msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); + drm_gem_object_unreference(a5xx_gpu->pm4_bo); + a5xx_gpu->pm4_bo = NULL; + } + + if (a5xx_gpu->pfp_bo) { + if (a5xx_gpu->pfp_iova) + msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace); + drm_gem_object_unreference(a5xx_gpu->pfp_bo); + a5xx_gpu->pfp_bo = NULL; + } + + gpu->needs_hw_init = true; + + pm_runtime_get_sync(&gpu->pdev->dev); + gpu->funcs->recover(gpu); + + pm_runtime_put_sync(&gpu->pdev->dev); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n"); + + +int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor) +{ + struct drm_device *dev; + struct dentry *ent; + int ret; + + if (!minor) + return 0; + + dev = minor->dev; + + ret = drm_debugfs_create_files(a5xx_debugfs_list, + ARRAY_SIZE(a5xx_debugfs_list), + minor->debugfs_root, minor); + + if (ret) { + dev_err(dev->dev, "could not install a5xx_debugfs_list\n"); + return ret; + } + + ent = debugfs_create_file("reset", S_IWUGO, + minor->debugfs_root, + dev, &reset_fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 7e09d44e4a15..a4f68affc13b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -140,6 +140,65 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); } +static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit, + struct msm_file_private *ctx) +{ + struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; + struct msm_gem_object *obj; + uint32_t *ptr, dwords; + unsigned int i; + + for (i = 0; i < submit->nr_cmds; i++) { + switch (submit->cmd[i].type) { + case MSM_SUBMIT_CMD_IB_TARGET_BUF: + break; + case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: + if (priv->lastctx == ctx) + break; + case MSM_SUBMIT_CMD_BUF: + /* copy commands into RB: */ + obj = submit->bos[submit->cmd[i].idx].obj; + dwords = submit->cmd[i].size; + + ptr = msm_gem_get_vaddr(&obj->base); + + /* _get_vaddr() shouldn't fail at this point, + * since we've already mapped it once in + * submit_reloc() + */ + if (WARN_ON(!ptr)) + return; + + for (i = 0; i < dwords; i++) { + /* normally the OUT_PKTn() would wait + * for space for the packet. 
But since + * we just OUT_RING() the whole thing, + * need to call adreno_wait_ring() + * ourself: + */ + adreno_wait_ring(ring, 1); + OUT_RING(ring, ptr[i]); + } + + msm_gem_put_vaddr(&obj->base); + + break; + } + } + + a5xx_flush(gpu, ring); + a5xx_preempt_trigger(gpu); + + /* we might not necessarily have a cmd from userspace to + * trigger an event to know that submit has completed, so + * do this manually: + */ + a5xx_idle(gpu, ring); + ring->memptrs->fence = submit->seqno; + msm_gpu_retire(gpu); +} + static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { @@ -149,6 +208,12 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0; + if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) { + priv->lastctx = NULL; + a5xx_submit_in_rb(gpu, submit, ctx); + return; + } + OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); OUT_RING(ring, 0x02); @@ -432,25 +497,6 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) return a5xx_idle(gpu, ring) ? 0 : -EINVAL; } - -static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, - const struct firmware *fw, u64 *iova) -{ - struct drm_gem_object *bo; - void *ptr; - - ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, - MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); - - if (IS_ERR(ptr)) - return ERR_CAST(ptr); - - memcpy(ptr, &fw->data[4], fw->size - 4); - - msm_gem_put_vaddr(bo); - return bo; -} - static int a5xx_ucode_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -458,8 +504,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) int ret; if (!a5xx_gpu->pm4_bo) { - a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4, - &a5xx_gpu->pm4_iova); + a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu, + adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova); if (IS_ERR(a5xx_gpu->pm4_bo)) { ret = PTR_ERR(a5xx_gpu->pm4_bo); @@ -471,8 +517,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu) } if (!a5xx_gpu->pfp_bo) { - a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp, - &a5xx_gpu->pfp_iova); + a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu, + adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova); if (IS_ERR(a5xx_gpu->pfp_bo)) { ret = PTR_ERR(a5xx_gpu->pfp_bo); @@ -793,19 +839,19 @@ static void a5xx_destroy(struct msm_gpu *gpu) if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_iova) msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo); + drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo); } if (a5xx_gpu->pfp_bo) { if (a5xx_gpu->pfp_iova) msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo); + drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo); } if (a5xx_gpu->gpmu_bo) { if (a5xx_gpu->gpmu_iova) msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); + drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo); } adreno_gpu_cleanup(adreno_gpu); @@ -1195,6 +1241,7 @@ static const struct adreno_gpu_funcs funcs = { .destroy = a5xx_destroy, #ifdef CONFIG_DEBUG_FS .show = a5xx_show, + .debugfs_init = a5xx_debugfs_init, #endif .gpu_busy = a5xx_gpu_busy, }, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6fb8c2f9b9e4..7d71860c4bee 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -49,6 +49,10 @@ struct a5xx_gpu { #define to_a5xx_gpu(x) 
container_of(x, struct a5xx_gpu, base) +#ifdef CONFIG_DEBUG_FS +int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor); +#endif + /* * In order to do lockless preemption we use a simple state machine to progress * through the process. diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 4e4d965fd9ab..e9c0e56dbec0 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -261,7 +261,6 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct drm_device *drm = gpu->dev; - const struct firmware *fw; uint32_t dwords = 0, offset = 0, bosize; unsigned int *data, *ptr, *cmds; unsigned int cmds_size; @@ -269,15 +268,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) if (a5xx_gpu->gpmu_bo) return; - /* Get the firmware */ - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw); - if (IS_ERR(fw)) { - DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n", - gpu->name); - return; - } - - data = (unsigned int *) fw->data; + data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data; /* * The first dword is the size of the remaining data in dwords. Use it @@ -285,12 +276,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) * the firmware that we read */ - if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2))) - goto out; + if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 || + (data[0] < 2) || (data[0] >= + (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2))) + return; /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */ if (data[1] != 2) - goto out; + return; cmds = data + data[2] + 3; cmds_size = data[0] - data[2] - 2; @@ -325,8 +318,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) msm_gem_put_vaddr(a5xx_gpu->gpmu_bo); a5xx_gpu->gpmu_dwords = dwords; - goto out; - + return; err: if (a5xx_gpu->gpmu_iova) msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace); @@ -336,8 +328,4 @@ err: a5xx_gpu->gpmu_bo = NULL; a5xx_gpu->gpmu_iova = 0; a5xx_gpu->gpmu_dwords = 0; - -out: - /* No need to keep that firmware laying around anymore */ - release_firmware(fw); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 62bdb7316da1..8e0cb161754b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -30,61 +30,75 @@ static const struct adreno_info gpulist[] = { .rev = ADRENO_REV(3, 0, 5, ANY_ID), .revn = 305, .name = "A305", - .pm4fw = "a300_pm4.fw", - .pfpfw = "a300_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, .gmem = SZ_256K, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(3, 0, 6, 0), .revn = 307, /* because a305c is revn==306 */ .name = "A306", - .pm4fw = "a300_pm4.fw", - .pfpfw = "a300_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, .gmem = SZ_128K, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID), .revn = 320, .name = "A320", - .pm4fw = "a300_pm4.fw", - .pfpfw = "a300_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a300_pm4.fw", + [ADRENO_FW_PFP] = "a300_pfp.fw", + }, .gmem = SZ_512K, .init = a3xx_gpu_init, }, { .rev = ADRENO_REV(3, 3, 0, ANY_ID), .revn = 330, .name = "A330", - .pm4fw = "a330_pm4.fw", - .pfpfw = "a330_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a330_pm4.fw", + [ADRENO_FW_PFP] = "a330_pfp.fw", + }, .gmem = SZ_1M, .init = a3xx_gpu_init, }, { .rev = 
ADRENO_REV(4, 2, 0, ANY_ID), .revn = 420, .name = "A420", - .pm4fw = "a420_pm4.fw", - .pfpfw = "a420_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, }, { .rev = ADRENO_REV(4, 3, 0, ANY_ID), .revn = 430, .name = "A430", - .pm4fw = "a420_pm4.fw", - .pfpfw = "a420_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a420_pm4.fw", + [ADRENO_FW_PFP] = "a420_pfp.fw", + }, .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, }, { .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", - .pm4fw = "a530_pm4.fw", - .pfpfw = "a530_pfp.fw", + .fw = { + [ADRENO_FW_PM4] = "a530_pm4.fw", + [ADRENO_FW_PFP] = "a530_pfp.fw", + [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2", + }, .gmem = SZ_1M, .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | ADRENO_QUIRK_FAULT_DETECT_MASK, .init = a5xx_gpu_init, - .gpmufw = "a530v3_gpmu.fw2", .zapfw = "a530_zap.mdt", }, }; @@ -150,6 +164,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev) return NULL; } +#ifdef CONFIG_DEBUG_FS + if (gpu->funcs->debugfs_init) { + gpu->funcs->debugfs_init(gpu, dev->primary); + gpu->funcs->debugfs_init(gpu, dev->render); + gpu->funcs->debugfs_init(gpu, dev->control); + } +#endif + return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index de63ff26a062..17d0506d058c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -140,27 +140,47 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname) static int adreno_load_fw(struct adreno_gpu *adreno_gpu) { - const struct firmware *fw; + int i; - if (adreno_gpu->pm4) - return 0; + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) { + const struct firmware *fw; + + if (!adreno_gpu->info->fw[i]) + continue; + + /* Skip if the firmware has already been loaded */ + if (adreno_gpu->fw[i]) + continue; - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw); - if (IS_ERR(fw)) - return PTR_ERR(fw); - adreno_gpu->pm4 = fw; + fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]); + if (IS_ERR(fw)) + return PTR_ERR(fw); - fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw); - if (IS_ERR(fw)) { - release_firmware(adreno_gpu->pm4); - adreno_gpu->pm4 = NULL; - return PTR_ERR(fw); + adreno_gpu->fw[i] = fw; } - adreno_gpu->pfp = fw; return 0; } +struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova) +{ + struct drm_gem_object *bo; + void *ptr; + + ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); + + if (IS_ERR(ptr)) + return ERR_CAST(ptr); + + memcpy(ptr, &fw->data[4], fw->size - 4); + + msm_gem_put_vaddr(bo); + + return bo; +} + int adreno_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -293,26 +313,12 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, OUT_RING(ring, 0x00000000); } + /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */ OUT_PKT3(ring, CP_EVENT_WRITE, 3); - OUT_RING(ring, CACHE_FLUSH_TS); + OUT_RING(ring, CACHE_FLUSH_TS | BIT(31)); OUT_RING(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->seqno); - /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ - OUT_PKT3(ring, CP_INTERRUPT, 1); - OUT_RING(ring, 0x80000000); - - /* Workaround for missing irq issue on 8x16/a306. 
Unsure if the - * root cause is a platform issue or some a306 quirk, but this - * keeps things humming along: - */ - if (adreno_is_a306(adreno_gpu)) { - OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1); - OUT_RING(ring, 0x00000000); - OUT_PKT3(ring, CP_INTERRUPT, 1); - OUT_RING(ring, 0x80000000); - } - #if 0 if (adreno_is_a3xx(adreno_gpu)) { /* Dummy set-constant to trigger context rollover */ @@ -569,8 +575,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { - release_firmware(adreno_gpu->pm4); - release_firmware(adreno_gpu->pfp); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) + release_firmware(adreno_gpu->fw[i]); msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 8d3d0a924908..d6b0e7b813f4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -48,6 +48,13 @@ enum adreno_regs { REG_ADRENO_REGISTER_MAX, }; +enum { + ADRENO_FW_PM4 = 0, + ADRENO_FW_PFP = 1, + ADRENO_FW_GPMU = 2, + ADRENO_FW_MAX, +}; + enum adreno_quirks { ADRENO_QUIRK_TWO_PASS_USE_WFI = 1, ADRENO_QUIRK_FAULT_DETECT_MASK = 2, @@ -72,8 +79,7 @@ struct adreno_info { struct adreno_rev rev; uint32_t revn; const char *name; - const char *pm4fw, *pfpfw; - const char *gpmufw; + const char *fw[ADRENO_FW_MAX]; uint32_t gmem; enum adreno_quirks quirks; struct msm_gpu *(*init)(struct drm_device *dev); @@ -115,7 +121,7 @@ struct adreno_gpu { } fwloc; /* firmware: */ - const struct firmware *pm4, *pfp; + const struct firmware *fw[ADRENO_FW_MAX]; /* * Register offsets are different between some GPUs. @@ -200,6 +206,8 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu) int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname); +struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, + const struct firmware *fw, u64 *iova); int adreno_hw_init(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu); void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h index 576cea30d391..576cea30d391 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 14bd3bd3e040..6e5e1aa54ce1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -129,7 +129,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) struct msm_kms *kms = &mdp4_kms->base.base; msm_gem_put_iova(val, kms->aspace); - drm_gem_object_unreference_unlocked(val); + drm_gem_object_put_unlocked(val); } static void mdp4_crtc_destroy(struct drm_crtc *crtc) @@ -382,7 +382,7 @@ static void update_cursor(struct drm_crtc *crtc) if (next_bo) { /* take a obj ref + iova ref when we start scanning out: */ - drm_gem_object_reference(next_bo); + drm_gem_object_get(next_bo); msm_gem_get_iova(next_bo, kms->aspace, &iova); /* enable cursor: */ @@ -467,7 +467,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail: - drm_gem_object_unreference_unlocked(cursor_bo); + drm_gem_object_put_unlocked(cursor_bo); return ret; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c 
b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c index 6a1ebdace391..6a1ebdace391 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c index ba8e587f734b..ba8e587f734b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c index b764d7f10312..b764d7f10312 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index f7f087419ed8..4b646bf9c214 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -164,7 +164,7 @@ static void mdp4_destroy(struct msm_kms *kms) if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace); - drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); + drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); if (aspace) { aspace->mmu->funcs->detach(aspace->mmu, diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h index a1b3e31e959e..0c13f8697bfe 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h @@ -22,7 +22,7 @@ #include "msm_drv.h" #include "msm_kms.h" -#include "mdp/mdp_kms.h" +#include "disp/mdp_kms.h" #include "mdp4.xml.h" struct device_node; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c index 4a645926edb7..4a645926edb7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c index e3b1c86b7aae..e3b1c86b7aae 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c index ce4245971673..ce4245971673 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c index 7a1ad3af08e3..7a1ad3af08e3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h index d9c10e02ee41..d9c10e02ee41 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c index 824067d2d427..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h index 75910d0f2f4c..75910d0f2f4c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c index 1abc7f5c345c..d6f79dc755b4 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c 
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c @@ -159,7 +159,7 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) pingpong_tearcheck_disable(encoder); mdp5_ctl_set_encoder_state(ctl, pipeline, false); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); bs_set(mdp5_cmd_enc, 0); @@ -180,7 +180,7 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) if (pingpong_tearcheck_enable(encoder)) return; - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); mdp5_ctl_set_encoder_state(ctl, pipeline, true); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index e414850dbbda..9893e43ba6c5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -97,9 +97,13 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask) struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); struct mdp5_ctl *ctl = mdp5_cstate->ctl; struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; + bool start = !mdp5_cstate->defer_start; + + mdp5_cstate->defer_start = false; DBG("%s: flush=%08x", crtc->name, flush_mask); - return mdp5_ctl_commit(ctl, pipeline, flush_mask); + + return mdp5_ctl_commit(ctl, pipeline, flush_mask, start); } /* @@ -170,7 +174,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) struct msm_kms *kms = &mdp5_kms->base.base; msm_gem_put_iova(val, kms->aspace); - drm_gem_object_unreference_unlocked(val); + drm_gem_object_put_unlocked(val); } static void mdp5_crtc_destroy(struct drm_crtc *crtc) @@ -947,12 +951,17 @@ mdp5_crtc_atomic_print_state(struct drm_printer *p, if (WARN_ON(!pipeline)) return; + if (mdp5_cstate->ctl) + drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl)); + drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ? pipeline->mixer->name : "(null)"); if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT) drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ? 
pipeline->r_mixer->name : "(null)"); + + drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode); } static void mdp5_crtc_reset(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c index 439e0a300e25..f93d5681267c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c @@ -41,7 +41,9 @@ struct mdp5_ctl { u32 status; bool encoder_enabled; - uint32_t start_mask; + + /* pending flush_mask bits */ + u32 flush_mask; /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ spinlock_t hw_lock; @@ -173,16 +175,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) { - struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; - struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr); + struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); struct mdp5_interface *intf = pipeline->intf; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - - ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) | - mdp_ctl_flush_mask_encoder(intf); - if (r_mixer) - ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ if (!mdp5_cfg_intf_is_virtual(intf->type)) @@ -198,7 +192,7 @@ static bool start_signal_needed(struct mdp5_ctl *ctl, { struct mdp5_interface *intf = pipeline->intf; - if (!ctl->encoder_enabled || ctl->start_mask != 0) + if (!ctl->encoder_enabled) return false; switch (intf->type) { @@ -227,25 +221,6 @@ static void send_start_signal(struct mdp5_ctl *ctl) spin_unlock_irqrestore(&ctl->hw_lock, flags); } -static void refill_start_mask(struct mdp5_ctl *ctl, - struct mdp5_pipeline *pipeline) -{ - struct mdp5_interface *intf = pipeline->intf; - struct mdp5_hw_mixer *mixer = pipeline->mixer; - struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer; - - ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm); - if (r_mixer) - ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm); - - /* - * Writeback encoder needs to program & flush - * address registers for each page flip.. 
- */ - if (intf->type == INTF_WB) - ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf); -} - /** * mdp5_ctl_set_encoder_state() - set the encoder state * @@ -268,7 +243,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); - refill_start_mask(ctl, pipeline); } return 0; @@ -494,6 +468,8 @@ u32 mdp_ctl_flush_mask_lm(int lm) case 0: return MDP5_CTL_FLUSH_LM0; case 1: return MDP5_CTL_FLUSH_LM1; case 2: return MDP5_CTL_FLUSH_LM2; + case 3: return MDP5_CTL_FLUSH_LM3; + case 4: return MDP5_CTL_FLUSH_LM4; case 5: return MDP5_CTL_FLUSH_LM5; default: return 0; } @@ -557,17 +533,14 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, */ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - u32 flush_mask) + u32 flush_mask, bool start) { struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; unsigned long flags; u32 flush_id = ctl->id; u32 curr_ctl_flush_mask; - ctl->start_mask &= ~flush_mask; - - VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask, - ctl->start_mask, ctl->pending_ctl_trigger); + VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger); if (ctl->pending_ctl_trigger & flush_mask) { flush_mask |= MDP5_CTL_FLUSH_CTL; @@ -582,6 +555,14 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, fix_for_single_flush(ctl, &flush_mask, &flush_id); + if (!start) { + ctl->flush_mask |= flush_mask; + return curr_ctl_flush_mask; + } else { + flush_mask |= ctl->flush_mask; + ctl->flush_mask = 0; + } + if (flush_mask) { spin_lock_irqsave(&ctl->hw_lock, flags); ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); @@ -590,7 +571,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, if (start_signal_needed(ctl, pipeline)) { send_start_signal(ctl); - refill_start_mask(ctl, pipeline); } return curr_ctl_flush_mask; @@ -711,6 +691,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, struct mdp5_ctl_manager *ctl_mgr; const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd); int rev = mdp5_cfg_get_hw_rev(cfg_hnd); + unsigned dsi_cnt = 0; const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; unsigned long flags; int c, ret; @@ -760,7 +741,10 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev, * only write into CTL0's FLUSH register) to keep two DSI pipes in sync. * Single FLUSH is supported from hw rev v3.0. 
*/ - if (rev >= 3) { + for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) + if (hw_cfg->intf.connect[c] == INTF_DSI) + dsi_cnt++; + if ((rev >= 3) && (dsi_cnt > 1)) { ctl_mgr->single_flush_supported = true; /* Reserve CTL0/1 for INTF1/2 */ ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h index b63120388dc6..403b0db0fa4c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h @@ -78,7 +78,7 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); /* @flush_mask: see CTL flush masks definitions below */ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, - u32 flush_mask); + u32 flush_mask, bool start); u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c index 36ad3cbe5f79..9af94e35f678 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c @@ -228,7 +228,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); /* * Wait for a vsync so we know the ENABLE=0 latched before @@ -262,7 +262,7 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); - mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); + mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true); mdp5_ctl_set_encoder_state(ctl, pipeline, true); @@ -319,6 +319,7 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder, mdp5_cstate->ctl = ctl; mdp5_cstate->pipeline.intf = intf; + mdp5_cstate->defer_start = true; return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c index 280e368bc9bb..280e368bc9bb 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 3e9bba4d6624..6d8e3a9a6fc0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) } else { dev_info(&pdev->dev, "no iommu, fallback to phys contig buffers for scanout\n"); - aspace = NULL;; + aspace = NULL; } pm_runtime_put_sync(&pdev->dev); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h index 9b3fe01089d1..425a03d213e5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h @@ -20,7 +20,7 @@ #include "msm_drv.h" #include "msm_kms.h" -#include "mdp/mdp_kms.h" +#include "disp/mdp_kms.h" #include "mdp5_cfg.h" /* must be included before mdp5.xml.h */ #include "mdp5.xml.h" #include "mdp5_pipe.h" @@ -133,6 +133,14 @@ struct mdp5_crtc_state { u32 pp_done_irqmask; bool cmd_mode; + + /* should we not write CTL[n].START register on flush? 
If the + * encoder has changed this is set to true, since encoder->enable() + * is called after crtc state is committed, but we only want to + * write the CTL[n].START register once. This lets us defer + * writing CTL[n].START until encoder->enable() + */ + bool defer_start; }; #define to_mdp5_crtc_state(x) \ container_of(x, struct mdp5_crtc_state, base) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c index f2a0db7a8a03..f2a0db7a8a03 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c index 8a00991f03c7..8a00991f03c7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h index 9be94f567fbd..9be94f567fbd 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c index ff52c49095f9..ff52c49095f9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h index bb2b0ac7aa2b..bb2b0ac7aa2b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c index 44fc9fe4737a..a9f31da7d45a 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c @@ -535,7 +535,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane, ctl = mdp5_crtc_get_ctl(new_state->crtc); - mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); + mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true); } *to_mdp5_plane_state(plane->state) = diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c index ae4983d9d0a5..ae4983d9d0a5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h index b41d0448fbe8..b41d0448fbe8 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h index 1494c407be44..1494c407be44 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h +++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c index b4a8aa4490ee..b4a8aa4490ee 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_format.c +++ b/drivers/gpu/drm/msm/disp/mdp_format.c diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c index 64287304054d..64287304054d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp_kms.c diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h index 1185487e7e5e..1185487e7e5e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/disp/mdp_kms.h diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 98742d7af6dc..b744bcc7d8ad 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ 
b/drivers/gpu/drm/msm/dsi/dsi.c @@ -192,13 +192,14 @@ void __exit msm_dsi_unregister(void) int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, struct drm_encoder *encoder) { - struct msm_drm_private *priv = dev->dev_private; + struct msm_drm_private *priv; struct drm_bridge *ext_bridge; int ret; - if (WARN_ON(!encoder)) + if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev)) return -EINVAL; + priv = dev->dev_private; msm_dsi->dev = dev; ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); @@ -245,19 +246,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, return 0; fail: - if (msm_dsi) { - /* bridge/connector are normally destroyed by drm: */ - if (msm_dsi->bridge) { - msm_dsi_manager_bridge_destroy(msm_dsi->bridge); - msm_dsi->bridge = NULL; - } + /* bridge/connector are normally destroyed by drm: */ + if (msm_dsi->bridge) { + msm_dsi_manager_bridge_destroy(msm_dsi->bridge); + msm_dsi->bridge = NULL; + } - /* don't destroy connector if we didn't make it */ - if (msm_dsi->connector && !msm_dsi->external_bridge) - msm_dsi->connector->funcs->destroy(msm_dsi->connector); + /* don't destroy connector if we didn't make it */ + if (msm_dsi->connector && !msm_dsi->external_bridge) + msm_dsi->connector->funcs->destroy(msm_dsi->connector); - msm_dsi->connector = NULL; - } + msm_dsi->connector = NULL; return ret; } diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 2302046197a8..70d9a9a47acd 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -36,6 +36,7 @@ enum msm_dsi_phy_type { MSM_DSI_PHY_20NM, MSM_DSI_PHY_28NM_8960, MSM_DSI_PHY_14NM, + MSM_DSI_PHY_10NM, MSM_DSI_PHY_MAX }; diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 479086ccf180..f6a9471b70c8 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27) - -Copyright (C) 2013-2017 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) + +Copyright (C) 2013-2018 by the following 
authors: - Rob Clark <robdclark@gmail.com> (robclark) - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) @@ -1556,5 +1547,175 @@ static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00 #define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108 +#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000 + +#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004 + +#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008 + +#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c + +#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010 + +#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014 + +#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018 + +#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c + +#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020 + +#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024 + +#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028 + +#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c + +#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030 + +#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034 + +#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4 + +#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4 + +#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8 + +#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec + +#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4 + +#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8 + +static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; } + +static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; } + +#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000 
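
The REG_DSI_10nm_PHY_LN_*() helpers just added all expand to a fixed per-register offset plus an 0x80-byte stride per lane index, which is what lets the 10nm PHY code further down walk the five lane blocks with a single loop index. A minimal sketch of that addressing scheme follows; the constant and function names are illustrative, not part of the generated header.

#include <stdint.h>

/* Stride between consecutive lane register blocks, as encoded in the
 * REG_DSI_10nm_PHY_LN_*() inlines above. */
#define EXAMPLE_DSI_10NM_LN_STRIDE     0x80u
#define EXAMPLE_DSI_10NM_LN_LPRX_CTRL  0x28u

/* Offset of a per-lane register for lane index 0..4. */
static inline uint32_t example_dsi_10nm_ln_reg(uint32_t reg, uint32_t lane)
{
	return reg + EXAMPLE_DSI_10NM_LN_STRIDE * lane;
}

/* example_dsi_10nm_ln_reg(EXAMPLE_DSI_10NM_LN_LPRX_CTRL, 2) == 0x128,
 * matching REG_DSI_10nm_PHY_LN_LPRX_CTRL(2) above. */
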
+ +#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004 + +#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010 + +#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c + +#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020 + +#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024 + +#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c + +#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030 + +#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054 + +#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064 + +#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c + +#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080 + +#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094 + +#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4 + +#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8 + +#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4 + +#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc + +#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0 + +#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4 + +#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8 + +#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c + +#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110 + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114 + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118 + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c + +#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120 + +#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c + +#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140 + +#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144 + +#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c + +#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154 + +#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c + +#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164 + +#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180 + +#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184 + +#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c + +#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0 + #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 65c1dfbbe019..0327bb54b01b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -118,6 +118,24 @@ static const struct msm_dsi_config msm8996_dsi_cfg = { .num_dsi = 2, }; +static const char * const dsi_sdm845_bus_clk_names[] = { + "iface", "bus", +}; + +static const struct msm_dsi_config sdm845_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 1, + .regs = { + {"vdda", 21800, 4 }, /* 1.2 V */ + }, + }, + .bus_clk_names = dsi_sdm845_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names), + .io_start = { 0xae94000, 0xae96000 }, + .num_dsi = 2, +}; + static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, @@ -131,6 +149,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg}, }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 
minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index 00a5da2663c6..9cfdcf1c95d5 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -25,6 +25,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 #define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 +#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 0f7324a686ca..7a03a9489708 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -115,6 +115,7 @@ struct msm_dsi_host { struct clk *pixel_clk; struct clk *byte_clk_src; struct clk *pixel_clk_src; + struct clk *byte_intf_clk; u32 byte_clk_rate; u32 esc_clk_rate; @@ -214,7 +215,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( goto exit; } - ahb_clk = clk_get(dev, "iface_clk"); + ahb_clk = msm_clk_get(msm_host->pdev, "iface"); if (IS_ERR(ahb_clk)) { pr_err("%s: cannot get interface clock\n", __func__); goto put_gdsc; @@ -225,7 +226,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config( ret = regulator_enable(gdsc_reg); if (ret) { pr_err("%s: unable to enable gdsc\n", __func__); - goto put_clk; + goto put_gdsc; } ret = clk_prepare_enable(ahb_clk); @@ -249,8 +250,6 @@ disable_clks: disable_gdsc: regulator_disable(gdsc_reg); pm_runtime_put_sync(dev); -put_clk: - clk_put(ahb_clk); put_gdsc: regulator_put(gdsc_reg); exit: @@ -379,6 +378,19 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host) goto exit; } + if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G && + cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) { + msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); + if (IS_ERR(msm_host->byte_intf_clk)) { + ret = PTR_ERR(msm_host->byte_intf_clk); + pr_err("%s: can't find byte_intf clock. 
ret=%d\n", + __func__, ret); + goto exit; + } + } else { + msm_host->byte_intf_clk = NULL; + } + msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk); if (!msm_host->byte_clk_src) { ret = -ENODEV; @@ -504,6 +516,16 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) goto error; } + if (msm_host->byte_intf_clk) { + ret = clk_set_rate(msm_host->byte_intf_clk, + msm_host->byte_clk_rate / 2); + if (ret) { + pr_err("%s: Failed to set rate byte intf clk, %d\n", + __func__, ret); + goto error; + } + } + ret = clk_prepare_enable(msm_host->esc_clk); if (ret) { pr_err("%s: Failed to enable dsi esc clk\n", __func__); @@ -522,8 +544,19 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) goto pixel_clk_err; } + if (msm_host->byte_intf_clk) { + ret = clk_prepare_enable(msm_host->byte_intf_clk); + if (ret) { + pr_err("%s: Failed to enable byte intf clk\n", + __func__); + goto byte_intf_clk_err; + } + } + return 0; +byte_intf_clk_err: + clk_disable_unprepare(msm_host->pixel_clk); pixel_clk_err: clk_disable_unprepare(msm_host->byte_clk); byte_clk_err: @@ -617,6 +650,8 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { clk_disable_unprepare(msm_host->esc_clk); clk_disable_unprepare(msm_host->pixel_clk); + if (msm_host->byte_intf_clk) + clk_disable_unprepare(msm_host->byte_intf_clk); clk_disable_unprepare(msm_host->byte_clk); } else { clk_disable_unprepare(msm_host->pixel_clk); @@ -1028,10 +1063,8 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) if (msm_host->tx_gem_obj) { msm_gem_put_iova(msm_host->tx_gem_obj, 0); - mutex_lock(&dev->struct_mutex); - msm_gem_free_object(msm_host->tx_gem_obj); + drm_gem_object_put_unlocked(msm_host->tx_gem_obj); msm_host->tx_gem_obj = NULL; - mutex_unlock(&dev->struct_mutex); } if (msm_host->tx_buf) diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 855248132b2b..4cb1cb68878b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -88,6 +88,8 @@ static int dsi_mgr_setup_components(int id) msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); + if (IS_ERR(src_pll)) + return PTR_ERR(src_pll); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); } else if (!other_dsi) { ret = 0; @@ -116,6 +118,8 @@ static int dsi_mgr_setup_components(int id) msm_dsi_phy_set_usecase(clk_slave_dsi->phy, MSM_DSI_PHY_SLAVE); src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); + if (IS_ERR(src_pll)) + return PTR_ERR(src_pll); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); if (ret) return ret; @@ -858,7 +862,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) int id = msm_dsi->id; int ret; - if (id > DSI_MAX) { + if (id >= DSI_MAX) { pr_err("%s: invalid id %d\n", __func__, id); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index 790ca280cbfd..8e9d5c255820 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -395,6 +395,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { { .compatible = "qcom,dsi-phy-14nm", .data = &dsi_phy_14nm_cfgs }, #endif +#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY + { .compatible = "qcom,dsi-phy-10nm", + .data = &dsi_phy_10nm_cfgs }, +#endif {} }; @@ -503,10 +507,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) goto fail; phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id); - 
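
The byte_intf clock handling added to dsi_host.c above is deliberately conditional: the clock is looked up only for DSI 6G v2.2.1 and later (the SDM845 entry added in dsi_cfg.c), it is programmed to half the byte clock rate, and it gets its own unwind label in the enable path. A condensed sketch of that pattern, assuming a host structure with the same two fields; the names are illustrative, and the real driver sets the rate before enabling the other link clocks rather than in one helper.

#include <linux/clk.h>
#include <linux/types.h>

struct example_dsi_host {
	struct clk *byte_intf_clk;	/* NULL on hosts older than 6G v2.2.1 */
	u32 byte_clk_rate;
};

/* Optional byte_intf clock: run it at byte_clk_rate / 2 when present. */
static int example_enable_byte_intf(struct example_dsi_host *host)
{
	int ret;

	if (!host->byte_intf_clk)
		return 0;	/* older hosts simply have no byte_intf clock */

	ret = clk_set_rate(host->byte_intf_clk, host->byte_clk_rate / 2);
	if (ret)
		return ret;

	return clk_prepare_enable(host->byte_intf_clk);
}
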
if (!phy->pll) + if (IS_ERR_OR_NULL(phy->pll)) dev_info(dev, - "%s: pll init failed, need separate pll clk driver\n", - __func__); + "%s: pll init failed: %ld, need separate pll clk driver\n", + __func__, PTR_ERR(phy->pll)); dsi_phy_disable_resource(phy); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index 1733f6608a09..c56268cbdb3d 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs; struct msm_dsi_dphy_timing { u32 clk_pre; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c new file mode 100644 index 000000000000..0af951aaeea1 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c @@ -0,0 +1,251 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include <linux/iopoll.h> + +#include "dsi_phy.h" +#include "dsi.xml.h" + +static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy) +{ + void __iomem *base = phy->base; + u32 data = 0; + + data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL); + mb(); /* make sure read happened */ + + return (data & BIT(0)); +} + +static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable) +{ + void __iomem *lane_base = phy->lane_base; + int phy_lane_0 = 0; /* TODO: Support all lane swap configs */ + + /* + * LPRX and CDRX need to enabled only for physical data lane + * corresponding to the logical data lane 0 + */ + if (enable) + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3); + else + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0); +} + +static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy) +{ + int i; + u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 }; + void __iomem *lane_base = phy->lane_base; + + /* Strength ctrl settings */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i), + 0x55); + /* + * Disable LPRX and CDRX for all lanes. And later on, it will + * be only enabled for the physical data lane corresponding + * to the logical data lane 0 + */ + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i), + 0x88); + } + + dsi_phy_hw_v3_0_config_lpcdrx(phy, true); + + /* other settings */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i), + i == 4 ? 
0x80 : 0x0); + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0); + dsi_phy_write(lane_base + + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i), + tx_dctrl[i]); + } + + /* Toggle BIT 0 to release freeze I/0 */ + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05); + dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); +} + +static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + /* + * TODO: These params need to be computed, they're currently hardcoded + * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a + * default escape clock of 19.2 Mhz. + */ + + timing->hs_halfbyte_en = 0; + timing->clk_zero = 0x1c; + timing->clk_prepare = 0x07; + timing->clk_trail = 0x07; + timing->hs_exit = 0x23; + timing->hs_zero = 0x21; + timing->hs_prepare = 0x07; + timing->hs_trail = 0x07; + timing->hs_rqst = 0x05; + timing->ta_sure = 0x00; + timing->ta_go = 0x03; + timing->ta_get = 0x04; + + timing->shared_timings.clk_pre = 0x2d; + timing->shared_timings.clk_post = 0x0d; + + return 0; +} + +static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + struct msm_dsi_phy_clk_request *clk_req) +{ + int ret; + u32 status; + u32 const delay_us = 5; + u32 const timeout_us = 1000; + struct msm_dsi_dphy_timing *timing = &phy->timing; + void __iomem *base = phy->base; + u32 data; + + DBG(""); + + if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + if (dsi_phy_hw_v3_0_is_pll_on(phy)) + pr_warn("PLL turned on before configuring PHY\n"); + + /* wait for REFGEN READY */ + ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS, + status, (status & BIT(0)), + delay_us, timeout_us); + if (ret) { + pr_err("Ref gen not ready. 
Aborting\n"); + return -EINVAL; + } + + /* de-assert digital and pll power down */ + data = BIT(6) | BIT(5); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data); + + /* Assert PLL core reset */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00); + + /* turn off resync FIFO */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00); + + /* Select MS1 byte-clk */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10); + + /* Enable LDO */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59); + + /* Configure PHY lane swap (TODO: we need to calculate this) */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84); + + /* DSI PHY timings */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0, + timing->hs_halfbyte_en); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1, + timing->clk_zero); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2, + timing->clk_prepare); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3, + timing->clk_trail); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4, + timing->hs_exit); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5, + timing->hs_zero); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6, + timing->hs_prepare); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7, + timing->hs_trail); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8, + timing->hs_rqst); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9, + timing->ta_go | (timing->ta_sure << 3)); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10, + timing->ta_get); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11, + 0x00); + + /* Remove power down from all blocks */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f); + + /* power up lanes */ + data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0); + + /* TODO: only power up lanes that are used */ + data |= 0x1F; + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data); + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F); + + /* Select full-rate mode */ + dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40); + + ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase); + if (ret) { + dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* DSI lane settings */ + dsi_phy_hw_v3_0_lane_settings(phy); + + DBG("DSI%d PHY enabled", phy->id); + + return 0; +} + +static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy) +{ +} + +static int dsi_10nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + dev_err(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = { + .type = MSM_DSI_PHY_10NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vdds", 36000, 32}, + }, + }, + .ops = { + .enable = dsi_10nm_phy_enable, + .disable = dsi_10nm_phy_disable, + .init = dsi_10nm_phy_init, + }, + .io_start = { 0xae94400, 0xae96400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index bc289f5c9078..613e206fa4fc 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -166,6 +166,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct 
platform_device *pdev, case MSM_DSI_PHY_14NM: pll = msm_dsi_pll_14nm_init(pdev, id); break; + case MSM_DSI_PHY_10NM: + pll = msm_dsi_pll_10nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; @@ -173,7 +176,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, if (IS_ERR(pll)) { dev_err(dev, "%s: failed to init DSI PLL\n", __func__); - return NULL; + return pll; } pll->type = type; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index f63e7ada74a8..8b32271cbc24 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -115,5 +115,14 @@ msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) return ERR_PTR(-ENODEV); } #endif +#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY +struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c new file mode 100644 index 000000000000..c4c37a7df637 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c @@ -0,0 +1,822 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2018, The Linux Foundation + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/iopoll.h> + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 10nm - clock diagram (eg: DSI0): + * + * dsi0_pll_out_div_clk dsi0_pll_bit_clk + * | | + * | | + * +---------+ | +----------+ | +----+ + * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte + * +---------+ | +----------+ | +----+ + * | | + * | | dsi0_pll_by_2_bit_clk + * | | | + * | | +----+ | |\ dsi0_pclk_mux + * | |--| /2 |--o--| \ | + * | | +----+ | \ | +---------+ + * | --------------| |--o--| div_7_4 |-- dsi0pll + * |------------------------------| / +---------+ + * | +-----+ | / + * -----------| /4? 
|--o----------|/ + * +-----+ | | + * | |dsiclk_sel + * | + * dsi0_pll_post_out_div_clk + */ + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 +#define NUM_PROVIDED_CLKS 2 + +struct dsi_pll_regs { + u32 pll_prop_gain_rate; + u32 pll_lockdet_rate; + u32 decimal_div_start; + u32 frac_div_start_low; + u32 frac_div_start_mid; + u32 frac_div_start_high; + u32 pll_clock_inverters; + u32 ssc_stepsize_low; + u32 ssc_stepsize_high; + u32 ssc_div_per_low; + u32 ssc_div_per_high; + u32 ssc_adjper_low; + u32 ssc_adjper_high; + u32 ssc_control; +}; + +struct dsi_pll_config { + u32 ref_freq; + bool div_override; + u32 output_div; + bool ignore_frac; + bool disable_prescaler; + bool enable_ssc; + bool ssc_center; + u32 dec_bits; + u32 frac_bits; + u32 lock_timer; + u32 ssc_freq; + u32 ssc_offset; + u32 ssc_adj_per; + u32 thresh_cycles; + u32 refclk_cycles; +}; + +struct pll_10nm_cached_state { + unsigned long vco_rate; + u8 bit_clk_div; + u8 pix_clk_div; + u8 pll_out_div; + u8 pll_mux; +}; + +struct dsi_pll_10nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + u64 vco_ref_clk_rate; + u64 vco_current_rate; + + /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + int vco_delay; + struct dsi_pll_config pll_configuration; + struct dsi_pll_regs reg_setup; + + /* private clocks: */ + struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; + u32 num_hws; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_10nm_cached_state cached_state; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_10nm *slave; +}; + +#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base) + +/* + * Global list of private DSI PLL struct pointers. We need this for Dual DSI + * mode, where the master PLL's clk_ops needs access the slave's private data + */ +static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX]; + +static void dsi_pll_setup_config(struct dsi_pll_10nm *pll) +{ + struct dsi_pll_config *config = &pll->pll_configuration; + + config->ref_freq = pll->vco_ref_clk_rate; + config->output_div = 1; + config->dec_bits = 8; + config->frac_bits = 18; + config->lock_timer = 64; + config->ssc_freq = 31500; + config->ssc_offset = 5000; + config->ssc_adj_per = 2; + config->thresh_cycles = 32; + config->refclk_cycles = 256; + + config->div_override = false; + config->ignore_frac = false; + config->disable_prescaler = false; + + config->enable_ssc = false; + config->ssc_center = 0; +} + +static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll) +{ + struct dsi_pll_config *config = &pll->pll_configuration; + struct dsi_pll_regs *regs = &pll->reg_setup; + u64 fref = pll->vco_ref_clk_rate; + u64 pll_freq; + u64 divider; + u64 dec, dec_multiple; + u32 frac; + u64 multiplier; + + pll_freq = pll->vco_current_rate; + + if (config->disable_prescaler) + divider = fref; + else + divider = fref * 2; + + multiplier = 1 << config->frac_bits; + dec_multiple = div_u64(pll_freq * multiplier, divider); + div_u64_rem(dec_multiple, multiplier, &frac); + + dec = div_u64(dec_multiple, multiplier); + + if (pll_freq <= 1900000000UL) + regs->pll_prop_gain_rate = 8; + else if (pll_freq <= 3000000000UL) + regs->pll_prop_gain_rate = 10; + else + regs->pll_prop_gain_rate = 12; + if (pll_freq < 1100000000UL) + regs->pll_clock_inverters = 8; + else + regs->pll_clock_inverters = 0; + + regs->pll_lockdet_rate = config->lock_timer; + regs->decimal_div_start = dec; + regs->frac_div_start_low = (frac & 0xff); + regs->frac_div_start_mid = 
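
dsi_pll_calc_dec_frac() above splits the requested VCO rate into a decimal divider and an 18-bit fraction relative to twice the reference clock (the divider is 2 * fref when the prescaler is left enabled, which is the default set in dsi_pll_setup_config()). A self-contained sketch of the same arithmetic with one worked value; the 19.2 MHz reference is an assumption for the example (the real parent is whatever "xo" clock the VCO is registered against), and the helper name is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Split vco_hz into decimal and 18-bit fractional divider terms against an
 * effective divider of 2 * fref_hz, as in the hunk above. */
static void example_calc_dec_frac(uint64_t vco_hz, uint64_t fref_hz,
				  uint32_t *dec, uint32_t *frac)
{
	const uint64_t mult = 1ull << 18;	/* frac_bits = 18 */
	uint64_t dec_multiple = (vco_hz * mult) / (fref_hz * 2);

	*dec = dec_multiple / mult;
	*frac = dec_multiple % mult;
}

int main(void)
{
	uint32_t dec, frac;

	/* 1.5 GHz VCO, 19.2 MHz reference:
	 * 1500000000 * 2^18 / 38400000 = 10240000, so dec = 39 and
	 * frac = 16384 (0x4000), i.e. FRAC_DIV_START_LOW/MID/HIGH end up
	 * as 0x00 / 0x40 / 0x00. */
	example_calc_dec_frac(1500000000ull, 19200000ull, &dec, &frac);
	printf("dec=%u frac=0x%x\n", dec, frac);

	return 0;
}
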
(frac & 0xff00) >> 8; + regs->frac_div_start_high = (frac & 0x30000) >> 16; +} + +#define SSC_CENTER BIT(0) +#define SSC_EN BIT(1) + +static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll) +{ + struct dsi_pll_config *config = &pll->pll_configuration; + struct dsi_pll_regs *regs = &pll->reg_setup; + u32 ssc_per; + u32 ssc_mod; + u64 ssc_step_size; + u64 frac; + + if (!config->enable_ssc) { + DBG("SSC not enabled\n"); + return; + } + + ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1; + ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1); + ssc_per -= ssc_mod; + + frac = regs->frac_div_start_low | + (regs->frac_div_start_mid << 8) | + (regs->frac_div_start_high << 16); + ssc_step_size = regs->decimal_div_start; + ssc_step_size *= (1 << config->frac_bits); + ssc_step_size += frac; + ssc_step_size *= config->ssc_offset; + ssc_step_size *= (config->ssc_adj_per + 1); + ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1)); + ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000); + + regs->ssc_div_per_low = ssc_per & 0xFF; + regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8; + regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF); + regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8); + regs->ssc_adjper_low = config->ssc_adj_per & 0xFF; + regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8; + + regs->ssc_control = config->ssc_center ? SSC_CENTER : 0; + + pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n", + regs->decimal_div_start, frac, config->frac_bits); + pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n", + ssc_per, (u32)ssc_step_size, config->ssc_adj_per); +} + +static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_regs *regs = &pll->reg_setup; + + if (pll->pll_configuration.enable_ssc) { + pr_debug("SSC is enabled\n"); + + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1, + regs->ssc_stepsize_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1, + regs->ssc_stepsize_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1, + regs->ssc_div_per_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1, + regs->ssc_div_per_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1, + regs->ssc_adjper_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1, + regs->ssc_adjper_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL, + SSC_EN | regs->ssc_control); + } +} + +static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->mmio; + + pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80); + pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03); + pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e); + pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40); + pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, + 0xba); + pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c); + pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa); + pll_write(base + 
REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, + 0x4c); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80); + pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29); + pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f); +} + +static void dsi_pll_commit(struct dsi_pll_10nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_regs *reg = &pll->reg_setup; + + pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12); + pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1, + reg->decimal_div_start); + pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1, + reg->frac_div_start_low); + pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1, + reg->frac_div_start_mid); + pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1, + reg->frac_div_start_high); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40); + pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06); + pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10); + pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS, + reg->pll_clock_inverters); +} + +static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate, + parent_rate); + + pll_10nm->vco_current_rate = rate; + pll_10nm->vco_ref_clk_rate = parent_rate; + + dsi_pll_setup_config(pll_10nm); + + dsi_pll_calc_dec_frac(pll_10nm); + + dsi_pll_calc_ssc(pll_10nm); + + dsi_pll_commit(pll_10nm); + + dsi_pll_config_hzindep_reg(pll_10nm); + + dsi_pll_ssc_commit(pll_10nm); + + /* flush, ensure all register writes are done*/ + wmb(); + + return 0; +} + +static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll) +{ + int rc; + u32 status = 0; + u32 const delay_us = 100; + u32 const timeout_us = 5000; + + rc = readl_poll_timeout_atomic(pll->mmio + + REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE, + status, + ((status & BIT(0)) > 0), + delay_us, + timeout_us); + if (rc) + pr_err("DSI PLL(%d) lock failed, status=0x%08x\n", + pll->id, status); + + return rc; +} + +static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll) +{ + u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0); + + pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0); + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0, + data & ~BIT(5)); + ndelay(250); +} + +static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll) +{ + u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0); + + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0, + data | BIT(5)); + pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0); + ndelay(250); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll) +{ + u32 data; + + data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1, + data & ~BIT(5)); +} + +static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll) +{ + u32 data; + + data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1, + data | BIT(5)); +} + +static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + int rc; + + dsi_pll_enable_pll_bias(pll_10nm); + if (pll_10nm->slave) + dsi_pll_enable_pll_bias(pll_10nm->slave); + + /* Start PLL */ + 
pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, + 0x01); + + /* + * ensure all PLL configurations are written prior to checking + * for PLL lock. + */ + wmb(); + + /* Check for PLL lock */ + rc = dsi_pll_10nm_lock_status(pll_10nm); + if (rc) { + pr_err("PLL(%d) lock failed\n", pll_10nm->id); + goto error; + } + + pll->pll_on = true; + + dsi_pll_enable_global_clk(pll_10nm); + if (pll_10nm->slave) + dsi_pll_enable_global_clk(pll_10nm->slave); + + pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, + 0x01); + if (pll_10nm->slave) + pll_write(pll_10nm->slave->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01); + +error: + return rc; +} + +static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll) +{ + pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0); + dsi_pll_disable_pll_bias(pll); +} + +static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + /* + * To avoid any stray glitches while abruptly powering down the PLL + * make sure to gate the clock using the clock enable bit before + * powering down the PLL + */ + dsi_pll_disable_global_clk(pll_10nm); + pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0); + dsi_pll_disable_sub(pll_10nm); + if (pll_10nm->slave) { + dsi_pll_disable_global_clk(pll_10nm->slave); + dsi_pll_disable_sub(pll_10nm->slave); + } + /* flush, ensure all register writes are done */ + wmb(); + pll->pll_on = false; +} + +static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + void __iomem *base = pll_10nm->mmio; + u64 ref_clk = pll_10nm->vco_ref_clk_rate; + u64 vco_rate = 0x0; + u64 multiplier; + u32 frac; + u32 dec; + u64 pll_freq, tmp64; + + dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1); + dec &= 0xff; + + frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1); + frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) & + 0xff) << 8); + frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) & + 0x3) << 16); + + /* + * TODO: + * 1. Assumes prescaler is disabled + * 2. Multiplier is 2^18. 
it should be 2^(num_of_frac_bits) + */ + multiplier = 1 << 18; + pll_freq = dec * (ref_clk * 2); + tmp64 = (ref_clk * 2 * frac); + pll_freq += div_u64(tmp64, multiplier); + + vco_rate = pll_freq; + + DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x", + pll_10nm->id, (unsigned long)vco_rate, dec, frac); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_10nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_10nm_vco_set_rate, + .recalc_rate = dsi_pll_10nm_vco_recalc_rate, + .prepare = dsi_pll_10nm_vco_prepare, + .unprepare = dsi_pll_10nm_vco_unprepare, +}; + +/* + * PLL Callbacks + */ + +static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy_cmn_mmio; + u32 cmn_clk_cfg0, cmn_clk_cfg1; + + cached->pll_out_div = pll_read(pll_10nm->mmio + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + cached->pll_out_div &= 0x3; + + cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0); + cached->bit_clk_div = cmn_clk_cfg0 & 0xf; + cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4; + + cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + cached->pll_mux = cmn_clk_cfg1 & 0x3; + + DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x", + pll_10nm->id, cached->pll_out_div, cached->bit_clk_div, + cached->pix_clk_div, cached->pll_mux); +} + +static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state; + void __iomem *phy_base = pll_10nm->phy_cmn_mmio; + u32 val; + + val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE); + val &= ~0x3; + val |= cached->pll_out_div; + pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val); + + pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + cached->bit_clk_div | (cached->pix_clk_div << 4)); + + val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1); + val &= ~0x3; + val |= cached->pll_mux; + pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val); + + DBG("DSI PLL%d", pll_10nm->id); + + return 0; +} + +static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + void __iomem *base = pll_10nm->phy_cmn_mmio; + u32 data = 0x0; /* internal PLL */ + + DBG("DSI PLL%d", pll_10nm->id); + + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + break; + case MSM_DSI_PHY_MASTER: + pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + data = 0x1; /* external PLL */ + break; + default: + return -EINVAL; + } + + /* set PLL src */ + pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2)); + + pll_10nm->uc = uc; + + return 0; +} + +static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data; + + DBG("DSI PLL%d", pll_10nm->id); + + if (byte_clk_provider) + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; + if (pixel_clk_provider) + *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; + + return 0; +} + +static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll); + + DBG("DSI PLL%d", 
pll_10nm->id); +} + +/* + * The post dividers and mux clocks are created using the standard divider and + * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux + * state to follow the master PLL's divider/mux state. Therefore, we don't + * require special clock ops that also configure the slave PLL registers + */ +static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm) +{ + char clk_name[32], parent[32], vco_name[32]; + char parent2[32], parent3[32], parent4[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_10nm_vco, + }; + struct device *dev = &pll_10nm->pdev->dev; + struct clk_hw **hws = pll_10nm->hws; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int num = 0; + int ret; + + DBG("DSI%d", pll_10nm->id); + + hw_data = devm_kzalloc(dev, sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id); + pll_10nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_10nm->base.clk_hw); + if (ret) + return ret; + + hws[num++] = &pll_10nm->base.clk_hw; + + snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id); + + hw = clk_hw_register_divider(dev, clk_name, + parent, CLK_SET_RATE_PARENT, + pll_10nm->mmio + + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, + 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + + /* BIT CLK: DIV_CTRL_3_0 */ + hw = clk_hw_register_divider(dev, clk_name, parent, + CLK_SET_RATE_PARENT, + pll_10nm->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 0, 4, CLK_DIVIDER_ONE_BASED, + &pll_10nm->postdiv_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + + /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 2); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + 0, 1, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id); + snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id); + snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id); + snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id); + snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id); + + hw = clk_hw_register_mux(dev, clk_name, + (const char *[]){ + parent, parent2, parent3, parent4 + }, 4, 0, pll_10nm->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_CLK_CFG1, + 0, 2, 0, NULL); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id); + 
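
Together with the mux registered just above and the pixel divider registered immediately after this, these clk_hw registrations implement the tree from the diagram at the top of the file: the VCO feeds a power-of-two out_div (1/2/4/8), then a one-based bit-clock divider (DIV_CTRL_3_0), then a fixed divide-by-8 for the byte clock. A small sketch of the arithmetic implied by the "VCO_CLK / OUT_DIV / BIT_DIV / 8" comment; the helper name and the example numbers are illustrative.

#include <stdint.h>

/* dsiNpllbyte = vco / out_div / bit_div / 8, where out_div is 1/2/4/8 and
 * bit_div is 1..15 (one-based divider). */
static inline uint64_t example_dsi_byte_clk_hz(uint64_t vco_hz,
					       uint32_t out_div,
					       uint32_t bit_div)
{
	return vco_hz / out_div / bit_div / 8;
}

/* e.g. a 2.4 GHz VCO with out_div = 2 and bit_div = 1 gives a 1.2 GHz bit
 * clock and therefore a 150 MHz byte clock. */
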
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id); + + /* PIX CLK DIV : DIV_CTRL_7_4*/ + hw = clk_hw_register_divider(dev, clk_name, parent, + 0, pll_10nm->phy_cmn_mmio + + REG_DSI_10nm_PHY_CMN_CLK_CFG0, + 4, 4, CLK_DIVIDER_ONE_BASED, + &pll_10nm->postdiv_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + pll_10nm->num_hws = num; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_10nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_10nm->hw_data); + if (ret) { + dev_err(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_10nm *pll_10nm; + struct msm_dsi_pll *pll; + int ret; + + if (!pdev) + return ERR_PTR(-ENODEV); + + pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL); + if (!pll_10nm) + return ERR_PTR(-ENOMEM); + + DBG("DSI PLL%d", id); + + pll_10nm->pdev = pdev; + pll_10nm->id = id; + pll_10nm_list[id] = pll_10nm; + + pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) { + dev_err(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_10nm->mmio)) { + dev_err(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + pll = &pll_10nm->base; + pll->min_rate = 1000000000UL; + pll->max_rate = 3500000000UL; + pll->get_provider = dsi_pll_10nm_get_provider; + pll->destroy = dsi_pll_10nm_destroy; + pll->save_state = dsi_pll_10nm_save_state; + pll->restore_state = dsi_pll_10nm_restore_state; + pll->set_usecase = dsi_pll_10nm_set_usecase; + + pll_10nm->vco_delay = 1; + + ret = pll_10nm_register(pll_10nm); + if (ret) { + dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + /* TODO: Remove this when we have proper display handover support */ + msm_dsi_pll_save_state(pll); + + return pll; +} diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c index 6e767979aab3..3656155e3793 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c @@ -769,7 +769,7 @@ static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctr if (rc) { pr_err("%s: wait key and an ready failed\n", __func__); return rc; - }; + } /* Read BCAPS and send to HDCP engine */ rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl); diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 1855182c76ce..ba74cb4f94df 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -161,8 +161,11 @@ int msm_debugfs_init(struct drm_minor *minor) return ret; } - if (priv->kms->funcs->debugfs_init) + if (priv->kms->funcs->debugfs_init) { ret = priv->kms->funcs->debugfs_init(priv->kms, minor); + if (ret) + return ret; + } return ret; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d90ef1d78a1b..30cd514d8f7c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -660,7 +660,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, ret = msm_gem_cpu_prep(obj, args->op, &timeout); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -678,7 +678,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data, ret = 
msm_gem_cpu_fini(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -718,7 +718,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, args->offset = msm_gem_mmap_offset(obj); } - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -783,7 +783,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, ret = 0; } - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); unlock: mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 0a653dd2e618..48ed5b9a8580 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -51,7 +51,6 @@ struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; -struct msm_fence_cb; struct msm_gem_address_space; struct msm_gem_vma; diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index fc175e724ad6..0e0c87252ab0 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -53,7 +53,7 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) for (i = 0; i < n; i++) { struct drm_gem_object *bo = msm_fb->planes[i]; - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_put_unlocked(bo); } kfree(msm_fb); @@ -160,7 +160,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, out_unref: for (i = 0; i < n; i++) - drm_gem_object_unreference_unlocked(bos[i]); + drm_gem_object_put_unlocked(bos[i]); return ERR_PTR(ret); } @@ -274,7 +274,7 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format /* note: if fb creation failed, we can't rely on fb destroy * to unref the bo: */ - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_put_unlocked(bo); return ERR_CAST(fb); } diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index 1aa6a4c6530c..b9fe059091f2 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -37,8 +37,6 @@ void msm_fence_context_free(struct msm_fence_context *fctx); int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence, ktime_t *timeout, bool interruptible); -int msm_queue_fence_cb(struct msm_fence_context *fctx, - struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence); struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 07376de9ff4c..95196479f651 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -470,7 +470,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, *offset = msm_gem_mmap_offset(obj); - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); fail: return ret; @@ -798,6 +798,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) } #endif +/* don't call directly! 
Use drm_gem_object_put() and friends */ void msm_gem_free_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; @@ -854,7 +855,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, ret = drm_gem_handle_create(file, obj, handle); /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ret; } @@ -974,7 +975,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, return obj; fail: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } @@ -1034,7 +1035,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, return obj; fail: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_put_unlocked(obj); return ERR_PTR(ret); } @@ -1052,7 +1053,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, if (iova) { ret = msm_gem_get_iova(obj, aspace, iova); if (ret) { - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); return ERR_PTR(ret); } } @@ -1060,7 +1061,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, vaddr = msm_gem_get_vaddr(obj); if (IS_ERR(vaddr)) { msm_gem_put_iova(obj, aspace); - drm_gem_object_unreference(obj); + drm_gem_object_put(obj); return ERR_CAST(vaddr); } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 9320e184b48d..c5d9bd3e47a8 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -146,6 +146,7 @@ struct msm_gem_submit { struct msm_gpu_submitqueue *queue; struct pid *pid; /* submitting process */ bool valid; /* true if no cmdstream patching needed */ + bool in_rb; /* "sudo" mode, copy cmds into RB */ struct msm_ringbuffer *ring; unsigned int nr_cmds; unsigned int nr_bos; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b8dc8f96caf2..7bd83e0afa97 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -430,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) return -EINVAL; + if (args->flags & MSM_SUBMIT_SUDO) { + if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) || + !capable(CAP_SYS_RAWIO)) + return -EINVAL; + } + queue = msm_submitqueue_get(ctx, args->queueid); if (!queue) return -ENOENT; @@ -471,6 +477,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out_unlock; } + if (args->flags & MSM_SUBMIT_SUDO) + submit->in_rb = true; + ret = submit_lookup_objects(submit, args, file); if (ret) goto out; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index d34e331554f3..ffbec224551b 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -96,6 +96,8 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name) { struct msm_gem_address_space *aspace; + u64 size = domain->geometry.aperture_end - + domain->geometry.aperture_start; aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); if (!aspace) @@ -106,7 +108,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, aspace->mmu = msm_iommu_new(dev, domain); drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT), - (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); + size >> PAGE_SHIFT); kref_init(&aspace->kref); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 
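
The msm_gem_vma.c hunk above is worth calling out: drm_mm_init() takes a start and a size, not a start and an end, so the address-space setup now derives the size from the IOMMU aperture bounds and converts both values to pages. A reduced sketch of that calling convention, with an illustrative helper name rather than the driver's function.

#include <drm/drm_mm.h>
#include <linux/mm.h>

/* drm_mm_init(mm, start, size): both values here are in units of pages. */
static void example_init_va_range(struct drm_mm *mm,
				  u64 aperture_start, u64 aperture_end)
{
	u64 size = aperture_end - aperture_start;

	drm_mm_init(mm, aperture_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
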
bd376f9e18a7..1c09acfb4028 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -552,7 +552,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) /* move to inactive: */ msm_gem_move_to_inactive(&msm_obj->base); msm_gem_put_iova(&msm_obj->base, gpu->aspace); - drm_gem_object_unreference(&msm_obj->base); + drm_gem_object_put(&msm_obj->base); } pm_runtime_mark_last_busy(&gpu->pdev->dev); @@ -634,7 +634,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu)); /* submit takes a reference to the bo and iova until retired: */ - drm_gem_object_reference(&msm_obj->base); + drm_gem_object_get(&msm_obj->base); msm_gem_get_iova(&msm_obj->base, submit->gpu->aspace, &iova); @@ -682,8 +682,10 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks, GFP_KERNEL); - if (!gpu->grp_clks) + if (!gpu->grp_clks) { + gpu->nr_clocks = 0; return -ENOMEM; + } of_property_for_each_string(dev->of_node, "clock-names", prop, name) { gpu->grp_clks[i] = get_clock(dev, name); @@ -865,7 +867,7 @@ fail: if (gpu->memptrs_bo) { msm_gem_put_vaddr(gpu->memptrs_bo); msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + drm_gem_object_put_unlocked(gpu->memptrs_bo); } platform_set_drvdata(pdev, NULL); @@ -888,7 +890,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) if (gpu->memptrs_bo) { msm_gem_put_vaddr(gpu->memptrs_bo); msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); - drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + drm_gem_object_put_unlocked(gpu->memptrs_bo); } if (!IS_ERR_OR_NULL(gpu->aspace)) { diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index fccfccd303af..b8241179175a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -65,6 +65,8 @@ struct msm_gpu_funcs { #ifdef CONFIG_DEBUG_FS /* show GPU status in debugfs: */ void (*show)(struct msm_gpu *gpu, struct seq_file *m); + /* for generation specific debugfs: */ + int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor); #endif int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value); }; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 6ca98da35f63..6f5295b3f2f6 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -76,7 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) if (ring->bo) { msm_gem_put_iova(ring->bo, ring->gpu->aspace); msm_gem_put_vaddr(ring->bo); - drm_gem_object_unreference_unlocked(ring->bo); + drm_gem_object_put_unlocked(ring->bo); } kfree(ring); } diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index bbbaffad772d..c06d0a5bdd80 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -201,10 +201,12 @@ struct drm_msm_gem_submit_bo { #define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */ #define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */ #define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */ +#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */ #define MSM_SUBMIT_FLAGS ( \ MSM_SUBMIT_NO_IMPLICIT | \ MSM_SUBMIT_FENCE_FD_IN | \ MSM_SUBMIT_FENCE_FD_OUT | \ + MSM_SUBMIT_SUDO | \ 0) /* Each cmdstream submit consists of a table of buffers involved, and |
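
The MSM_SUBMIT_SUDO bit added to the UAPI above pairs with the msm_gem_submit.c check earlier in this series: the flag is part of MSM_SUBMIT_FLAGS, so it survives the generic unknown-flag rejection, but it is honoured only on kernels built with CONFIG_DRM_MSM_GPU_SUDO and only for CAP_SYS_RAWIO callers, in which case the submit is marked in_rb and its commands are copied into the ringbuffer. A condensed sketch of that gating; the helper is illustrative, and the real ioctl masks the pipe id bits out of the flags word before the comparison.

#include <drm/msm_drm.h>	/* MSM_SUBMIT_* flags from the UAPI above */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/types.h>

static int example_validate_submit_flags(u32 flags)
{
	if (flags & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;		/* unknown flag bits */

	if ((flags & MSM_SUBMIT_SUDO) &&
	    (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) || !capable(CAP_SYS_RAWIO)))
		return -EINVAL;

	return 0;
}
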