author	Dave Airlie <airlied@redhat.com>	2022-05-11 05:40:47 +0300
committer	Dave Airlie <airlied@redhat.com>	2022-05-11 05:40:47 +0300
commit	f83493f7d34da258310ecd3d07f0cc78f884c954 (patch)
tree	5b0d034a505dc8a0f42a16fe17407e443afae32d /drivers
parent	d53b8e19c24bab37f72a2fc4b61d6f4d77b84ab4 (diff)
parent	24df12013853ac59c52cc726e9cbe51e38d09eda (diff)
download	linux-f83493f7d34da258310ecd3d07f0cc78f884c954.tar.xz
Merge tag 'drm-msm-next-2022-05-09' of https://gitlab.freedesktop.org/drm/msm into drm-next
- Fourcc modifier for tiled but not compressed layouts
- Support for userspace allocated IOVA (GPU virtual address)
- Devfreq clamp_to_idle fix
- DPU: DSC (Display Stream Compression) support
- DPU: inline rotation support on SC7280
- DPU: update DP timings to follow vendor recommendations
- DP, DPU: add support for wide bus (on newer chipsets)
- DP: eDP support
- Merge DPU1 and MDP5 MDSS driver, make dpu/mdp device the master component
- MDSS: optionally reset the IP block at the bootup to drop bootloader state
- Properly register and unregister internal bridges in the DRM framework
- Complete DPU IRQ cleanup
- DP: conversion to use drm_bridge and drm_bridge_connector
- eDP: drop old eDP parts again
- DPU: writeback support
- Misc small fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvJCr_1D8d0dgmyQC5HD4gmXeZw=bFV_CNCfceZbpMxRw@mail.gmail.com
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c4
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c4
-rw-r--r--drivers/gpu/drm/drm_writeback.c73
-rw-r--r--drivers/gpu/drm/msm/Kconfig51
-rw-r--r--drivers/gpu/drm/msm/Makefile29
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c68
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c592
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h97
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c112
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c79
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c753
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c157
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h95
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c94
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c215
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h80
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c161
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c62
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h22
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c32
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c279
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h115
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c241
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c260
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c170
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c78
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h93
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c76
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h31
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c64
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c81
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c252
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c15
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c15
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_audio.c50
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c21
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c98
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c166
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c438
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c215
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c103
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c49
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c25
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h14
-rw-r--r--drivers/gpu/drm/msm/dp/dp_power.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h80
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c300
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c15
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c10
-rw-r--r--drivers/gpu/drm/msm/edp/edp.h78
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c1374
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c11
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c3
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c294
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h162
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c20
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c6
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c151
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h53
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c37
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c61
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c51
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h25
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c97
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h21
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c415
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c5
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c15
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c4
111 files changed, 5627 insertions, 3814 deletions
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index e465cc4879c9..ce4b760a691b 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -155,7 +155,6 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
kwb_conn->wb_layer = kcrtc->master->wb_layer;
wb_conn = &kwb_conn->base;
- wb_conn->encoder.possible_crtcs = BIT(drm_crtc_index(&kcrtc->base));
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
@@ -164,7 +163,8 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
&komeda_wb_encoder_helper_funcs,
- formats, n_formats);
+ formats, n_formats,
+ BIT(drm_crtc_index(&kcrtc->base)));
komeda_put_fourcc_list(formats);
if (err) {
kfree(kwb_conn);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index f5847a79dd7e..204c869d9fe2 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -212,7 +212,6 @@ int malidp_mw_connector_init(struct drm_device *drm)
if (!malidp->dev->hw->enable_memwrite)
return 0;
- malidp->mw_connector.encoder.possible_crtcs = 1 << drm_crtc_index(&malidp->crtc);
drm_connector_helper_add(&malidp->mw_connector.base,
&malidp_mw_connector_helper_funcs);
@@ -223,7 +222,8 @@ int malidp_mw_connector_init(struct drm_device *drm)
ret = drm_writeback_connector_init(drm, &malidp->mw_connector,
&malidp_mw_connector_funcs,
&malidp_mw_encoder_helper_funcs,
- formats, n_formats);
+ formats, n_formats,
+ 1 << drm_crtc_index(&malidp->crtc));
kfree(formats);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index dccf4504f1bb..99fd15d1b366 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -157,6 +157,7 @@ static const struct drm_encoder_funcs drm_writeback_encoder_funcs = {
* @enc_helper_funcs: Encoder helper funcs vtable to be used by the internal encoder
* @formats: Array of supported pixel formats for the writeback engine
* @n_formats: Length of the formats array
+ * @possible_crtcs: possible crtcs for the internal writeback encoder
*
* This function creates the writeback-connector-specific properties if they
* have not been already created, initializes the connector as
@@ -174,7 +175,64 @@ int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
- const u32 *formats, int n_formats)
+ const u32 *formats, int n_formats,
+ u32 possible_crtcs)
+{
+ int ret = 0;
+
+ drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
+
+ wb_connector->encoder.possible_crtcs = possible_crtcs;
+
+ ret = drm_encoder_init(dev, &wb_connector->encoder,
+ &drm_writeback_encoder_funcs,
+ DRM_MODE_ENCODER_VIRTUAL, NULL);
+ if (ret)
+ return ret;
+
+ ret = drm_writeback_connector_init_with_encoder(dev, wb_connector, &wb_connector->encoder,
+ con_funcs, formats, n_formats);
+
+ if (ret)
+ drm_encoder_cleanup(&wb_connector->encoder);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_writeback_connector_init);
+
+/**
+ * drm_writeback_connector_init_with_encoder - Initialize a writeback connector with
+ * a custom encoder
+ *
+ * @dev: DRM device
+ * @wb_connector: Writeback connector to initialize
+ * @enc: handle to the already initialized drm encoder
+ * @con_funcs: Connector funcs vtable
+ * @formats: Array of supported pixel formats for the writeback engine
+ * @n_formats: Length of the formats array
+ *
+ * This function creates the writeback-connector-specific properties if they
+ * have not been already created, initializes the connector as
+ * type DRM_MODE_CONNECTOR_WRITEBACK, and correctly initializes the property
+ * values.
+ *
+ * This function assumes that the drm_writeback_connector's encoder has already been
+ * created and initialized before invoking this function.
+ *
+ * In addition, this function also assumes that callers of this API will manage
+ * assigning the encoder helper functions, possible_crtcs and any other encoder
+ * specific operation.
+ *
+ * Drivers should always use this function instead of drm_connector_init() to
+ * set up writeback connectors if they want to manage themselves the lifetime of the
+ * associated encoder.
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector, struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs, const u32 *formats,
+ int n_formats)
{
struct drm_property_blob *blob;
struct drm_connector *connector = &wb_connector->base;
@@ -189,12 +247,6 @@ int drm_writeback_connector_init(struct drm_device *dev,
if (IS_ERR(blob))
return PTR_ERR(blob);
- drm_encoder_helper_add(&wb_connector->encoder, enc_helper_funcs);
- ret = drm_encoder_init(dev, &wb_connector->encoder,
- &drm_writeback_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret)
- goto fail;
connector->interlace_allowed = 0;
@@ -203,8 +255,7 @@ int drm_writeback_connector_init(struct drm_device *dev,
if (ret)
goto connector_fail;
- ret = drm_connector_attach_encoder(connector,
- &wb_connector->encoder);
+ ret = drm_connector_attach_encoder(connector, enc);
if (ret)
goto attach_fail;
@@ -233,12 +284,10 @@ int drm_writeback_connector_init(struct drm_device *dev,
attach_fail:
drm_connector_cleanup(connector);
connector_fail:
- drm_encoder_cleanup(&wb_connector->encoder);
-fail:
drm_property_blob_put(blob);
return ret;
}
-EXPORT_SYMBOL(drm_writeback_connector_init);
+EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
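
With the split API above, a driver that lets the helper manage the encoder now passes its possible_crtcs mask straight into the init call, while a driver that owns its encoder initializes it first and uses the _with_encoder variant. A minimal sketch, assuming hypothetical driver state (priv, my_* funcs and format tables are illustrative):

/* Helper-managed encoder: pass possible_crtcs to the init call. */
ret = drm_writeback_connector_init(dev, &priv->wb_conn,
				   &my_wb_connector_funcs,
				   &my_wb_encoder_helper_funcs,
				   my_formats, ARRAY_SIZE(my_formats),
				   BIT(drm_crtc_index(&priv->crtc)));

/* Driver-managed encoder: set it up fully, then attach the connector. */
drm_encoder_helper_add(&priv->enc, &my_wb_encoder_helper_funcs);
priv->enc.possible_crtcs = BIT(drm_crtc_index(&priv->crtc));
ret = drm_encoder_init(dev, &priv->enc, &my_encoder_funcs,
		       DRM_MODE_ENCODER_VIRTUAL, NULL);
if (!ret)
	ret = drm_writeback_connector_init_with_encoder(dev, &priv->wb_conn,
							&priv->enc,
							&my_wb_connector_funcs,
							my_formats,
							ARRAY_SIZE(my_formats));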
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 864fdc20afef..4e0cbd682725 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select IOMMU_IO_PGTABLE
select QCOM_MDT_LOADER if ARCH_QCOM
select REGULATOR
+ select DRM_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
@@ -48,12 +49,39 @@ config DRM_MSM_GPU_SUDO
Only use this if you are a driver developer. This should *not*
be enabled for production kernels. If unsure, say N.
-config DRM_MSM_HDMI_HDCP
- bool "Enable HDMI HDCP support in MSM DRM driver"
+config DRM_MSM_MDSS
+ bool
+ depends on DRM_MSM
+ default n
+
+config DRM_MSM_MDP4
+ bool "Enable MDP4 support in MSM DRM driver"
depends on DRM_MSM
default y
help
- Choose this option to enable HDCP state machine
+ Compile in support for the Mobile Display Processor v4 (MDP4) in
+ the MSM DRM driver. It is the older display controller found in
+ devices using APQ8064/MSM8960/MSM8x60 platforms.
+
+config DRM_MSM_MDP5
+ bool "Enable MDP5 support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_MSM_MDSS
+ default y
+ help
+ Compile in support for the Mobile Display Processor v5 (MDP5) in
+ the MSM DRM driver. It is the display controller found in devices
+ using e.g. APQ8016/MSM8916/APQ8096/MSM8996/MSM8974/SDM6x0 platforms.
+
+config DRM_MSM_DPU
+ bool "Enable DPU support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_MSM_MDSS
+ default y
+ help
+ Compile in support for the Display Processing Unit in
+ the MSM DRM driver. It is the display controller found in devices
+ using e.g. SDM845 and newer platforms.
config DRM_MSM_DP
bool "Enable DisplayPort support in MSM DRM driver"
@@ -118,3 +146,20 @@ config DRM_MSM_DSI_7NM_PHY
help
Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
the platform.
+
+config DRM_MSM_HDMI
+ bool "Enable HDMI support in MSM DRM driver"
+ depends on DRM_MSM
+ default y
+ help
+ Compile in support for the HDMI output in the MSM DRM driver. It
+ can be a primary or a secondary display on the device. Note that
+ this is used only for the direct HDMI output. If the device outputs
+ HDMI data through some kind of DSI-to-HDMI bridge, this option can
+ be disabled.
+
+config DRM_MSM_HDMI_HDCP
+ bool "Enable HDMI HDCP support in MSM DRM driver"
+ depends on DRM_MSM && DRM_MSM_HDMI
+ default y
+ help
+ Choose this option to enable the HDCP state machine.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index e9cc7d8ac301..66395ee0862a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -16,6 +16,8 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
+
+msm-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -27,9 +29,10 @@ msm-y := \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
- disp/mdp_format.o \
- disp/mdp_kms.o \
+
+msm-$(CONFIG_DRM_MSM_MDP4) += \
disp/mdp4/mdp4_crtc.o \
+ disp/mdp4/mdp4_dsi_encoder.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \
@@ -37,25 +40,31 @@ msm-y := \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
+
+msm-$(CONFIG_DRM_MSM_MDP5) += \
disp/mdp5/mdp5_cfg.o \
+ disp/mdp5/mdp5_cmd_encoder.o \
disp/mdp5/mdp5_ctl.o \
disp/mdp5/mdp5_crtc.o \
disp/mdp5/mdp5_encoder.o \
disp/mdp5/mdp5_irq.o \
- disp/mdp5/mdp5_mdss.o \
disp/mdp5/mdp5_kms.o \
disp/mdp5/mdp5_pipe.o \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
+
+msm-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_core_perf.o \
disp/dpu1/dpu_crtc.o \
disp/dpu1/dpu_encoder.o \
disp/dpu1/dpu_encoder_phys_cmd.o \
disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_encoder_phys_wb.o \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \
disp/dpu1/dpu_hw_lm.o \
@@ -66,11 +75,19 @@ msm-y := \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_hw_wb.o \
disp/dpu1/dpu_kms.o \
- disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
+ disp/dpu1/dpu_writeback.o
+
+msm-$(CONFIG_DRM_MSM_MDSS) += \
+ msm_mdss.o \
+
+msm-y += \
+ disp/mdp_format.o \
+ disp/mdp_kms.o \
disp/msm_disp_snapshot.o \
disp/msm_disp_snapshot_util.o \
msm_atomic.o \
@@ -118,12 +135,10 @@ msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
- disp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
- dsi/phy/dsi_phy.o \
- disp/mdp5/mdp5_cmd_encoder.o
+ dsi/phy/dsi_phy.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 407f50a15faa..c424e9a37669 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1235,7 +1235,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
return;
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- ring ? ring->id : -1, ring ? ring->seqno : 0,
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
@@ -1662,28 +1662,23 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
-static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
+static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
- u64 busy_cycles, busy_time;
+ u64 busy_cycles;
/* Only read the gpu busy if the hardware is already active */
- if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
+ if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0) {
+ *out_sample_rate = 1;
return 0;
+ }
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
-
- busy_time = busy_cycles - gpu->devfreq.busy_cycles;
- do_div(busy_time, clk_get_rate(gpu->core_clk) / 1000000);
-
- gpu->devfreq.busy_cycles = busy_cycles;
+ *out_sample_rate = clk_get_rate(gpu->core_clk);
pm_runtime_put(&gpu->pdev->dev);
- if (WARN_ON(busy_time > ~0LU))
- return ~0LU;
-
- return (unsigned long)busy_time;
+ return busy_cycles;
}
static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 3e325e2a2b1b..9f76f5b15759 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1172,7 +1172,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
return PTR_ERR(bo->obj);
ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
- range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+ range_start, range_end);
if (ret) {
drm_gem_object_put(bo->obj);
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index ccc4fcf7a630..fc10923d42de 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1390,7 +1390,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- ring ? ring->id : -1, ring ? ring->seqno : 0,
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A6XX_RBBM_STATUS),
gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
@@ -1649,12 +1649,14 @@ static void a6xx_destroy(struct msm_gpu *gpu)
kfree(a6xx_gpu);
}
-static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
+static u64 a6xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- u64 busy_cycles, busy_time;
+ u64 busy_cycles;
+ /* 19.2MHz */
+ *out_sample_rate = 19200000;
/* Only read the gpu busy if the hardware is already active */
if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
@@ -1664,17 +1666,10 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
- busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
- do_div(busy_time, 192);
-
- gpu->devfreq.busy_cycles = busy_cycles;
pm_runtime_put(a6xx_gpu->gmu.dev);
- if (WARN_ON(busy_time > ~0LU))
- return ~0LU;
-
- return (unsigned long)busy_time;
+ return busy_cycles;
}
static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
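
Both busy handlers now return raw counter cycles plus the counter's sample rate, leaving the time conversion to the common devfreq layer. A rough sketch of how a caller can turn two successive samples into busy time in microseconds (variable names are illustrative, not the exact msm_gpu_devfreq.c code):

u64 busy_cycles, busy_time;
unsigned long sample_rate;

busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);

/* cycles elapsed since the previous sample */
busy_time = busy_cycles - df->busy_cycles;
df->busy_cycles = busy_cycles;

/* convert cycles at sample_rate Hz to microseconds */
busy_time *= USEC_PER_SEC;
do_div(busy_time, sample_rate);

status->busy_time = busy_time;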
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9efc84929be0..4e665c806a14 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -229,10 +229,14 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
}
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
- uint32_t param, uint64_t *value)
+ uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ /* No pointer params yet */
+ if (*len != 0)
+ return -EINVAL;
+
switch (param) {
case MSM_PARAM_GPU_ID:
*value = adreno_gpu->info->revn;
@@ -272,11 +276,24 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
*value = 0;
return 0;
case MSM_PARAM_FAULTS:
- *value = gpu->global_faults + ctx->aspace->faults;
+ if (ctx->aspace)
+ *value = gpu->global_faults + ctx->aspace->faults;
+ else
+ *value = gpu->global_faults;
return 0;
case MSM_PARAM_SUSPENDS:
*value = gpu->suspend_count;
return 0;
+ case MSM_PARAM_VA_START:
+ if (ctx->aspace == gpu->aspace)
+ return -EINVAL;
+ *value = ctx->aspace->va_start;
+ return 0;
+ case MSM_PARAM_VA_SIZE:
+ if (ctx->aspace == gpu->aspace)
+ return -EINVAL;
+ *value = ctx->aspace->va_size;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -284,9 +301,50 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
- uint32_t param, uint64_t value)
+ uint32_t param, uint64_t value, uint32_t len)
{
switch (param) {
+ case MSM_PARAM_COMM:
+ case MSM_PARAM_CMDLINE:
+ /* kstrdup_quotable_cmdline() limits to PAGE_SIZE, so
+ * that should be a reasonable upper bound
+ */
+ if (len > PAGE_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ if (len != 0)
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case MSM_PARAM_COMM:
+ case MSM_PARAM_CMDLINE: {
+ char *str, **paramp;
+
+ str = kmalloc(len + 1, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ if (copy_from_user(str, u64_to_user_ptr(value), len)) {
+ kfree(str);
+ return -EFAULT;
+ }
+
+ /* Ensure string is null terminated: */
+ str[len] = '\0';
+
+ if (param == MSM_PARAM_COMM) {
+ paramp = &ctx->comm;
+ } else {
+ paramp = &ctx->cmdline;
+ }
+
+ kfree(*paramp);
+ *paramp = str;
+
+ return 0;
+ }
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -533,7 +591,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
state->ring[i].fence = gpu->rb[i]->memptrs->fence;
state->ring[i].iova = gpu->rb[i]->iova;
- state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
state->ring[i].wptr = get_wptr(gpu->rb[i]);
@@ -783,7 +841,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rb %d: fence: %d/%d\n", i,
ring->memptrs->fence,
- ring->seqno);
+ ring->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
printk("rb wptr: %d\n", get_wptr(ring));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0490c5fbb780..ab3b5ef80332 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -281,9 +281,9 @@ static inline int adreno_is_a650_family(struct adreno_gpu *gpu)
}
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
- uint32_t param, uint64_t *value);
+ uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
- uint32_t param, uint64_t value);
+ uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 90ae6c9ccc95..b5b6e7031fb9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -10,46 +10,42 @@
/**
* dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
- * @dpu_kms: DPU handle
+ * @kms: MSM KMS handle
* @return: none
*/
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+void dpu_core_irq_preinstall(struct msm_kms *kms);
/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
- * @dpu_kms: DPU handle
+ * @kms: MSM KMS handle
* @return: none
*/
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+void dpu_core_irq_uninstall(struct msm_kms *kms);
/**
* dpu_core_irq - core IRQ handler
- * @dpu_kms: DPU handle
+ * @kms: MSM KMS handle
* @return: interrupt handling status
*/
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+irqreturn_t dpu_core_irq(struct msm_kms *kms);
/**
* dpu_core_irq_read - IRQ helper function for reading IRQ status
* @dpu_kms: DPU handle
* @irq_idx: irq index
- * @clear: True to clear the irq after read
* @return: non-zero if irq detected; otherwise no irq detected
*/
u32 dpu_core_irq_read(
struct dpu_kms *dpu_kms,
- int irq_idx,
- bool clear);
+ int irq_idx);
/**
* dpu_core_irq_register_callback - For registering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
- * @irq_cb: IRQ callback structure, containing callback function
- * and argument. Passing NULL for irq_cb will unregister
- * the callback for the given irq_idx
- * This must exist until un-registration.
+ * @irq_cb: IRQ callback function.
+ * @irq_arg: IRQ callback argument.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
@@ -57,25 +53,21 @@ u32 dpu_core_irq_read(
int dpu_core_irq_register_callback(
struct dpu_kms *dpu_kms,
int irq_idx,
- struct dpu_irq_callback *irq_cb);
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg);
/**
* dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
* interrupt
* @dpu_kms: DPU handle
* @irq_idx: irq index
- * @irq_cb: IRQ callback structure, containing callback function
- * and argument. Passing NULL for irq_cb will unregister
- * the callback for the given irq_idx
- * This must match with registration.
* @return: 0 for success registering callback, otherwise failure
*
* This function supports registration of multiple callbacks for each interrupt.
*/
int dpu_core_irq_unregister_callback(
struct dpu_kms *dpu_kms,
- int irq_idx,
- struct dpu_irq_callback *irq_cb);
+ int irq_idx);
/**
* dpu_debugfs_core_irq_init - register core irq debugfs
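
Under the reworked interface, callers register a plain function pointer and argument instead of embedding a dpu_irq_callback struct, and unregister by index alone. A minimal sketch (the handler and its phys_enc argument are illustrative):

static void my_vblank_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	/* handle the interrupt for this encoder */
}

ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
				     my_vblank_irq, phys_enc);
...
dpu_core_irq_unregister_callback(dpu_kms, irq_idx);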
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 7763558ef566..b56f777dbd0e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -204,7 +204,8 @@ static int dpu_crtc_get_crc(struct drm_crtc *crtc)
rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
if (rc) {
- DRM_DEBUG_DRIVER("MISR read failed\n");
+ if (rc != -ENODATA)
+ DRM_DEBUG_DRIVER("MISR read failed\n");
return rc;
}
}
@@ -869,6 +870,13 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
+ if (!dpu_encoder_is_valid_for_commit(encoder)) {
+ DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
+ goto end;
+ }
+ }
/*
* Encoder will flush/start now, unless it has a tx pending. If so, it
* may delay and flush at an irq event (e.g. ppdone)
@@ -891,6 +899,8 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
dpu_encoder_kickoff(encoder);
reinit_completion(&dpu_crtc->frame_done_comp);
+
+end:
DPU_ATRACE_END("crtc_commit");
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 3940b9c6323b..52516eb20cb8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -21,6 +23,8 @@
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
+#include "dpu_hw_dsc.h"
+#include "dpu_hw_merge3d.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -34,18 +38,6 @@
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
-#define DPU_DEBUG_PHYS(p, fmt, ...) DRM_DEBUG_ATOMIC("enc%d intf%d pp%d " fmt,\
- (p) ? (p)->parent->base.id : -1, \
- (p) ? (p)->intf_idx - INTF_0 : -1, \
- (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
- ##__VA_ARGS__)
-
-#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
- (p) ? (p)->parent->base.id : -1, \
- (p) ? (p)->intf_idx - INTF_0 : -1, \
- (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
- ##__VA_ARGS__)
-
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -135,6 +127,8 @@ enum dpu_enc_rc_states {
* @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. The number of
* pingpong blocks can differ from num_phys_encs.
+ * @hw_dsc: Handle to the DSC blocks used for the display.
+ * @dsc_mask: Bitmask of used DSC blocks.
* @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
@@ -168,6 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
+ * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -180,6 +175,9 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+
+ unsigned int dsc_mask;
bool intfs_swapped;
@@ -206,6 +204,11 @@ struct dpu_encoder_virt {
struct msm_display_topology topology;
u32 idle_timeout;
+
+ bool wide_bus_en;
+
+ /* DSC configuration */
+ struct msm_display_dsc_config *dsc;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -214,6 +217,14 @@ static u32 dither_matrix[DITHER_MATRIX_SZ] = {
15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
};
+
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
+{
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ return dpu_enc->wide_bus_en;
+}
+
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
{
struct dpu_hw_dither_cfg dither_cfg = { 0 };
@@ -240,12 +251,30 @@ static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bp
hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
}
+static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
+{
+ switch (intf_mode) {
+ case INTF_MODE_VIDEO:
+ return "INTF_MODE_VIDEO";
+ case INTF_MODE_CMD:
+ return "INTF_MODE_CMD";
+ case INTF_MODE_WB_BLOCK:
+ return "INTF_MODE_WB_BLOCK";
+ case INTF_MODE_WB_LINE:
+ return "INTF_MODE_WB_LINE";
+ default:
+ return "INTF_MODE_UNKNOWN";
+ }
+}
+
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
{
- DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
- DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
- phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+ DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent),
+ dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
+ phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
if (phys_enc->parent_ops->handle_frame_done)
phys_enc->parent_ops->handle_frame_done(
@@ -257,73 +286,69 @@ static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
u32 irq_idx, struct dpu_encoder_wait_info *info);
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
struct dpu_encoder_wait_info *wait_info)
{
- struct dpu_encoder_irq *irq;
u32 irq_status;
int ret;
- if (!wait_info || intr_idx >= INTR_IDX_MAX) {
+ if (!wait_info) {
DPU_ERROR("invalid params\n");
return -EINVAL;
}
- irq = &phys_enc->irq[intr_idx];
-
/* note: do master / slave checking outside */
/* return EWOULDBLOCK since we know the wait isn't necessary */
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
- DRM_ERROR("encoder is disabled id=%u, intr=%d, irq=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
+ DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq);
return -EWOULDBLOCK;
}
- if (irq->irq_idx < 0) {
- DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, irq=%s\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->name);
+ if (irq < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
+ DRMID(phys_enc->parent), func);
return 0;
}
- DRM_DEBUG_KMS("id=%u, intr=%d, irq=%d, pp=%d, pending_cnt=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq, phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
ret = dpu_encoder_helper_wait_event_timeout(
DRMID(phys_enc->parent),
- irq->irq_idx,
+ irq,
wait_info);
if (ret <= 0) {
- irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
- irq->irq_idx, true);
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
if (irq_status) {
unsigned long flags;
- DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx,
+ DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
local_irq_save(flags);
- irq->cb.func(phys_enc, irq->irq_idx);
+ func(phys_enc, irq);
local_irq_restore(flags);
ret = 0;
} else {
ret = -ETIMEDOUT;
- DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d\n",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx,
+ DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
+ DRMID(phys_enc->parent), func,
+ irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
} else {
ret = 0;
trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
- intr_idx, irq->irq_idx,
+ func, irq,
phys_enc->hw_pp->idx - PINGPONG_0,
atomic_read(wait_info->atomic_cnt));
}
@@ -331,70 +356,6 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
return ret;
}
-int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx)
-{
- struct dpu_encoder_irq *irq;
- int ret = 0;
-
- if (intr_idx >= INTR_IDX_MAX) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
- irq = &phys_enc->irq[intr_idx];
-
- if (irq->irq_idx < 0) {
- DPU_ERROR_PHYS(phys_enc,
- "invalid IRQ index:%d\n", irq->irq_idx);
- return -EINVAL;
- }
-
- ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
- &irq->cb);
- if (ret) {
- DPU_ERROR_PHYS(phys_enc,
- "failed to register IRQ callback for %s\n",
- irq->name);
- irq->irq_idx = -EINVAL;
- return ret;
- }
-
- trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
-
- return ret;
-}
-
-int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx)
-{
- struct dpu_encoder_irq *irq;
- int ret;
-
- irq = &phys_enc->irq[intr_idx];
-
- /* silently skip irqs that weren't registered */
- if (irq->irq_idx < 0) {
- DRM_ERROR("duplicate unregister id=%u, intr=%d, irq=%d",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
- return 0;
- }
-
- ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
- &irq->cb);
- if (ret) {
- DRM_ERROR("unreg cb fail id=%u, intr=%d, irq=%d ret=%d",
- DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx, ret);
- }
-
- trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
- irq->irq_idx);
-
- return 0;
-}
-
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@@ -501,6 +462,22 @@ void dpu_encoder_helper_split_config(
}
}
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ int i, intf_count = 0, num_dsc = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
+ if (dpu_enc->dsc)
+ num_dsc = 2;
+
+ return (num_dsc > 0) && (num_dsc > intf_count);
+}
+
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
@@ -541,8 +518,21 @@ static struct msm_display_topology dpu_encoder_get_topology(
topology.num_enc = 0;
topology.num_intf = intf_count;
+ if (dpu_enc->dsc) {
+ /* In case of Display Stream Compression (DSC), we would use
+ * 2 encoders, 2 layer mixers and 1 interface
+ * this is power optimal and can drive up to (including) 4k
+ * screens
+ */
+ topology.num_enc = 2;
+ topology.num_dsc = 2;
+ topology.num_intf = 1;
+ topology.num_lm = 2;
+ }
+
return topology;
}
+
static int dpu_encoder_virt_atomic_check(
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
@@ -929,6 +919,40 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
return 0;
}
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.prepare_wb_job)
+ phys->ops.prepare_wb_job(phys, job);
+
+ }
+}
+
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys->ops.cleanup_wb_job)
+ phys->ops.cleanup_wb_job(phys, job);
+
+ }
+}
+
static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -942,7 +966,9 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
- int num_lm, num_ctl, num_pp;
+ struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
+ int num_lm, num_ctl, num_pp, num_dsc;
+ unsigned int dsc_mask = 0;
int i;
if (!drm_enc) {
@@ -980,6 +1006,18 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
: NULL;
+ if (dpu_enc->dsc) {
+ num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSC,
+ hw_dsc, ARRAY_SIZE(hw_dsc));
+ for (i = 0; i < num_dsc; i++) {
+ dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+ dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
+ }
+ }
+
+ dpu_enc->dsc_mask = dsc_mask;
+
cstate = to_dpu_crtc_state(crtc_state);
for (i = 0; i < num_lm; i++) {
@@ -1015,9 +1053,18 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
- if (!phys->hw_intf) {
+ if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
+ phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
+
+ if (!phys->hw_intf && !phys->hw_wb) {
DPU_ERROR_ENC(dpu_enc,
- "no intf block assigned at idx: %d\n", i);
+ "no intf or wb block assigned at idx: %d\n", i);
+ return;
+ }
+
+ if (phys->hw_intf && phys->hw_wb) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid phys both intf and wb block at idx: %d\n", i);
return;
}
@@ -1165,16 +1212,35 @@ static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
{
int i = 0;
- for (i = 0; i < catalog->intf_count; i++) {
- if (catalog->intf[i].type == type
- && catalog->intf[i].controller_id == controller_id) {
- return catalog->intf[i].id;
+ if (type != INTF_WB) {
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
}
}
return INTF_MAX;
}
+static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ if (type != INTF_WB)
+ goto end;
+
+ for (i = 0; i < catalog->wb_count; i++) {
+ if (catalog->wb[i].id == controller_id)
+ return catalog->wb[i].id;
+ }
+
+end:
+ return WB_MAX;
+}
+
static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
@@ -1288,8 +1354,9 @@ static void dpu_encoder_frame_done_callback(
* suppress frame_done without waiter,
* likely autorefresh
*/
- trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
- event, ready_phys->intf_idx);
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
+ dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
+ ready_phys->intf_idx, ready_phys->wb_idx);
return;
}
@@ -1367,9 +1434,11 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
if (ctl->ops.get_pending_flush)
ret = ctl->ops.get_pending_flush(ctl);
- trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
- pending_kickoff_cnt, ctl->idx,
- extra_flush_bits, ret);
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc),
+ dpu_encoder_helper_get_intf_type(phys->intf_mode),
+ phys->intf_idx, phys->wb_idx,
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
}
/**
@@ -1677,6 +1746,95 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
}
+static u32
+dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
+ u32 enc_ip_width)
+{
+ int ssm_delay, total_pixels, soft_slice_per_enc;
+
+ soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
+
+ /*
+ * minimum number of initial line pixels is a sum of:
+ * 1. sub-stream multiplexer delay (83 groups for 8bpc,
+ * 91 for 10 bpc) * 3
+ * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
+ * 3. the initial xmit delay
+ * 4. total pipeline delay through the "lock step" of encoder (47)
+ * 5. 6 additional pixels as the output of the rate buffer is
+ * 48 bits wide
+ */
+ ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
+ total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
+ if (soft_slice_per_enc > 1)
+ total_pixels += (ssm_delay * 3);
+ return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
+}
+
+static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
+ struct dpu_hw_pingpong *hw_pp,
+ struct msm_display_dsc_config *dsc,
+ u32 common_mode,
+ u32 initial_lines)
+{
+ if (hw_dsc->ops.dsc_config)
+ hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
+
+ if (hw_dsc->ops.dsc_config_thresh)
+ hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
+
+ if (hw_pp->ops.setup_dsc)
+ hw_pp->ops.setup_dsc(hw_pp);
+
+ if (hw_pp->ops.enable_dsc)
+ hw_pp->ops.enable_dsc(hw_pp);
+}
+
+static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_dsc_config *dsc)
+{
+ /* coding only for 2LM, 2enc, 1 dsc config */
+ struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
+ struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+ int this_frame_slices;
+ int intf_ip_w, enc_ip_w;
+ int dsc_common_mode;
+ int pic_width;
+ u32 initial_lines;
+ int i;
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ hw_pp[i] = dpu_enc->hw_pp[i];
+ hw_dsc[i] = dpu_enc->hw_dsc[i];
+
+ if (!hw_pp[i] || !hw_dsc[i]) {
+ DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
+ return;
+ }
+ }
+
+ dsc_common_mode = 0;
+ pic_width = dsc->drm->pic_width;
+
+ dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ if (enc_master->intf_mode == INTF_MODE_VIDEO)
+ dsc_common_mode |= DSC_MODE_VIDEO;
+
+ this_frame_slices = pic_width / dsc->drm->slice_width;
+ intf_ip_w = this_frame_slices * dsc->drm->slice_width;
+
+ /*
+ * dsc merge case: when using 2 encoders for the same stream,
+ * no. of slices need to be same on both the encoders.
+ */
+ enc_ip_w = intf_ip_w / 2;
+ initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
+ dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
+}
+
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1708,6 +1866,30 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
+
+ if (dpu_enc->dsc)
+ dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
+}
+
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ unsigned int i;
+ struct dpu_encoder_phys *phys;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
+ DPU_DEBUG("invalid FB not kicking off\n");
+ return false;
+ }
+ }
+ }
+
+ return true;
}
void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
@@ -1751,6 +1933,102 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
DPU_ATRACE_END("encoder_kickoff");
}
+static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_mixer_cfg mixer;
+ int i, num_lm;
+ u32 flush_mask = 0;
+ struct dpu_global_state *global_state;
+ struct dpu_hw_blk *hw_lm[2];
+ struct dpu_hw_mixer *hw_mixer[2];
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+
+ memset(&mixer, 0, sizeof(mixer));
+
+ /* reset all mixers for this encoder */
+ if (phys_enc->hw_ctl->ops.clear_all_blendstages)
+ phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+
+ global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
+
+ num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
+ phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+
+ for (i = 0; i < num_lm; i++) {
+ hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
+ flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
+ if (phys_enc->hw_ctl->ops.update_pending_flush)
+ phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
+
+ /* clear all blendstages */
+ if (phys_enc->hw_ctl->ops.setup_blendstage)
+ phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
+ }
+}
+
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+ int i;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+
+ phys_enc->hw_ctl->ops.reset(ctl);
+
+ dpu_encoder_helper_reset_mixers(phys_enc);
+
+ /*
+ * TODO: move the once-only operation like CTL flush/trigger
+ * into dpu_encoder_virt_disable() and all operations which need
+ * to be done per phys encoder into the phys_disable() op.
+ */
+ if (phys_enc->hw_wb) {
+ /* disable the PP block */
+ if (phys_enc->hw_wb->ops.bind_pingpong_blk)
+ phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
+ phys_enc->hw_pp->idx);
+
+ /* mark WB flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
+ phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
+ } else {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ dpu_enc->phys_encs[i]->hw_intf, false,
+ dpu_enc->phys_encs[i]->hw_pp->idx);
+
+ /* mark INTF flush as pending */
+ if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
+ phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
+ dpu_enc->phys_encs[i]->hw_intf->idx);
+ }
+ }
+
+ /* reset the merge 3D HW block */
+ if (phys_enc->hw_pp->merge_3d) {
+ phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
+ BLEND_3D_NONE);
+ if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
+ phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
+ phys_enc->hw_pp->merge_3d->idx);
+ }
+
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ if (phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ if (ctl->ops.reset_intf_cfg)
+ ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
+
+ ctl->ops.trigger_flush(ctl);
+ ctl->ops.trigger_start(ctl);
+ ctl->ops.clear_pending_flush(ctl);
+}
+
void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1780,22 +2058,12 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
- phys->intf_idx - INTF_0,
+ seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
atomic_read(&phys->vsync_cnt),
atomic_read(&phys->underrun_cnt));
- switch (phys->intf_mode) {
- case INTF_MODE_VIDEO:
- seq_puts(s, "mode: video\n");
- break;
- case INTF_MODE_CMD:
- seq_puts(s, "mode: command\n");
- break;
- default:
- seq_puts(s, "mode: ???\n");
- break;
- }
+ seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
}
mutex_unlock(&dpu_enc->enc_lock);
@@ -1854,7 +2122,7 @@ static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
}
static int dpu_encoder_virt_add_phys_encs(
- u32 display_caps,
+ struct msm_display_info *disp_info,
struct dpu_encoder_virt *dpu_enc,
struct dpu_enc_phys_init_params *params)
{
@@ -1873,7 +2141,7 @@ static int dpu_encoder_virt_add_phys_encs(
return -EINVAL;
}
- if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
enc = dpu_encoder_phys_vid_init(params);
if (IS_ERR_OR_NULL(enc)) {
@@ -1886,7 +2154,7 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
- if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
enc = dpu_encoder_phys_cmd_init(params);
if (IS_ERR_OR_NULL(enc)) {
@@ -1899,6 +2167,19 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
+ if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
+ enc = dpu_encoder_phys_wb_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
+ PTR_ERR(enc));
+ return enc == NULL ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
if (params->split_role == ENC_ROLE_SLAVE)
dpu_enc->cur_slave = enc;
else
@@ -1942,6 +2223,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
case DRM_MODE_ENCODER_TMDS:
intf_type = INTF_DP;
break;
+ case DRM_MODE_ENCODER_VIRTUAL:
+ intf_type = INTF_WB;
+ break;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
@@ -1953,6 +2237,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
dpu_enc->idle_pc_supported =
dpu_kms->catalog->caps->has_idle_pc;
+ dpu_enc->dsc = disp_info->dsc;
+
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
/*
@@ -1977,16 +2263,30 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
intf_type,
controller_id);
- if (phys_params.intf_idx == INTF_MAX) {
- DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+
+ phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
+ intf_type, controller_id);
+ /*
+ * The phys_params might represent either an INTF or a WB unit, but not
+ * both of them at the same time.
+ */
+ if ((phys_params.intf_idx == INTF_MAX) &&
+ (phys_params.wb_idx == WB_MAX)) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if ((phys_params.intf_idx != INTF_MAX) &&
+ (phys_params.wb_idx != WB_MAX)) {
+ DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
intf_type, controller_id);
ret = -EINVAL;
}
if (!ret) {
- ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
- dpu_enc,
- &phys_params);
+ ret = dpu_encoder_virt_add_phys_encs(disp_info,
+ dpu_enc, &phys_params);
if (ret)
DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
}
@@ -2066,6 +2366,9 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
+ else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
+ dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
+ priv->dp[disp_info->h_tile_instance[0]]);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
@@ -2100,8 +2403,9 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
if (!dpu_enc)
return ERR_PTR(-ENOMEM);
+
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
- drm_enc_mode, NULL);
+ drm_enc_mode, NULL);
if (rc) {
devm_kfree(dev->dev, dpu_enc);
return ERR_PTR(rc);
@@ -2180,3 +2484,11 @@ enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ return dpu_enc->dsc_mask;
+}
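
To make the initial-line math in dpu_encoder_dsc_initial_line_calc() above concrete: for an 8 bpc stream with hypothetical parameters slice_width = 540, initial_xmit_delay = 512 and enc_ip_width = 1080, soft_slice_per_enc = 1080 / 540 = 2 and ssm_delay = 84, so total_pixels = 84 * 3 + 512 + 47 + 84 * 3 = 1063 and initial_lines = DIV_ROUND_UP(1063, 540) = 2. (These values are chosen only to walk the formula, not taken from a real panel.)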
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 42db6ce12caf..781d41c91994 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -27,6 +27,7 @@
* based on num_of_h_tiles
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
+ * @dsc: DSC configuration data for DSC-enabled displays
*/
struct msm_display_info {
int intf_type;
@@ -34,6 +35,7 @@ struct msm_display_info {
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_te_using_watchdog_timer;
+ struct msm_display_dsc_config *dsc;
};
/**
@@ -170,4 +172,34 @@ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
*/
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
+bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_cleanup_wb_job - cleanup writeback job for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @job: Pointer to the current drm writeback job
+ */
+void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
+ struct drm_writeback_job *job);
+
+/**
+ * dpu_encoder_is_valid_for_commit - check if encoder has valid parameters for commit.
+ * @drm_enc: Pointer to drm encoder structure
+ */
+bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc);
+
#endif /* __DPU_ENCODER_H__ */
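
The three writeback hooks declared above are expected to bracket each job: dpu_encoder_prepare_wb_job() pins the output framebuffer before kickoff, dpu_encoder_is_valid_for_commit() gates the commit on a job being present, and dpu_encoder_cleanup_wb_job() unpins once the frame completes. A toy standalone model of that pairing (all names hypothetical, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

struct toy_wb_job {
	bool prepared;	/* models the pinned-fb / wb_job-set state */
};

static void prepare_wb_job(struct toy_wb_job *job) { job->prepared = true; }
static void cleanup_wb_job(struct toy_wb_job *job) { job->prepared = false; }
static bool is_valid_for_commit(const struct toy_wb_job *job)
{
	return job->prepared;
}

int main(void)
{
	struct toy_wb_job job = { false };

	prepare_wb_job(&job);			/* before kickoff */
	if (is_valid_for_commit(&job))
		puts("kickoff writeback job");
	cleanup_wb_job(&job);			/* after frame done */
	return 0;
}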
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index fa8493ac0340..f2af07d87f56 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -1,15 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*/
#ifndef __DPU_ENCODER_PHYS_H__
#define __DPU_ENCODER_PHYS_H__
+#include <drm/drm_writeback.h>
#include <linux/jiffies.h>
#include "dpu_kms.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
@@ -135,6 +138,11 @@ struct dpu_encoder_phys_ops {
void (*restore)(struct dpu_encoder_phys *phys);
int (*get_line_count)(struct dpu_encoder_phys *phys);
int (*get_frame_count)(struct dpu_encoder_phys *phys);
+ void (*prepare_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ void (*cleanup_wb_job)(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job);
+ bool (*is_valid_for_commit)(struct dpu_encoder_phys *phys_enc);
};
/**
@@ -143,6 +151,7 @@ struct dpu_encoder_phys_ops {
 * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
 * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
 * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ * @INTR_IDX_WB_DONE: Writeback done interrupt for virtual connector
*/
enum dpu_intr_idx {
INTR_IDX_VSYNC,
@@ -150,25 +159,11 @@ enum dpu_intr_idx {
INTR_IDX_UNDERRUN,
INTR_IDX_CTL_START,
INTR_IDX_RDPTR,
+ INTR_IDX_WB_DONE,
INTR_IDX_MAX,
};
/**
- * dpu_encoder_irq - tracking structure for interrupts
- * @name: string name of interrupt
- * @intr_idx: Encoder interrupt enumeration
- * @irq_idx: IRQ interface lookup index from DPU IRQ framework
- * will be -EINVAL if IRQ is not registered
- * @irq_cb: interrupt callback
- */
-struct dpu_encoder_irq {
- const char *name;
- enum dpu_intr_idx intr_idx;
- int irq_idx;
- struct dpu_irq_callback cb;
-};
-
-/**
* struct dpu_encoder_phys - physical encoder that drives a single INTF block
* tied to a specific panel / sub-panel. Abstract type, sub-classed by
* phys_vid or phys_cmd for video mode or command mode encs respectively.
@@ -179,12 +174,14 @@ struct dpu_encoder_irq {
* @hw_ctl: Hardware interface to the ctl registers
* @hw_pp: Hardware interface to the ping pong registers
* @hw_intf: Hardware interface to the intf registers
+ * @hw_wb: Hardware interface to the wb registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
+ * @wb_idx: Writeback index on dpu hardware
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -197,7 +194,7 @@ struct dpu_encoder_irq {
* @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
* pending.
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
- * @irq: IRQ tracking structures
+ * @irq: IRQ indices
*/
struct dpu_encoder_phys {
struct drm_encoder *parent;
@@ -207,11 +204,13 @@ struct dpu_encoder_phys {
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
struct dpu_hw_intf *hw_intf;
+ struct dpu_hw_wb *hw_wb;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
+ enum dpu_wb wb_idx;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@@ -220,7 +219,7 @@ struct dpu_encoder_phys {
atomic_t pending_ctlstart_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
- struct dpu_encoder_irq irq[INTR_IDX_MAX];
+ int irq[INTR_IDX_MAX];
};
static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
@@ -230,6 +229,27 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
}
/**
+ * struct dpu_encoder_phys_wb - sub-class of dpu_encoder_phys to handle
+ * writeback specific operations
+ * @base: Baseclass physical encoder structure
+ * @wbirq_refcount: Reference count of writeback interrupt
+ * @wb_done_timeout_cnt: number of wb done irq timeout errors
+ * @wb_cfg: writeback block config to store fb related details
+ * @wb_conn: backpointer to writeback connector
+ * @wb_job: backpointer to current writeback job
+ * @dest: dpu buffer layout for current writeback output buffer
+ */
+struct dpu_encoder_phys_wb {
+ struct dpu_encoder_phys base;
+ atomic_t wbirq_refcount;
+ int wb_done_timeout_cnt;
+ struct dpu_hw_wb_cfg wb_cfg;
+ struct drm_writeback_connector *wb_conn;
+ struct drm_writeback_job *wb_job;
+ struct dpu_hw_fmt_layout dest;
+};
+
+/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
@@ -257,6 +277,7 @@ struct dpu_encoder_phys_cmd {
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @split_role: Role to play in a split-panel configuration
* @intf_idx: Interface index this phys_enc will control
+ * @wb_idx: Writeback index this phys_enc will control
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
*/
struct dpu_enc_phys_init_params {
@@ -265,6 +286,7 @@ struct dpu_enc_phys_init_params {
const struct dpu_encoder_virt_ops *parent_ops;
enum dpu_enc_split_role split_role;
enum dpu_intf intf_idx;
+ enum dpu_wb wb_idx;
spinlock_t *enc_spinlock;
};
@@ -297,6 +319,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
struct dpu_enc_phys_init_params *p);
/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
* dpu_encoder_helper_trigger_start - control start helper function
* This helper function may be optionally specified by physical
* encoders if they require ctl_start triggering.
@@ -314,14 +343,24 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+ /* Use merge_3d unless DSC MERGE topology is used */
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
+ !dpu_encoder_use_dsc_merge(phys_enc->parent))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
}
/**
+ * dpu_encoder_helper_get_dsc - get DSC blocks mask for the DPU encoder
+ * This helper function is used by physical encoder to get DSC blocks mask
+ * used for this encoder.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
+
+/**
* dpu_encoder_helper_split_config - split display configuration helper function
* This helper function may be used by physical encoders to configure
* the split display related registers.
@@ -345,30 +384,20 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
* dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
 * note: will call the provided IRQ callback (@func) on timeout
* @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
+ * @irq: IRQ index
+ * @func: IRQ callback to be called in case of timeout
* @wait_info: wait info struct
* @Return: 0 or -ERROR
*/
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx,
+ int irq,
+ void (*func)(void *arg, int irq_idx),
struct dpu_encoder_wait_info *wait_info);
/**
- * dpu_encoder_helper_register_irq - register and enable an irq
+ * dpu_encoder_helper_phys_cleanup - helper to cleanup dpu pipeline
* @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @Return: 0 or -ERROR
*/
-int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx);
-
-/**
- * dpu_encoder_helper_unregister_irq - unregister and disable an irq
- * @phys_enc: Pointer to physical encoder structure
- * @intr_idx: encoder interrupt index
- * @Return: 0 or -ERROR
- */
-int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
- enum dpu_intr_idx intr_idx);
+void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
#endif /* __dpu_encoder_phys_H__ */
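
The header diff above is the heart of the IRQ cleanup: the per-encoder struct dpu_encoder_irq (name, index, callback list) collapses into a plain int irq[INTR_IDX_MAX], and the callback now travels as a function argument at registration time instead of being cached in the encoder. A standalone sketch of the new shape (toy registry, not the driver's actual dpu_core_irq implementation):

#include <errno.h>
#include <stdio.h>

#define NUM_IRQS 8

typedef void (*irq_cb)(void *arg, int irq_idx);

static irq_cb cbs[NUM_IRQS];
static void *args[NUM_IRQS];

/* callback supplied at registration; nothing stored per encoder */
static int irq_register(int irq_idx, irq_cb cb, void *arg)
{
	if (irq_idx < 0 || irq_idx >= NUM_IRQS || !cb)
		return -EINVAL;
	cbs[irq_idx] = cb;
	args[irq_idx] = arg;
	return 0;
}

static int irq_unregister(int irq_idx)
{
	if (irq_idx < 0 || irq_idx >= NUM_IRQS)
		return -EINVAL;
	cbs[irq_idx] = NULL;
	return 0;
}

static void vblank_cb(void *arg, int irq_idx)
{
	printf("vblank on irq %d\n", irq_idx);
}

int main(void)
{
	int irq = -EINVAL;	/* mirrors the phys_enc->irq[] initializer */

	irq = 3;		/* looked up from hw caps at atomic_mode_set */
	irq_register(irq, vblank_cb, NULL);
	if (cbs[irq])
		cbs[irq](args[irq], irq);	/* simulate the interrupt */
	irq_unregister(irq);
	return 0;
}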
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index d59802b67d15..ae28b2b93e69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -62,6 +62,13 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+
+ /* setup which pp blk will connect to this intf */
+ if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ true,
+ phys_enc->hw_pp->idx);
}
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
@@ -140,19 +147,13 @@ static void dpu_encoder_phys_cmd_atomic_mode_set(
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct dpu_encoder_irq *irq;
-
- irq = &phys_enc->irq[INTR_IDX_CTL_START];
- irq->irq_idx = phys_enc->hw_ctl->caps->intr_start;
+ phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
- irq = &phys_enc->irq[INTR_IDX_PINGPONG];
- irq->irq_idx = phys_enc->hw_pp->caps->intr_done;
+ phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
- irq = &phys_enc->irq[INTR_IDX_RDPTR];
- irq->irq_idx = phys_enc->hw_pp->caps->intr_rdptr;
+ phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
@@ -192,7 +193,8 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
cmd_enc->pp_timeout_report_cnt,
atomic_read(&phys_enc->pending_kickoff_cnt));
msm_disp_snapshot_state(drm_enc->dev);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -219,7 +221,9 @@ static int _dpu_encoder_phys_cmd_wait_for_idle(
wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
&wait_info);
if (ret == -ETIMEDOUT)
_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
@@ -258,10 +262,13 @@ static int dpu_encoder_phys_cmd_control_vblank_irq(
enable ? "true" : "false", refcount);
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_pp_rd_ptr_irq,
+ phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = dpu_encoder_helper_unregister_irq(phys_enc,
- INTR_IDX_RDPTR);
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_RDPTR]);
end:
if (ret) {
@@ -282,21 +289,31 @@ static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
enable, atomic_read(&phys_enc->vblank_refcount));
if (enable) {
- dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
- dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG],
+ dpu_encoder_phys_cmd_pp_tx_done_irq,
+ phys_enc);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_cmd_underrun_irq,
+ phys_enc);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_encoder_helper_register_irq(phys_enc,
- INTR_IDX_CTL_START);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
+ phys_enc);
} else {
if (dpu_encoder_phys_cmd_is_master(phys_enc))
- dpu_encoder_helper_unregister_irq(phys_enc,
- INTR_IDX_CTL_START);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_CTL_START]);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_PINGPONG]);
}
}
@@ -488,6 +505,7 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
if (!phys_enc->hw_pp) {
DPU_ERROR("invalid encoder\n");
@@ -504,6 +522,17 @@ static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_pp->ops.enable_tearcheck)
phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+
+ if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
+ phys_enc->hw_intf->ops.bind_pingpong_blk(
+ phys_enc->hw_intf,
+ false,
+ phys_enc->hw_pp->idx);
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
+ }
+
phys_enc->enable_state = DPU_ENC_DISABLED;
}
@@ -623,7 +652,9 @@ static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_CTL_START],
+ dpu_encoder_phys_cmd_ctl_start_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
@@ -681,7 +712,9 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
atomic_inc(&cmd_enc->pending_vblank_cnt);
- rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_RDPTR],
+ dpu_encoder_phys_cmd_pp_rd_ptr_irq,
&wait_info);
return rc;
@@ -731,7 +764,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
- struct dpu_encoder_irq *irq;
int i, ret = 0;
DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
@@ -755,32 +787,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
phys_enc->enc_spinlock = p->enc_spinlock;
cmd_enc->stream_sel = 0;
phys_enc->enable_state = DPU_ENC_DISABLED;
- for (i = 0; i < INTR_IDX_MAX; i++) {
- irq = &phys_enc->irq[i];
- INIT_LIST_HEAD(&irq->cb.list);
- irq->irq_idx = -EINVAL;
- irq->cb.arg = phys_enc;
- }
-
- irq = &phys_enc->irq[INTR_IDX_CTL_START];
- irq->name = "ctl_start";
- irq->intr_idx = INTR_IDX_CTL_START;
- irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
-
- irq = &phys_enc->irq[INTR_IDX_PINGPONG];
- irq->name = "pp_done";
- irq->intr_idx = INTR_IDX_PINGPONG;
- irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
-
- irq = &phys_enc->irq[INTR_IDX_RDPTR];
- irq->name = "pp_rd_ptr";
- irq->intr_idx = INTR_IDX_RDPTR;
- irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
-
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->name = "underrun";
- irq->intr_idx = INTR_IDX_UNDERRUN;
- irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
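
Both encoder types now follow the same refcounted IRQ pattern seen in the vblank control paths above: the callback is registered on the 0 -> 1 reference transition and unregistered on the 1 -> 0 transition, so intermediate requesters are no-ops. The same first-on/last-off idiom in standalone C11 (hardware hooks are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int vblank_refcount;

static void hw_irq_enable(void)  { puts("enable vblank irq");  }
static void hw_irq_disable(void) { puts("disable vblank irq"); }

/* enable on 0 -> 1, disable on 1 -> 0; all other transitions no-op */
static void control_vblank_irq(int enable)
{
	if (enable && atomic_fetch_add(&vblank_refcount, 1) + 1 == 1)
		hw_irq_enable();
	else if (!enable && atomic_fetch_sub(&vblank_refcount, 1) - 1 == 0)
		hw_irq_disable();
}

int main(void)
{
	control_vblank_irq(1);	/* 0 -> 1: enables */
	control_vblank_irq(1);	/* 1 -> 2: no-op */
	control_vblank_irq(0);	/* 2 -> 1: no-op */
	control_vblank_irq(0);	/* 1 -> 0: disables */
	return 0;
}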
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index f49f42e70b29..2c14646661b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -91,25 +91,27 @@ static void drm_mode_to_intf_timing_params(
timing->vsync_polarity = 0;
}
- /*
- * For edp only:
- * DISPLAY_V_START = (VBP * HCYCLE) + HBP
- * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
- */
- /*
- * if (vid_enc->hw->cap->type == INTF_EDP) {
- * display_v_start += mode->htotal - mode->hsync_start;
- * display_v_end -= mode->hsync_start - mode->hdisplay;
- * }
- */
/* for DP/EDP, Shift timings to align it to bottom right */
- if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
- (phys_enc->hw_intf->cap->type == INTF_EDP)) {
+ if (phys_enc->hw_intf->cap->type == INTF_DP) {
timing->h_back_porch += timing->h_front_porch;
timing->h_front_porch = 0;
timing->v_back_porch += timing->v_front_porch;
timing->v_front_porch = 0;
}
+
+ timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+
+ /*
+	 * for DP, divide the horizontal parameters by 2 when
+ * widebus is enabled
+ */
+ if (phys_enc->hw_intf->cap->type == INTF_DP && timing->wide_bus_en) {
+ timing->width = timing->width >> 1;
+ timing->xres = timing->xres >> 1;
+ timing->h_back_porch = timing->h_back_porch >> 1;
+ timing->h_front_porch = timing->h_front_porch >> 1;
+ timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
+ }
}
static u32 get_horizontal_total(const struct intf_timing_params *timing)
@@ -353,13 +355,9 @@ static void dpu_encoder_phys_vid_atomic_mode_set(
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct dpu_encoder_irq *irq;
-
- irq = &phys_enc->irq[INTR_IDX_VSYNC];
- irq->irq_idx = phys_enc->hw_intf->cap->intr_vsync;
+ phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->irq_idx = phys_enc->hw_intf->cap->intr_underrun;
+ phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int dpu_encoder_phys_vid_control_vblank_irq(
@@ -385,10 +383,13 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
atomic_read(&phys_enc->vblank_refcount));
if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
- ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
+ phys_enc);
else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
- ret = dpu_encoder_helper_unregister_irq(phys_enc,
- INTR_IDX_VSYNC);
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
end:
if (ret) {
@@ -461,7 +462,9 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
}
/* Wait for kickoff to complete */
- ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+ phys_enc->irq[INTR_IDX_VSYNC],
+ dpu_encoder_phys_vid_vblank_irq,
&wait_info);
if (ret == -ETIMEDOUT) {
@@ -513,7 +516,8 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
msm_disp_snapshot_state(drm_enc->dev);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_VSYNC]);
}
}
@@ -602,10 +606,14 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
if (WARN_ON(ret))
return;
- dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_register_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN],
+ dpu_encoder_phys_vid_underrun_irq,
+ phys_enc);
} else {
dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
- dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ phys_enc->irq[INTR_IDX_UNDERRUN]);
}
}
@@ -669,7 +677,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
- struct dpu_encoder_irq *irq;
int i;
if (!p) {
@@ -695,22 +702,8 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_VIDEO;
phys_enc->enc_spinlock = p->enc_spinlock;
- for (i = 0; i < INTR_IDX_MAX; i++) {
- irq = &phys_enc->irq[i];
- INIT_LIST_HEAD(&irq->cb.list);
- irq->irq_idx = -EINVAL;
- irq->cb.arg = phys_enc;
- }
-
- irq = &phys_enc->irq[INTR_IDX_VSYNC];
- irq->name = "vsync_irq";
- irq->intr_idx = INTR_IDX_VSYNC;
- irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
-
- irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
- irq->name = "underrun";
- irq->intr_idx = INTR_IDX_UNDERRUN;
- irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
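
The wide-bus hunk above halves every horizontal timing parameter because the interface fetches two pixels per pclk when wide bus is enabled; vertical timings are untouched. A standalone check of that arithmetic (the struct mirrors only the fields the hunk touches; the values are made up):

#include <stdbool.h>
#include <stdio.h>

struct timing {
	unsigned int width, xres;
	unsigned int h_back_porch, h_front_porch, hsync_pulse_width;
	bool wide_bus_en;
};

/* two pixels per pclk: horizontal values shrink by half */
static void apply_wide_bus(struct timing *t)
{
	if (!t->wide_bus_en)
		return;
	t->width >>= 1;
	t->xres >>= 1;
	t->h_back_porch >>= 1;
	t->h_front_porch >>= 1;
	t->hsync_pulse_width >>= 1;
}

int main(void)
{
	struct timing t = { 3840, 3840, 80, 48, 32, true };

	apply_wide_bus(&t);
	printf("width=%u hbp=%u hfp=%u hsw=%u\n",
	       t.width, t.h_back_porch, t.h_front_porch,
	       t.hsync_pulse_width);
	return 0;
}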
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
new file mode 100644
index 000000000000..4829d1ce0cf8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -0,0 +1,753 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_encoder_phys.h"
+#include "dpu_formats.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_wb.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_blk.h"
+#include "dpu_hw_merge3d.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_vbif.h"
+#include "dpu_crtc.h"
+#include "disp/msm_disp_snapshot.h"
+
+#define DEFAULT_MAX_WRITEBACK_WIDTH 2048
+
+#define to_dpu_encoder_phys_wb(x) \
+ container_of(x, struct dpu_encoder_phys_wb, base)
+
+/**
+ * dpu_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
+{
+ /* there is only one physical enc for dpu_writeback */
+ return true;
+}
+
+/**
+ * dpu_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_ot_limit(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_vbif_set_ot_params ot_params;
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = hw_wb->caps->xin_id;
+ ot_params.num = hw_wb->idx - WB_0;
+ ot_params.width = phys_enc->cached_mode.hdisplay;
+ ot_params.height = phys_enc->cached_mode.vdisplay;
+ ot_params.is_wfd = true;
+ ot_params.frame_rate = drm_mode_vrefresh(&phys_enc->cached_mode);
+ ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+ ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ ot_params.rd = false;
+
+ dpu_vbif_set_ot_limit(phys_enc->dpu_kms, &ot_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos_remap(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_vbif_set_qos_params qos_params;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ if (!phys_enc->hw_wb || !phys_enc->hw_wb->caps) {
+ DPU_ERROR("invalid writeback hardware\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+ qos_params.xin_id = hw_wb->caps->xin_id;
+ qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+ qos_params.num = hw_wb->idx - WB_0;
+ qos_params.is_rt = false;
+
+ DPU_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d is_rt:%d\n",
+ qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt);
+
+ dpu_vbif_set_qos_remap(phys_enc->dpu_kms, &qos_params);
+}
+
+/**
+ * dpu_encoder_phys_wb_set_qos - set QoS/danger/safe LUTs for writeback
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_qos_cfg qos_cfg;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_qos_lut_tbl *qos_lut_tb;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid parameter(s)\n");
+ return;
+ }
+
+ catalog = phys_enc->dpu_kms->catalog;
+
+ hw_wb = phys_enc->hw_wb;
+
+ memset(&qos_cfg, 0, sizeof(struct dpu_hw_wb_qos_cfg));
+ qos_cfg.danger_safe_en = true;
+ qos_cfg.danger_lut =
+ catalog->perf.danger_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_cfg.safe_lut = catalog->perf.safe_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+
+ qos_lut_tb = &catalog->perf.qos_lut_tbl[DPU_QOS_LUT_USAGE_NRT];
+ qos_cfg.creq_lut = _dpu_hw_get_qos_lut(qos_lut_tb, 0);
+
+ if (hw_wb->ops.setup_qos_lut)
+ hw_wb->ops.setup_qos_lut(hw_wb, &qos_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer
+ */
+static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ struct dpu_hw_cdp_cfg cdp_cfg;
+
+ if (!phys_enc || !phys_enc->dpu_kms || !phys_enc->dpu_kms->catalog) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ wb_cfg = &wb_enc->wb_cfg;
+
+ wb_cfg->intf_mode = phys_enc->intf_mode;
+ wb_cfg->roi.x1 = 0;
+ wb_cfg->roi.x2 = phys_enc->cached_mode.hdisplay;
+ wb_cfg->roi.y1 = 0;
+ wb_cfg->roi.y2 = phys_enc->cached_mode.vdisplay;
+
+ if (hw_wb->ops.setup_roi)
+ hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_outformat)
+ hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+ if (hw_wb->ops.setup_cdp) {
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
+
+ cdp_cfg.enable = phys_enc->dpu_kms->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_NRT].wr_enable;
+ cdp_cfg.ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(wb_cfg->dest.format);
+ cdp_cfg.tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+ DPU_FORMAT_IS_TILE(wb_cfg->dest.format);
+ cdp_cfg.preload_ahead = DPU_WB_CDP_PRELOAD_AHEAD_64;
+
+ hw_wb->ops.setup_cdp(hw_wb, &cdp_cfg);
+ }
+
+ if (hw_wb->ops.setup_outaddress)
+ hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup_cdp - setup chroma down prefetch block and
+ * the ctl interface configuration for the writeback path
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup_cdp(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ hw_wb = phys_enc->hw_wb;
+ ctl = phys_enc->hw_ctl;
+
+	if (ctl && ctl->ops.setup_intf_cfg &&
+			test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features)) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+ struct dpu_hw_pingpong *hw_pp = phys_enc->hw_pp;
+ enum dpu_3d_blend_mode mode_3d;
+
+ mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+
+ if (mode_3d && hw_pp && hw_pp->merge_3d)
+ intf_cfg.merge_3d = hw_pp->merge_3d->idx;
+
+		if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
+			hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d,
+					mode_3d);
+
+		/* setup which pp blk will connect to this wb */
+		if (hw_pp && hw_wb->ops.bind_pingpong_blk)
+			hw_wb->ops.bind_pingpong_blk(hw_wb, true,
+					hw_pp->idx);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ struct dpu_hw_intf_cfg intf_cfg = {0};
+
+ intf_cfg.intf = DPU_NONE;
+ intf_cfg.wb = hw_wb->idx;
+ intf_cfg.mode_3d =
+ dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ }
+}
+
+/**
+ * dpu_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc: Pointer to physical encoder
+ * @crtc_state: Pointer to CRTC atomic state
+ * @conn_state: Pointer to connector atomic state
+ */
+static int dpu_encoder_phys_wb_atomic_check(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_framebuffer *fb;
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ DPU_DEBUG("[atomic_check:%d, \"%s\",%d,%d]\n",
+ phys_enc->wb_idx, mode->name, mode->hdisplay, mode->vdisplay);
+
+	if (!conn_state || !conn_state->connector) {
+		DPU_ERROR("invalid connector state\n");
+		return -EINVAL;
+	} else if (conn_state->connector->status !=
+			connector_status_connected) {
+		DPU_ERROR("connector not connected %d\n",
+				conn_state->connector->status);
+		return -EINVAL;
+	}
+
+	if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+		return 0;
+
+	fb = conn_state->writeback_job->fb;
+
+ DPU_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+ fb->width, fb->height);
+
+ if (fb->width != mode->hdisplay) {
+ DPU_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+ mode->hdisplay);
+ return -EINVAL;
+ } else if (fb->height != mode->vdisplay) {
+ DPU_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+ mode->vdisplay);
+ return -EINVAL;
+ } else if (fb->width > DEFAULT_MAX_WRITEBACK_WIDTH) {
+ DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+ fb->width, DEFAULT_MAX_WRITEBACK_WIDTH);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/**
+ * _dpu_encoder_phys_wb_update_flush - flush hardware update
+ * @phys_enc: Pointer to physical encoder
+ */
+static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_pingpong *hw_pp;
+ u32 pending_flush = 0;
+
+ if (!phys_enc)
+ return;
+
+ hw_wb = phys_enc->hw_wb;
+ hw_pp = phys_enc->hw_pp;
+ hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (!hw_ctl) {
+ DPU_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+ return;
+ }
+
+ if (hw_ctl->ops.update_pending_flush_wb)
+ hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);
+
+ if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+ hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
+ hw_pp->merge_3d->idx);
+
+ if (hw_ctl->ops.get_pending_flush)
+ pending_flush = hw_ctl->ops.get_pending_flush(hw_ctl);
+
+ DPU_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
+ hw_ctl->idx - CTL_0, pending_flush,
+ hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_setup(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct drm_display_mode mode = phys_enc->cached_mode;
+ struct drm_framebuffer *fb = NULL;
+
+ DPU_DEBUG("[mode_set:%d, \"%s\",%d,%d]\n",
+ hw_wb->idx - WB_0, mode.name,
+ mode.hdisplay, mode.vdisplay);
+
+ dpu_encoder_phys_wb_set_ot_limit(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos_remap(phys_enc);
+
+ dpu_encoder_phys_wb_set_qos(phys_enc);
+
+ dpu_encoder_phys_wb_setup_fb(phys_enc, fb);
+
+	dpu_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ unsigned long lock_flags;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/**
+ * dpu_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ * @irq_idx: interrupt index
+ */
+static void dpu_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+ _dpu_encoder_phys_wb_frame_done_helper(arg);
+}
+
+/**
+ * dpu_encoder_phys_wb_irq_ctrl - irq control of WB
+ * @phys: Pointer to physical encoder
+ * @enable: indicates enable or disable interrupts
+ */
+static void dpu_encoder_phys_wb_irq_ctrl(
+ struct dpu_encoder_phys *phys, bool enable)
+{
+	struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys);
+
+ if (enable && atomic_inc_return(&wb_enc->wbirq_refcount) == 1)
+ dpu_core_irq_register_callback(phys->dpu_kms,
+ phys->irq[INTR_IDX_WB_DONE], dpu_encoder_phys_wb_done_irq, phys);
+ else if (!enable &&
+ atomic_dec_return(&wb_enc->wbirq_refcount) == 0)
+ dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
+}
+
+static void dpu_encoder_phys_wb_atomic_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+	phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
+}
+
+static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+
+ wb_enc->wb_done_timeout_cnt++;
+
+ if (wb_enc->wb_done_timeout_cnt == 1)
+ msm_disp_snapshot_state(phys_enc->parent->dev);
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (wb_enc->wb_conn)
+ drm_writeback_signal_completion(wb_enc->wb_conn, 0);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+}
+
+/**
+ * dpu_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ */
+static int dpu_encoder_phys_wb_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+	int ret;
+ struct dpu_encoder_wait_info wait_info;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
+			phys_enc->irq[INTR_IDX_WB_DONE],
+			dpu_encoder_phys_wb_done_irq, &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_wb_handle_wbdone_timeout(phys_enc);
+ else if (!ret)
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ return ret;
+}
+
+/**
+ * dpu_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct drm_connector *drm_conn;
+ struct drm_connector_state *state;
+
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+
+ if (!wb_enc->wb_conn || !wb_enc->wb_job) {
+ DPU_ERROR("invalid wb_conn or wb_job\n");
+ return;
+ }
+
+ drm_conn = &wb_enc->wb_conn->base;
+ state = drm_conn->state;
+
+	drm_writeback_queue_job(wb_enc->wb_conn, state);
+
+ dpu_encoder_phys_wb_setup(phys_enc);
+
+ _dpu_encoder_phys_wb_update_flush(phys_enc);
+}
+
+/**
+ * dpu_encoder_phys_wb_needs_single_flush - report whether a single flush is needed
+ * @phys_enc: Pointer to physical encoder
+ */
+static bool dpu_encoder_phys_wb_needs_single_flush(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ return false;
+}
+
+/**
+ * dpu_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+	DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+}
+
+/**
+ * dpu_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_enable(struct dpu_encoder_phys *phys_enc)
+{
+ DPU_DEBUG("[wb:%d]\n", phys_enc->hw_wb->idx - WB_0);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_wb *hw_wb = phys_enc->hw_wb;
+ struct dpu_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+
+ DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("encoder is already disabled\n");
+ return;
+ }
+
+ /* reset h/w before final flush */
+ if (phys_enc->hw_ctl->ops.clear_pending_flush)
+ phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+
+ /*
+ * New CTL reset sequence from 5.0 MDP onwards.
+ * If has_3d_merge_reset is not set, legacy reset
+ * sequence is executed.
+ *
+	 * Legacy reset sequence has not been implemented yet.
+	 * Any target earlier than SM8150 needs it; when WB support
+	 * is added to those targets, the legacy teardown sequence
+	 * will need to be added as well.
+ */
+ if (hw_ctl->caps->features & BIT(DPU_CTL_ACTIVE_CFG))
+ dpu_encoder_helper_phys_cleanup(phys_enc);
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+/**
+ * dpu_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ */
+static void dpu_encoder_phys_wb_destroy(struct dpu_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return;
+
+	DPU_DEBUG("[wb:%d]\n", phys_enc->wb_idx - WB_0);
+
+	kfree(phys_enc);
+}
+
+static void dpu_encoder_phys_wb_prepare_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ const struct msm_format *format;
+ struct msm_gem_address_space *aspace;
+ struct dpu_hw_wb_cfg *wb_cfg;
+ int ret;
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+ if (!job->fb)
+ return;
+
+ wb_enc->wb_job = job;
+ wb_enc->wb_conn = job->connector;
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ wb_cfg = &wb_enc->wb_cfg;
+
+ memset(wb_cfg, 0, sizeof(struct dpu_hw_wb_cfg));
+
+ ret = msm_framebuffer_prepare(job->fb, aspace, false);
+ if (ret) {
+ DPU_ERROR("prep fb failed, %d\n", ret);
+ return;
+ }
+
+ format = msm_framebuffer_format(job->fb);
+
+ wb_cfg->dest.format = dpu_get_dpu_format_ext(
+ format->pixel_format, job->fb->modifier);
+ if (!wb_cfg->dest.format) {
+ /* this error should be detected during atomic_check */
+ DPU_ERROR("failed to get format %x\n", format->pixel_format);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, job->fb, &wb_cfg->dest);
+ if (ret) {
+ DPU_DEBUG("failed to populate layout %d\n", ret);
+ return;
+ }
+
+ wb_cfg->dest.width = job->fb->width;
+ wb_cfg->dest.height = job->fb->height;
+ wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+
+ if ((wb_cfg->dest.format->fetch_planes == DPU_PLANE_PLANAR) &&
+ (wb_cfg->dest.format->element[0] == C1_B_Cb))
+ swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+ DPU_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_addr[0], wb_cfg->dest.plane_addr[1],
+ wb_cfg->dest.plane_addr[2], wb_cfg->dest.plane_addr[3]);
+
+ DPU_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+ wb_cfg->dest.plane_pitch[0], wb_cfg->dest.plane_pitch[1],
+ wb_cfg->dest.plane_pitch[2], wb_cfg->dest.plane_pitch[3]);
+}
+
+static void dpu_encoder_phys_wb_cleanup_wb_job(struct dpu_encoder_phys *phys_enc,
+ struct drm_writeback_job *job)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+ struct msm_gem_address_space *aspace;
+
+ if (!job->fb)
+ return;
+
+ aspace = phys_enc->dpu_kms->base.aspace;
+
+ msm_framebuffer_cleanup(job->fb, aspace, false);
+ wb_enc->wb_job = NULL;
+ wb_enc->wb_conn = NULL;
+}
+
+static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
+
+	return wb_enc->wb_job != NULL;
+}
+
+/**
+ * dpu_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops: Pointer to encoder operation table
+ */
+static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_wb_is_master;
+ ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
+ ops->enable = dpu_encoder_phys_wb_enable;
+ ops->disable = dpu_encoder_phys_wb_disable;
+ ops->destroy = dpu_encoder_phys_wb_destroy;
+ ops->atomic_check = dpu_encoder_phys_wb_atomic_check;
+ ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_wb_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_wb_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_wb_needs_single_flush;
+ ops->trigger_start = dpu_encoder_helper_trigger_start;
+ ops->prepare_wb_job = dpu_encoder_phys_wb_prepare_wb_job;
+ ops->cleanup_wb_job = dpu_encoder_phys_wb_cleanup_wb_job;
+ ops->irq_control = dpu_encoder_phys_wb_irq_ctrl;
+	ops->is_valid_for_commit = dpu_encoder_phys_wb_is_valid_for_commit;
+}
+
+/**
+ * dpu_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_wb *wb_enc = NULL;
+ int ret = 0;
+ int i;
+
+ DPU_DEBUG("\n");
+
+ if (!p || !p->parent) {
+ DPU_ERROR("invalid params\n");
+ ret = -EINVAL;
+ goto fail_alloc;
+ }
+
+ wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+ if (!wb_enc) {
+		DPU_ERROR("failed to allocate wb phys_enc\n");
+ ret = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ phys_enc = &wb_enc->base;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
+
+ dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_WB_LINE;
+ phys_enc->wb_idx = p->wb_idx;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+
+ atomic_set(&wb_enc->wbirq_refcount, 0);
+
+ for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
+ phys_enc->irq[i] = -EINVAL;
+
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ wb_enc->wb_done_timeout_cnt = 0;
+
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG("Created dpu_encoder_phys for wb %d\n",
+ phys_enc->wb_idx);
+
+ return phys_enc;
+
+fail_alloc:
+ return ERR_PTR(ret);
+}
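
One easy-to-miss detail in dpu_encoder_phys_wb_prepare_wb_job() above is the chroma-plane fixup: for a planar destination format whose first chroma element is Cb, the two chroma plane addresses are swapped before the layout is programmed. A standalone sketch of that conditional swap (addresses and flags are illustrative only, not driver values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct layout {
	uint32_t plane_addr[4];	/* [0]=Y, [1]/[2]=chroma planes, [3]=meta */
};

static void swap_u32(uint32_t *a, uint32_t *b)
{
	uint32_t t = *a; *a = *b; *b = t;
}

/* mirrors the driver's rule: planar format with Cb as the first
 * chroma element gets its two chroma plane addresses exchanged */
static void fixup_chroma_planes(struct layout *l, bool planar, bool cb_first)
{
	if (planar && cb_first)
		swap_u32(&l->plane_addr[1], &l->plane_addr[2]);
}

int main(void)
{
	struct layout l = { { 0x1000, 0x2000, 0x3000, 0 } };

	fixup_chroma_planes(&l, true, true);
	printf("u=0x%x v=0x%x\n", l.plane_addr[1], l.plane_addr[2]);
	return 0;
}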
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
index 418f5ae91293..84b8b3289f18 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -21,6 +21,28 @@ const struct dpu_format *dpu_get_dpu_format_ext(
#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
/**
+ * dpu_find_format - validate if the pixel format is supported
+ * @format: DRM pixel format (fourcc) to check
+ * @supported_formats: formats supported by the DPU HW
+ * @num_formats: total number of supported formats
+ *
+ * Return: false if not valid format, true on success
+ */
+static inline bool dpu_find_format(u32 format, const u32 *supported_formats,
+ size_t num_formats)
+{
+	size_t i;
+
+ for (i = 0; i < num_formats; i++) {
+ /* check for valid formats supported */
+ if (format == supported_formats[i])
+ return true;
+ }
+
+ return false;
+}
+
+/**
* dpu_get_msm_format - get an dpu_format by its msm_format base
* callback function registers with the msm_kms layer
* @kms: kms driver
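
dpu_find_format() above is a plain linear search over a per-block format table, as an atomic_check path would use it to reject unsupported output formats. A standalone equivalent (the fourcc values are written out by hand here as stand-ins for DRM_FORMAT_*):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define FMT_RGB565 0x36314752	/* fourcc 'RG16' */
#define FMT_NV12   0x3231564e	/* fourcc 'NV12' */

static bool find_format(uint32_t format, const uint32_t *supported, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (format == supported[i])
			return true;
	return false;
}

int main(void)
{
	static const uint32_t wb_formats[] = { FMT_RGB565 };

	/* e.g. a check path would reject NV12 for this block */
	printf("RGB565: %d, NV12: %d\n",
	       find_format(FMT_RGB565, wb_formats, 1),
	       find_format(FMT_NV12, wb_formats, 1));
	return 0;
}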
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index a4fe77cddfea..400ebceb56bb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
@@ -35,6 +36,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+#define VIG_SC7280_MASK \
+ (VIG_SC7180_MASK | BIT(DPU_SSPP_INLINE_ROTATION))
+
#define DMA_SDM845_MASK \
(BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
@@ -117,6 +121,16 @@
BIT(MDP_AD4_0_INTR) | \
BIT(MDP_AD4_1_INTR))
+#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+ BIT(DPU_WB_UBWC) | \
+ BIT(DPU_WB_YUV_CONFIG) | \
+ BIT(DPU_WB_PIPE_ALPHA) | \
+ BIT(DPU_WB_XY_ROI_OFFSET) | \
+ BIT(DPU_WB_QOS) | \
+ BIT(DPU_WB_QOS_8LVL) | \
+ BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_INPUT_CTRL))
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -203,6 +217,45 @@ static const uint32_t plane_formats_yuv[] = {
DRM_FORMAT_YVU420,
};
+static const u32 rotation_v2_formats[] = {
+ DRM_FORMAT_NV12,
+ /* TODO add formats after validation */
+};
+
+static const uint32_t wb2_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_XBGR4444,
+};
+
/*************************************************************
* DPU sub blocks config
*************************************************************/
@@ -223,6 +276,17 @@ static const struct dpu_caps msm8998_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
+static const struct dpu_caps qcm2290_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0x4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .max_linewidth = 2160,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
static const struct dpu_caps sdm845_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
@@ -338,17 +402,6 @@ static const struct dpu_mdp_cfg msm8998_mdp[] = {
},
};
-static const struct dpu_caps qcm2290_dpu_caps = {
- .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
- .max_mixer_blendstages = 0x4,
- .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
- .ubwc_version = DPU_HW_UBWC_VER_20,
- .has_dim_layer = true,
- .has_idle_pc = true,
- .max_linewidth = 2160,
- .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
-};
-
static const struct dpu_mdp_cfg sdm845_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
@@ -440,6 +493,8 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
.reg_off = 0x2C4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
.reg_off = 0x2BC, .bit_off = 20},
+ .clk_ctrls[DPU_CLK_CTRL_WB2] = {
+ .reg_off = 0x3B8, .bit_off = 24},
},
};
@@ -642,7 +697,6 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
*************************************************************/
/* SSPP common configuration */
-
#define _VIG_SBLK(num, sdma_pri, qseed_ver) \
{ \
.maxdwnscale = MAX_DOWNSCALE_RATIO, \
@@ -660,6 +714,27 @@ static const struct dpu_ctl_cfg qcm2290_ctl[] = {
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.virt_format_list = plane_formats, \
.virt_num_formats = ARRAY_SIZE(plane_formats), \
+ .rotation_cfg = NULL, \
+ }
+
+#define _VIG_SBLK_ROT(num, sdma_pri, qseed_ver, rot_cfg) \
+ { \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = qseed_ver, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .virt_format_list = plane_formats, \
+ .virt_num_formats = ARRAY_SIZE(plane_formats), \
+ .rotation_cfg = rot_cfg, \
}
#define _DMA_SBLK(num, sdma_pri) \
@@ -684,6 +759,12 @@ static const struct dpu_sspp_sub_blks msm8998_vig_sblk_2 =
static const struct dpu_sspp_sub_blks msm8998_vig_sblk_3 =
_VIG_SBLK("3", 0, DPU_SSPP_SCALER_QSEED3);
+static const struct dpu_rotation_cfg dpu_rot_sc7280_cfg_v2 = {
+ .rot_maxheight = 1088,
+ .rot_num_formats = ARRAY_SIZE(rotation_v2_formats),
+ .rot_format_list = rotation_v2_formats,
+};
+
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 =
_VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3);
static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 =
@@ -751,6 +832,9 @@ static const struct dpu_sspp_cfg sdm845_sspp[] = {
static const struct dpu_sspp_sub_blks sc7180_vig_sblk_0 =
_VIG_SBLK("0", 4, DPU_SSPP_SCALER_QSEED4);
+static const struct dpu_sspp_sub_blks sc7280_vig_sblk_0 =
+ _VIG_SBLK_ROT("0", 4, DPU_SSPP_SCALER_QSEED4, &dpu_rot_sc7280_cfg_v2);
+
static const struct dpu_sspp_cfg sc7180_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
@@ -791,8 +875,8 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
};
static const struct dpu_sspp_cfg sc7280_sspp[] = {
- SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
- sc7180_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7280_MASK,
+ sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_CURSOR_SDM845_MASK,
@@ -1117,6 +1201,24 @@ static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
};
+
+/*************************************************************
+ * DSC sub blocks config
+ *************************************************************/
+#define DSC_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x140, \
+ .features = 0, \
+ }
+
+static struct dpu_dsc_cfg sdm845_dsc[] = {
+ DSC_BLK("dsc_0", DSC_0, 0x80000),
+ DSC_BLK("dsc_1", DSC_1, 0x80400),
+ DSC_BLK("dsc_2", DSC_2, 0x80800),
+ DSC_BLK("dsc_3", DSC_3, 0x80c00),
+};
+
/*************************************************************
* INTF sub blocks config
*************************************************************/
@@ -1180,6 +1282,29 @@ static const struct dpu_intf_cfg qcm2290_intf[] = {
};
/*************************************************************
+ * Writeback blocks config
+ *************************************************************/
+#define WB_BLK(_name, _id, _base, _features, _clk_ctrl, \
+ __xin_id, vbif_id, _reg, _wb_done_bit) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x2c8, \
+ .features = _features, \
+ .format_list = wb2_formats, \
+ .num_formats = ARRAY_SIZE(wb2_formats), \
+ .clk_ctrl = _clk_ctrl, \
+ .xin_id = __xin_id, \
+ .vbif_idx = vbif_id, \
+ .maxlinewidth = DEFAULT_DPU_LINE_WIDTH, \
+ .intr_wb_done = DPU_IRQ_IDX(_reg, _wb_done_bit) \
+ }
+
+static const struct dpu_wb_cfg sm8250_wb[] = {
+ WB_BLK("wb_2", WB_2, 0x65000, WB_SM8250_MASK, DPU_CLK_CTRL_WB2, 6,
+ VBIF_RT, MDP_SSPP_TOP0_INTR, 4),
+};
+
+/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -1643,6 +1768,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.mixer = sdm845_lm,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
+ .dsc_count = ARRAY_SIZE(sdm845_dsc),
+ .dsc = sdm845_dsc,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
@@ -1775,6 +1902,8 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
+ .wb_count = ARRAY_SIZE(sm8250_wb),
+ .wb = sm8250_wb,
.reg_dma_count = 1,
.dma_cfg = sm8250_regdma,
.perf = sm8250_perf_data,
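
The WB_BLK() entry above derives the sm8250 WB_DONE interrupt index from a (register bank, bit) pair via DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4). Assuming the flat reg * 32 + bit encoding used by the DPU interrupt tables (an assumption here, not spelled out in this hunk), the mapping round-trips like this:

#include <stdio.h>

/* assumed encoding of DPU_IRQ_IDX(): 32 status bits per register bank */
#define IRQ_IDX(reg, bit) ((reg) * 32 + (bit))

enum { TOP0_INTR = 0 };	/* hypothetical bank numbering */

int main(void)
{
	int wb_done = IRQ_IDX(TOP0_INTR, 4);

	/* bank and bit recovered from the flat index */
	printf("irq=%d -> reg=%d bit=%d\n",
	       wb_done, wb_done / 32, wb_done % 32);
	return 0;
}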
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index b85b24bd3f53..8cb6d1f25bf9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_CATALOG_H
@@ -112,6 +114,7 @@ enum {
* @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
* @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
* @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_INLINE_ROTATION Support inline rotation
* @DPU_SSPP_MAX maximum value
*/
enum {
@@ -132,6 +135,7 @@ enum {
DPU_SSPP_TS_PREFILL,
DPU_SSPP_TS_PREFILL_REC1,
DPU_SSPP_CDP,
+ DPU_SSPP_INLINE_ROTATION,
DPU_SSPP_MAX
};
@@ -212,6 +216,42 @@ enum {
};
/**
+ * WB sub-blocks and features
+ * @DPU_WB_LINE_MODE	Writeback module supports line/linear mode
+ * @DPU_WB_BLOCK_MODE	Writeback module supports block mode read
+ * @DPU_WB_UBWC		Writeback universal bandwidth compression
+ * @DPU_WB_YUV_CONFIG	Writeback supports output of YUV colorspace
+ * @DPU_WB_PIPE_ALPHA	Writeback supports pipe alpha
+ * @DPU_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
+ *			the destination image
+ * @DPU_WB_QOS		Writeback supports QoS control, danger/safe/creq
+ * @DPU_WB_QOS_8LVL	Writeback supports 8-level QoS control
+ * @DPU_WB_CDP		Writeback supports client driven prefetch
+ * @DPU_WB_INPUT_CTRL	Writeback supports selecting which pp block its
+ *			input pixel data arrives from
+ * @DPU_WB_CROP		CWB supports cropping
+ * @DPU_WB_MAX		maximum value
+ */
+enum {
+ DPU_WB_LINE_MODE = 0x1,
+ DPU_WB_BLOCK_MODE,
+ DPU_WB_UBWC,
+ DPU_WB_YUV_CONFIG,
+ DPU_WB_PIPE_ALPHA,
+ DPU_WB_XY_ROI_OFFSET,
+ DPU_WB_QOS,
+ DPU_WB_QOS_8LVL,
+ DPU_WB_CDP,
+ DPU_WB_INPUT_CTRL,
+ DPU_WB_CROP,
+ DPU_WB_MAX
+};
+
+/**
* VBIF sub-blocks and features
* @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
* @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
@@ -315,6 +355,18 @@ struct dpu_qos_lut_tbl {
};
/**
+ * struct dpu_rotation_cfg - define inline rotation config
+ * @rot_maxheight: max pre-rotated height allowed for rotation
+ * @rot_num_formats: number of elements in @rot_format_list
+ * @rot_format_list: list of supported rotator formats
+ */
+struct dpu_rotation_cfg {
+ u32 rot_maxheight;
+ size_t rot_num_formats;
+ const u32 *rot_format_list;
+};
+
+/**
* struct dpu_caps - define DPU capabilities
* @max_mixer_width max layer mixer line width support.
* @max_mixer_blendstages max layer mixer blend stages or
@@ -369,6 +421,7 @@ struct dpu_caps {
* @num_formats: Number of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
* @virt_num_formats: Number of supported formats for virtual planes
+ * @rotation_cfg: inline rotation configuration
*/
struct dpu_sspp_sub_blks {
u32 creq_vblank;
@@ -390,6 +443,7 @@ struct dpu_sspp_sub_blks {
u32 num_formats;
const u32 *virt_format_list;
u32 virt_num_formats;
+ const struct dpu_rotation_cfg *rotation_cfg;
};
/**
@@ -444,6 +498,7 @@ enum dpu_clk_ctrl_type {
DPU_CLK_CTRL_CURSOR1,
DPU_CLK_CTRL_INLINE_ROT0_SSPP,
DPU_CLK_CTRL_REG_DMA,
+ DPU_CLK_CTRL_WB2,
DPU_CLK_CTRL_MAX,
};
@@ -562,6 +617,16 @@ struct dpu_merge_3d_cfg {
};
/**
+ * struct dpu_dsc_cfg - information of DSC blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_dsc_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
@@ -582,6 +647,28 @@ struct dpu_intf_cfg {
};
/**
+ * struct dpu_wb_cfg - information of writeback blocks
+ * @DPU_HW_BLK_INFO: refer to the description above for DPU_HW_BLK_INFO
+ * @vbif_idx: vbif client index
+ * @maxlinewidth: max line width supported by writeback block
+ * @xin_id: bus client identifier
+ * @intr_wb_done: interrupt index for WB_DONE
+ * @format_list: list of formats supported by this writeback block
+ * @num_formats: number of formats supported by this writeback block
+ * @clk_ctrl: clock control identifier
+ */
+struct dpu_wb_cfg {
+ DPU_HW_BLK_INFO;
+ u8 vbif_idx;
+ u32 maxlinewidth;
+ u32 xin_id;
+ s32 intr_wb_done;
+ const u32 *format_list;
+ u32 num_formats;
+ enum dpu_clk_ctrl_type clk_ctrl;
+};
+
+/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
* @ot_limit OT limit to use up to specified pixel per second
@@ -757,12 +844,18 @@ struct dpu_mdss_cfg {
u32 merge_3d_count;
const struct dpu_merge_3d_cfg *merge_3d;
+ u32 dsc_count;
+ struct dpu_dsc_cfg *dsc;
+
u32 intf_count;
const struct dpu_intf_cfg *intf;
u32 vbif_count;
const struct dpu_vbif_cfg *vbif;
+ u32 wb_count;
+ const struct dpu_wb_cfg *wb;
+
u32 reg_dma_count;
struct dpu_reg_dma_cfg dma_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 3584f5ee6bb3..c33e7ef611a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -23,8 +24,12 @@
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
+#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_MERGE_3D_FLUSH 0x100
+#define CTL_DSC_ACTIVE 0x0E8
+#define CTL_DSC_FLUSH 0x104
+#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
@@ -34,7 +39,9 @@
#define DPU_REG_RESET_TIMEOUT_US 2000
#define MERGE_3D_IDX 23
+#define DSC_IDX 22
#define INTF_IDX 31
+#define WB_IDX 16
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -126,13 +133,15 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
-
if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
ctx->pending_merge_3d_flush_mask);
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
+ if (ctx->pending_flush_mask & BIT(WB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
+ ctx->pending_wb_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
@@ -253,6 +262,27 @@ static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
}
}
+static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+}
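As a sanity check of the v1 helper above: for WB_2 it sets bit (WB_2 - WB_0) = 2 in pending_wb_flush_mask and bit WB_IDX = 16 in the top-level pending_flush_mask. A standalone sketch of the same arithmetic (the absolute enum values are assumed here; only the offset from WB_0 matters):

#include <stdio.h>

#define BIT(n)	(1U << (n))
#define WB_IDX	16

enum dpu_wb { WB_0 = 1, WB_1, WB_2 };	/* assumed to mirror dpu_hw_mdss.h */

int main(void)
{
	unsigned int pending_wb_flush_mask = 0, pending_flush_mask = 0;
	enum dpu_wb wb = WB_2;

	pending_wb_flush_mask |= BIT(wb - WB_0);	/* -> 0x4 */
	pending_flush_mask |= BIT(WB_IDX);		/* -> 0x10000 */

	printf("wb mask %#x, ctl mask %#x\n",
	       pending_wb_flush_mask, pending_flush_mask);
	return 0;
}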
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -502,6 +532,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 wb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -511,17 +542,32 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if ((test_bit(DPU_CTL_VM_CFG, &ctx->caps->features)))
mode_sel = CTL_DEFAULT_GROUP_ID << 28;
+ if (cfg->dsc)
+ DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH, cfg->dsc);
+
if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
- intf_active |= BIT(cfg->intf - INTF_0);
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+
+ if (cfg->intf)
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ if (cfg->wb)
+ wb_active |= BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
+ if (cfg->dsc) {
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, BIT(DSC_IDX));
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+ }
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
@@ -537,6 +583,9 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
@@ -554,6 +603,44 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}
+static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_active = 0;
+ u32 wb_active = 0;
+ u32 merge3d_active = 0;
+
+ /*
+ * This API resets each portion of the CTL path, namely
+ * clearing the SSPPs staged on the LM, the merge_3d block,
+ * interfaces, writeback etc., to ensure a clean teardown of the pipeline.
+ * This will be used for writeback to begin with to have a
+ * proper teardown of the writeback session but upon further
+ * validation, this can be extended to all interfaces.
+ */
+ if (cfg->merge_3d) {
+ merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
+ merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
+ DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+ merge3d_active);
+ }
+
+ dpu_hw_ctl_clear_all_blendstages(ctx);
+
+ if (cfg->intf) {
+ intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
+ intf_active &= ~BIT(cfg->intf - INTF_0);
+ DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ }
+
+ if (cfg->wb) {
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ wb_active &= ~BIT(cfg->wb - WB_0);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ }
+}
+
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
unsigned long *fetch_active)
{
@@ -577,15 +664,18 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
+ ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index ac1544474022..5755307089b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -40,13 +41,16 @@ struct dpu_hw_stage_cfg {
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
* @stream_sel: Stream selection for multi-stream interfaces
+ * @wb: Writeback block used
+ * @dsc: bitmask of DSC blocks used
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
int stream_sel;
+ unsigned int dsc;
};
/**
@@ -100,6 +104,15 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
+ * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : writeback block index
+ */
+ void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
+ enum dpu_wb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -138,6 +151,14 @@ struct dpu_hw_ctl_ops {
void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
struct dpu_hw_intf_cfg *cfg);
+ /**
+ * reset ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
int (*reset)(struct dpu_hw_ctl *c);
/*
@@ -189,6 +210,7 @@ struct dpu_hw_ctl_ops {
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
+ * @pending_wb_flush_mask: pending WB flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -202,6 +224,7 @@ struct dpu_hw_ctl {
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
+ u32 pending_wb_flush_mask;
u32 pending_merge_3d_flush_mask;
/* ops */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
new file mode 100644
index 000000000000..4ad8991fc7d9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2022, Linaro Limited
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_dsc.h"
+
+#define DSC_COMMON_MODE 0x000
+#define DSC_ENC 0x004
+#define DSC_PICTURE 0x008
+#define DSC_SLICE 0x00C
+#define DSC_CHUNK_SIZE 0x010
+#define DSC_DELAY 0x014
+#define DSC_SCALE_INITIAL 0x018
+#define DSC_SCALE_DEC_INTERVAL 0x01C
+#define DSC_SCALE_INC_INTERVAL 0x020
+#define DSC_FIRST_LINE_BPG_OFFSET 0x024
+#define DSC_BPG_OFFSET 0x028
+#define DSC_DSC_OFFSET 0x02C
+#define DSC_FLATNESS 0x030
+#define DSC_RC_MODEL_SIZE 0x034
+#define DSC_RC 0x038
+#define DSC_RC_BUF_THRESH 0x03C
+#define DSC_RANGE_MIN_QP 0x074
+#define DSC_RANGE_MAX_QP 0x0B0
+#define DSC_RANGE_BPG_OFFSET 0x0EC
+
+static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
+{
+ struct dpu_hw_blk_reg_map *c = &dsc->hw;
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, 0);
+}
+
+static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 data, lsb, bpp;
+ u32 slice_last_group_size;
+ u32 det_thresh_flatness;
+ bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+
+ DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
+
+ if (is_cmd_mode)
+ initial_lines += 1;
+
+ slice_last_group_size = 3 - (dsc->drm->slice_width % 3);
+ data = (initial_lines << 20);
+ data |= ((slice_last_group_size - 1) << 18);
+ /* bpp is 6.4 format, the 4 LSBs hold the fractional part */
+ data |= dsc->drm->bits_per_pixel << 12;
+ lsb = dsc->drm->bits_per_pixel % 4;
+ bpp = dsc->drm->bits_per_pixel / 4;
+ bpp *= 4;
+ bpp <<= 4;
+ bpp |= lsb;
+
+ data |= bpp << 8;
+ data |= (dsc->drm->block_pred_enable << 7);
+ data |= (dsc->drm->line_buf_depth << 3);
+ data |= (dsc->drm->simple_422 << 2);
+ data |= (dsc->drm->convert_rgb << 1);
+ data |= dsc->drm->bits_per_component;
+
+ DPU_REG_WRITE(c, DSC_ENC, data);
+
+ data = dsc->drm->pic_width << 16;
+ data |= dsc->drm->pic_height;
+ DPU_REG_WRITE(c, DSC_PICTURE, data);
+
+ data = dsc->drm->slice_width << 16;
+ data |= dsc->drm->slice_height;
+ DPU_REG_WRITE(c, DSC_SLICE, data);
+
+ data = dsc->drm->slice_chunk_size << 16;
+ DPU_REG_WRITE(c, DSC_CHUNK_SIZE, data);
+
+ data = dsc->drm->initial_dec_delay << 16;
+ data |= dsc->drm->initial_xmit_delay;
+ DPU_REG_WRITE(c, DSC_DELAY, data);
+
+ data = dsc->drm->initial_scale_value;
+ DPU_REG_WRITE(c, DSC_SCALE_INITIAL, data);
+
+ data = dsc->drm->scale_decrement_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_DEC_INTERVAL, data);
+
+ data = dsc->drm->scale_increment_interval;
+ DPU_REG_WRITE(c, DSC_SCALE_INC_INTERVAL, data);
+
+ data = dsc->drm->first_line_bpg_offset;
+ DPU_REG_WRITE(c, DSC_FIRST_LINE_BPG_OFFSET, data);
+
+ data = dsc->drm->nfl_bpg_offset << 16;
+ data |= dsc->drm->slice_bpg_offset;
+ DPU_REG_WRITE(c, DSC_BPG_OFFSET, data);
+
+ data = dsc->drm->initial_offset << 16;
+ data |= dsc->drm->final_offset;
+ DPU_REG_WRITE(c, DSC_DSC_OFFSET, data);
+
+ det_thresh_flatness = 7 + 2 * (dsc->drm->bits_per_component - 8);
+ data = det_thresh_flatness << 10;
+ data |= dsc->drm->flatness_max_qp << 5;
+ data |= dsc->drm->flatness_min_qp;
+ DPU_REG_WRITE(c, DSC_FLATNESS, data);
+
+ data = dsc->drm->rc_model_size;
+ DPU_REG_WRITE(c, DSC_RC_MODEL_SIZE, data);
+
+ data = dsc->drm->rc_tgt_offset_low << 18;
+ data |= dsc->drm->rc_tgt_offset_high << 14;
+ data |= dsc->drm->rc_quant_incr_limit1 << 9;
+ data |= dsc->drm->rc_quant_incr_limit0 << 4;
+ data |= dsc->drm->rc_edge_factor;
+ DPU_REG_WRITE(c, DSC_RC, data);
+}
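The "6.4 format" comment above refers to the bits_per_pixel field of struct drm_dsc_config, which stores the target bpp in U6.4 fixed point, i.e. in units of 1/16 bpp. A minimal standalone sketch of how such a value decomposes:

#include <stdio.h>

/* split a U6.4 fixed-point bpp value into integer and fractional parts */
static void print_bpp_6_4(unsigned int bpp_6_4)
{
	unsigned int integer = bpp_6_4 >> 4;	/* upper 6 bits */
	unsigned int frac = bpp_6_4 & 0xf;	/* lower 4 bits, 1/16ths */

	printf("raw %#x -> %u + %u/16 bpp\n", bpp_6_4, integer, frac);
}

int main(void)
{
	print_bpp_6_4(128);	/* 8.0 bpp, a common DSC target */
	print_bpp_6_4(136);	/* 8.5 bpp */
	return 0;
}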
+
+static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc)
+{
+ struct drm_dsc_rc_range_parameters *rc = dsc->drm->rc_range_params;
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ u32 off;
+ int i;
+
+ off = DSC_RC_BUF_THRESH;
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ DPU_REG_WRITE(c, off, dsc->drm->rc_buf_thresh[i]);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MIN_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_min_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_MAX_QP;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_max_qp);
+ off += 4;
+ }
+
+ off = DSC_RANGE_BPG_OFFSET;
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ DPU_REG_WRITE(c, off, rc[i].range_bpg_offset);
+ off += 4;
+ }
+}
+
+static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->dsc_count; i++) {
+ if (dsc == m->dsc[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dsc[i].base;
+ b->length = m->dsc[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_DSC;
+ return &m->dsc[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
+ unsigned long cap)
+{
+ ops->dsc_disable = dpu_hw_dsc_disable;
+ ops->dsc_config = dpu_hw_dsc_config;
+ ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+}
+
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dsc *c;
+ struct dpu_dsc_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dsc_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_dsc_ops(&c->ops, c->caps->features);
+
+ return c;
+}
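dpu_hw_dsc_init() follows the kernel's ERR_PTR convention: a failure is encoded in the returned pointer itself instead of a separate status code. A userspace sketch of the idiom, with simplified stand-ins for the kernel helpers:

#include <stdio.h>
#include <errno.h>

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* the kernel reserves the top 4095 addresses for error codes */
	return (unsigned long)ptr >= (unsigned long)-4095;
}

int main(void)
{
	void *hw_dsc = ERR_PTR(-EINVAL);	/* what a failed init returns */

	if (IS_ERR(hw_dsc))
		printf("init failed: %ld\n", PTR_ERR(hw_dsc));	/* -22 */
	return 0;
}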
+
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
+{
+ kfree(dsc);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
new file mode 100644
index 000000000000..b39ee4ed32f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020-2022, Linaro Limited */
+
+#ifndef _DPU_HW_DSC_H
+#define _DPU_HW_DSC_H
+
+#include <drm/display/drm_dsc.h>
+
+#define DSC_MODE_SPLIT_PANEL BIT(0)
+#define DSC_MODE_MULTIPLEX BIT(1)
+#define DSC_MODE_VIDEO BIT(2)
+
+struct dpu_hw_dsc;
+
+/**
+ * struct dpu_hw_dsc_ops - interface to the dsc hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dsc_ops {
+ /**
+ * dsc_disable - disable dsc
+ * @hw_dsc: Pointer to dsc context
+ */
+ void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
+
+ /**
+ * dsc_config - configures dsc encoder
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ * @mode: dsc topology mode to be set
+ * @initial_lines: amount of initial lines to be used
+ */
+ void (*dsc_config)(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc,
+ u32 mode,
+ u32 initial_lines);
+
+ /**
+ * dsc_config_thresh - programs panel thresholds
+ * @hw_dsc: Pointer to dsc context
+ * @dsc: panel dsc parameters
+ */
+ void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
+ struct msm_display_dsc_config *dsc);
+};
+
+struct dpu_hw_dsc {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dsc */
+ enum dpu_dsc idx;
+ const struct dpu_dsc_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_dsc_ops ops;
+};
+
+/**
+ * dpu_hw_dsc_init - initializes the dsc block for the passed dsc idx.
+ * @idx: DSC index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_dsc context
+ */
+struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dsc_destroy - destroys dsc driver context
+ * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
+ */
+void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
+
+static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dsc, base);
+}
+
+#endif /* _DPU_HW_DSC_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index c61b5b283f08..61284e6c313d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -151,25 +151,22 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
*/
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
- struct dpu_irq_callback *cb;
-
VERB("irq_idx=%d\n", irq_idx);
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
+ if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb)
DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
- atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
+ atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);
/*
* Perform registered function callback
*/
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
- if (cb->func)
- cb->func(cb->arg, irq_idx);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
@@ -362,7 +359,7 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
wmb();
}
-u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
@@ -389,7 +386,7 @@ u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
intr_status = DPU_REG_READ(&intr->hw,
dpu_intr_set[reg_idx].status_off) &
DPU_IRQ_MASK(irq_idx);
- if (intr_status && clear)
+ if (intr_status)
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
intr_status);
@@ -413,24 +410,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
struct dpu_mdss_cfg *m)
{
struct dpu_hw_intr *intr;
+ int nirq = MDP_INTR_MAX * 32;
if (!addr || !m)
return ERR_PTR(-EINVAL);
- intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
if (!intr)
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
- intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
-
- intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
- GFP_KERNEL);
- if (intr->cache_irq_mask == NULL) {
- kfree(intr);
- return ERR_PTR(-ENOMEM);
- }
+ intr->total_irqs = nirq;
intr->irq_mask = m->mdss_irqs;
@@ -441,31 +432,18 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
- if (intr) {
- kfree(intr->cache_irq_mask);
-
- kfree(intr->irq_cb_tbl);
- kfree(intr->irq_counts);
-
- kfree(intr);
- }
+ kfree(intr);
}
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+ void (*irq_cb)(void *arg, int irq_idx),
+ void *irq_arg)
{
unsigned long irq_flags;
+ int ret;
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
+ if (!irq_cb) {
+ DPU_ERROR("invalid ird_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
return -EINVAL;
}
@@ -477,41 +455,34 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- list_add_tail(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
- if (list_is_first(&register_irq_cb->list,
- &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_enable_irq_locked(
+
+ if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return -EBUSY;
+ }
+
+ trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;
+
+ ret = dpu_hw_intr_enable_irq_locked(
dpu_kms->hw_intr,
irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
irq_idx);
- }
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_irq_register_success(irq_idx);
+
return 0;
}
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
unsigned long irq_flags;
-
- if (!dpu_kms->hw_intr->irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
+ int ret;
if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
@@ -521,20 +492,20 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- /* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
- int ret = dpu_hw_intr_disable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
+ trace_dpu_core_irq_unregister_callback(irq_idx);
+
+ ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d: %d\n",
+ irq_idx, ret);
+
+ dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
+ dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;
+
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_irq_unregister_success(irq_idx);
+
return 0;
}
@@ -542,24 +513,18 @@ int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
struct dpu_kms *dpu_kms = s->private;
- struct dpu_irq_callback *cb;
unsigned long irq_flags;
- int i, irq_count, cb_count;
-
- if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
- return 0;
+ int i, irq_count;
+ void *cb;
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
- cb_count = 0;
- irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
- list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
- cb_count++;
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
+ cb = dpu_kms->hw_intr->irq_tbl[i].cb;
spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
- if (irq_count || cb_count)
- seq_printf(s, "idx:%d irq:%d cb:%d\n",
- i, irq_count, cb_count);
+ if (irq_count || cb)
+ seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
}
return 0;
@@ -575,8 +540,9 @@ void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
}
#endif
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+void dpu_core_irq_preinstall(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
@@ -584,24 +550,21 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
dpu_disable_all_irqs(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
- /* Create irq callbacks for all possible irq_idx */
- dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
- for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
- INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
- atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
- }
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+void dpu_core_irq_uninstall(struct msm_kms *kms)
{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
int i;
+ if (!dpu_kms->hw_intr)
+ return;
+
pm_runtime_get_sync(&dpu_kms->pdev->dev);
for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
- if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
+ if (dpu_kms->hw_intr->irq_tbl[i].cb)
DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
dpu_clear_irqs(dpu_kms);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 37379966d8ec..4154c5e2b4ae 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -44,19 +44,21 @@ enum dpu_hw_intr_reg {
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
- * @irq_cb_tbl: array of IRQ callbacks lists
- * @irq_counts: array of IRQ counts
+ * @irq_tbl: array of IRQ callbacks, arguments and counts
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
- u32 *cache_irq_mask;
+ u32 cache_irq_mask[MDP_INTR_MAX];
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
- struct list_head *irq_cb_tbl;
- atomic_t *irq_counts;
+ struct {
+ void (*cb)(void *arg, int irq_idx);
+ void *arg;
+ atomic_t count;
+ } irq_tbl[];
};
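irq_tbl above is a flexible array member sized at allocation time, which is why dpu_hw_intr_init() switched to a single struct_size() allocation. A runnable sketch of the pattern (the kernel's struct_size() additionally guards against multiplication overflow; the plain expression below is the illustrative equivalent):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	void (*cb)(void *arg, int irq_idx);
	void *arg;
	int count;
};

struct intr {
	int total_irqs;
	struct entry irq_tbl[];	/* flexible array member */
};

int main(void)
{
	int nirq = 8 * 32;	/* stand-in for MDP_INTR_MAX * 32 */
	/* one allocation covers the header and all table entries */
	struct intr *intr = calloc(1, sizeof(*intr) +
				   nirq * sizeof(intr->irq_tbl[0]));

	if (!intr)
		return 1;
	intr->total_irqs = nirq;
	printf("allocated %zu bytes for %d irqs\n",
	       sizeof(*intr) + nirq * sizeof(intr->irq_tbl[0]),
	       intr->total_irqs);
	free(intr);
	return 0;
}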
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 116e2b5b1a90..3f4d2c6e1b45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -33,6 +33,7 @@
#define INTF_TP_COLOR1 0x05C
#define INTF_CONFIG2 0x060
#define INTF_DISPLAY_DATA_HCTL 0x064
+#define INTF_ACTIVE_DATA_HCTL 0x068
#define INTF_FRAME_LINE_COUNT_EN 0x0A8
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
@@ -60,6 +61,12 @@
#define INTF_MUX 0x25C
+#define INTF_CFG_ACTIVE_H_EN BIT(29)
+#define INTF_CFG_ACTIVE_V_EN BIT(30)
+
+#define INTF_CFG2_DATABUS_WIDEN BIT(0)
+#define INTF_CFG2_DATA_HCTL_EN BIT(4)
+
static const struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -90,15 +97,23 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
u32 hsync_period, vsync_period;
u32 display_v_start, display_v_end;
u32 hsync_start_x, hsync_end_x;
+ u32 hsync_data_start_x, hsync_data_end_x;
u32 active_h_start, active_h_end;
u32 active_v_start, active_v_end;
u32 active_hctl, display_hctl, hsync_ctl;
u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
u32 panel_format;
- u32 intf_cfg, intf_cfg2 = 0, display_data_hctl = 0;
+ u32 intf_cfg, intf_cfg2 = 0;
+ u32 display_data_hctl = 0, active_data_hctl = 0;
+ u32 data_width;
+ bool dp_intf = false;
/* read interface_cfg */
intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+
+ if (ctx->cap->type == INTF_DP)
+ dp_intf = true;
+
hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
p->h_front_porch;
vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
@@ -112,7 +127,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
hsync_end_x = hsync_period - p->h_front_porch - 1;
- if (p->width != p->xres) {
+ if (p->width != p->xres) { /* border fill added */
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
} else {
@@ -120,7 +135,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
active_h_end = 0;
}
- if (p->height != p->yres) {
+ if (p->height != p->yres) { /* border fill added */
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
} else {
@@ -130,27 +145,46 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
if (active_h_end) {
active_hctl = (active_h_end << 16) | active_h_start;
- intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN;
} else {
active_hctl = 0;
}
if (active_v_end)
- intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+ intf_cfg |= INTF_CFG_ACTIVE_V_EN;
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (p->wide_bus_en)
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+
+ data_width = p->width;
+
+ hsync_data_start_x = hsync_start_x;
+ hsync_data_end_x = hsync_start_x + data_width - 1;
+
+ display_data_hctl = (hsync_data_end_x << 16) | hsync_data_start_x;
+
+ if (dp_intf) {
+ /* DP timing adjustment */
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+
active_h_start = hsync_start_x;
active_h_end = active_h_start + p->xres - 1;
active_v_start = display_v_start;
active_v_end = active_v_start + (p->yres * hsync_period) - 1;
- display_v_start += p->hsync_pulse_width + p->h_back_porch;
-
active_hctl = (active_h_end << 16) | active_h_start;
display_hctl = active_hctl;
+
+ intf_cfg |= INTF_CFG_ACTIVE_H_EN | INTF_CFG_ACTIVE_V_EN;
}
den_polarity = 0;
@@ -180,13 +214,6 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
(COLOR_8BIT << 4) |
(0x21 << 8));
- if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
- intf_cfg2 |= BIT(4);
- display_data_hctl = display_hctl;
- DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
- DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
- }
-
DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
@@ -204,6 +231,11 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+ if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+ DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
+ }
}
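To make the register packing in the timing hunk above concrete, here is a standalone computation of INTF_HSYNC_CTL and the display HCTL word for a standard 1080p mode (the timing numbers are just an example):

#include <stdio.h>

int main(void)
{
	/* 1920x1080@60 CEA horizontal timing */
	unsigned int width = 1920, hfp = 88, hbp = 148, hpw = 44;
	unsigned int hsync_period = hpw + hbp + width + hfp;	/* 2200 */
	unsigned int hsync_start_x = hbp + hpw;			/* 192 */
	unsigned int hsync_end_x = hsync_period - hfp - 1;	/* 2111 */
	unsigned int hsync_ctl = (hsync_period << 16) | hpw;
	unsigned int display_hctl = (hsync_end_x << 16) | hsync_start_x;

	printf("INTF_HSYNC_CTL    = %#010x\n", hsync_ctl);	/* 0x0898002c */
	printf("INTF_DISPLAY_HCTL = %#010x\n", display_hctl);	/* 0x083f00c0 */
	return 0;
}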
static void dpu_hw_intf_enable_timing_engine(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 230d122fa43b..7b2d96ac61e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -30,6 +30,8 @@ struct intf_timing_params {
u32 border_clr;
u32 underflow_clr;
u32 hsync_skew;
+
+ bool wide_bus_en;
};
struct intf_prog_fetch {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 86363c0ec834..462f5082099e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -138,7 +138,7 @@ static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
ctrl = DPU_REG_READ(c, LM_MISR_CTRL);
if (!(ctrl & LM_MISR_CTRL_ENABLE))
- return -EINVAL;
+ return -ENODATA;
if (!(ctrl & LM_MISR_CTRL_STATUS))
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index bb9ceadeb0bb..9f402be55fbf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -97,6 +97,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_WB,
DPU_HW_BLK_DSPP,
DPU_HW_BLK_MERGE_3D,
+ DPU_HW_BLK_DSC,
DPU_HW_BLK_MAX,
};
@@ -176,6 +177,17 @@ enum dpu_ctl {
CTL_MAX
};
+enum dpu_dsc {
+ DSC_NONE = 0,
+ DSC_0,
+ DSC_1,
+ DSC_2,
+ DSC_3,
+ DSC_4,
+ DSC_5,
+ DSC_MAX
+};
+
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
@@ -205,14 +217,21 @@ enum dpu_intf {
INTF_MAX
};
+/*
+ * Historically these values correspond to the values written to the
+ * DISP_INTF_SEL register, which had to be programmed manually. On newer MDP
+ * generations this register is a NOP, but we keep the values for historical
+ * reasons.
+ */
enum dpu_intf_type {
INTF_NONE = 0x0,
INTF_DSI = 0x1,
INTF_HDMI = 0x3,
INTF_LCDC = 0x5,
+ /* old eDP found on 8x74 and 8x84 */
INTF_EDP = 0x9,
+ /* both DP and eDP, handled by the new DP driver */
INTF_DP = 0xa,
- INTF_TYPE_MAX,
/* virtual interfaces */
INTF_WB = 0x100,
@@ -437,5 +456,6 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
#define DPU_DBG_MASK_DSPP (1 << 10)
+#define DPU_DBG_MASK_DSC (1 << 11)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 55766c97c4c8..47c6ab6caf95 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -28,6 +28,9 @@
#define PP_FBC_MODE 0x034
#define PP_FBC_BUDGET_CTL 0x038
#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
#define PP_DITHER_EN 0x000
#define PP_DITHER_BITDEPTH 0x004
@@ -245,6 +248,32 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
return line;
}
+static int dpu_hw_pp_dsc_enable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 1);
+ return 0;
+}
+
+static void dpu_hw_pp_dsc_disable(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_DSC_MODE, 0);
+}
+
+static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
+{
+ struct dpu_hw_blk_reg_map *pp_c = &pp->hw;
+ int data;
+
+ data = DPU_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
+ data |= BIT(18); /* endian flip */
+ DPU_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
+ return 0;
+}
+
static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
unsigned long features)
{
@@ -256,6 +285,9 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong *c,
c->ops.get_autorefresh = dpu_hw_pp_get_autorefresh_config;
c->ops.poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
c->ops.get_line_count = dpu_hw_pp_get_line_count;
+ c->ops.setup_dsc = dpu_hw_pp_setup_dsc;
+ c->ops.enable_dsc = dpu_hw_pp_dsc_enable;
+ c->ops.disable_dsc = dpu_hw_pp_dsc_disable;
if (test_bit(DPU_PINGPONG_DITHER, &features))
c->ops.setup_dither = dpu_hw_pp_setup_dither;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 89d08a715c16..12758468d9ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -124,6 +124,20 @@ struct dpu_hw_pingpong_ops {
*/
void (*setup_dither)(struct dpu_hw_pingpong *pp,
struct dpu_hw_dither_cfg *cfg);
+ /**
+ * Enable DSC
+ */
+ int (*enable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Disable DSC
+ */
+ void (*disable_dsc)(struct dpu_hw_pingpong *pp);
+
+ /**
+ * Setup DSC
+ */
+ int (*setup_dsc)(struct dpu_hw_pingpong *pp);
};
struct dpu_hw_merge_3d;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 09cdc3576653..0a0864dff783 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -627,7 +627,7 @@ static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg,
+ struct dpu_hw_cdp_cfg *cfg,
enum dpu_sspp_multirect_index index)
{
u32 idx;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 92b071b78fdb..a81e16657d61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -193,22 +193,6 @@ enum {
};
/**
- * struct dpu_hw_pipe_cdp_cfg : CDP configuration
- * @enable: true to enable CDP
- * @ubwc_meta_enable: true to enable ubwc metadata preload
- * @tile_amortize_enable: true to enable amortization control for tile format
- * @preload_ahead: number of request to preload ahead
- * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
- * DPU_SSPP_CDP_PRELOAD_AHEAD_64
- */
-struct dpu_hw_pipe_cdp_cfg {
- bool enable;
- bool ubwc_meta_enable;
- bool tile_amortize_enable;
- u32 preload_ahead;
-};
-
-/**
* struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
* @size: size to prefill in bytes, or zero to disable
* @time: time to prefill in usec, or zero to disable
@@ -359,7 +343,7 @@ struct dpu_hw_sspp_ops {
* @index: rectangle index in multirect
*/
void (*setup_cdp)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_cdp_cfg *cfg,
+ struct dpu_hw_cdp_cfg *cfg,
enum dpu_sspp_multirect_index index);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index aad85116b0a0..512316f25a51 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -422,3 +422,28 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
}
+
+/**
+ * _dpu_hw_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
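A standalone sketch of the lookup semantics above: the first entry whose fill level is >= total_fl wins, and a trailing entry with fl == 0 acts as the catch-all default (the table values here are made up for illustration):

#include <stdio.h>

struct qos_entry { unsigned int fl; unsigned long long lut; };

static unsigned long long get_qos_lut(const struct qos_entry *e, int n,
				      unsigned int total_fl)
{
	int i;

	for (i = 0; i < n; i++)
		if (total_fl <= e[i].fl)
			return e[i].lut;

	/* if the last fl is zero, use it as the default */
	if (!e[n - 1].fl)
		return e[n - 1].lut;

	return 0;
}

int main(void)
{
	const struct qos_entry tbl[] = {
		{ 4, 0x357ULL }, { 8, 0x3579ULL }, { 0, 0x35790ULL },
	};

	printf("fl=3   -> %#llx\n", get_qos_lut(tbl, 3, 3));	/* 0x357 */
	printf("fl=100 -> %#llx\n", get_qos_lut(tbl, 3, 100));	/* default */
	return 0;
}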
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 39134754579e..e4a65eb4f769 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
#define REG_MASK(n) ((BIT(n)) - 1)
@@ -298,6 +299,21 @@ struct dpu_drm_scaler_v2 {
struct dpu_drm_de_v1 de;
};
+/**
+ * struct dpu_hw_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of request to preload ahead
+ * DPU_*_CDP_PRELOAD_AHEAD_32,
+ * DPU_*_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
u32 *dpu_hw_util_get_log_mask_ptr(void);
@@ -324,4 +340,7 @@ void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
const struct dpu_csc_cfg *data, bool csc10);
+u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl);
+
#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
new file mode 100644
index 000000000000..bcccce292937
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_wb.h"
+#include "dpu_formats.h"
+#include "dpu_kms.h"
+
+#define WB_DST_FORMAT 0x000
+#define WB_DST_OP_MODE 0x004
+#define WB_DST_PACK_PATTERN 0x008
+#define WB_DST0_ADDR 0x00C
+#define WB_DST1_ADDR 0x010
+#define WB_DST2_ADDR 0x014
+#define WB_DST3_ADDR 0x018
+#define WB_DST_YSTRIDE0 0x01C
+#define WB_DST_YSTRIDE1 0x020
+#define WB_DST_DITHER_BITDEPTH 0x024
+#define WB_DST_MATRIX_ROW0 0x030
+#define WB_DST_MATRIX_ROW1 0x034
+#define WB_DST_MATRIX_ROW2 0x038
+#define WB_DST_MATRIX_ROW3 0x03C
+#define WB_DST_WRITE_CONFIG 0x048
+#define WB_ROTATION_DNSCALER 0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER 0x054
+#define WB_N16_INIT_PHASE_X_C03 0x060
+#define WB_N16_INIT_PHASE_X_C12 0x064
+#define WB_N16_INIT_PHASE_Y_C03 0x068
+#define WB_N16_INIT_PHASE_Y_C12 0x06C
+#define WB_OUT_SIZE 0x074
+#define WB_ALPHA_X_VALUE 0x078
+#define WB_DANGER_LUT 0x084
+#define WB_SAFE_LUT 0x088
+#define WB_QOS_CTRL 0x090
+#define WB_CREQ_LUT_0 0x098
+#define WB_CREQ_LUT_1 0x09C
+#define WB_UBWC_STATIC_CTRL 0x144
+#define WB_MUX 0x150
+#define WB_CROP_CTRL 0x154
+#define WB_CROP_OFFSET 0x158
+#define WB_CSC_BASE 0x260
+#define WB_DST_ADDR_SW_STATUS 0x2B0
+#define WB_CDP_CNTL 0x2B4
+#define WB_OUT_IMAGE_SIZE 0x2C0
+#define WB_OUT_XY 0x2C4
+
+/* WB_QOS_CTRL */
+#define WB_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+
+static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
+ const struct dpu_mdss_cfg *m, void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->wb_count; i++) {
+ if (wb == m->wb[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->wb[i].base;
+ b->length = m->wb[i].len;
+ b->hwversion = m->hwversion;
+ return &m->wb[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_wb_setup_outaddress(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ DPU_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+ DPU_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+ DPU_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+ DPU_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *data)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = data->dest.format;
+ u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+ u32 write_config = 0;
+ u32 opmode = 0;
+ u32 dst_addr_sw = 0;
+
+ chroma_samp = fmt->chroma_sample;
+
+ dst_format = (chroma_samp << 23) |
+ (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C0_G_Y] << 0);
+
+ if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+ dst_format |= BIT(8); /* DSTC3_EN */
+ if (!fmt->alpha_enable ||
+ !(ctx->caps->features & BIT(DPU_WB_PIPE_ALPHA)))
+ dst_format |= BIT(14); /* DST_ALPHA_X */
+ }
+
+ pattern = (fmt->element[3] << 24) |
+ (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) |
+ (fmt->element[0] << 0);
+
+ dst_format |= (fmt->unpack_align_msb << 18) |
+ (fmt->unpack_tight << 17) |
+ ((fmt->unpack_count - 1) << 12) |
+ ((fmt->bpp - 1) << 9);
+
+ ystride0 = data->dest.plane_pitch[0] |
+ (data->dest.plane_pitch[1] << 16);
+ ystride1 = data->dest.plane_pitch[2] |
+ (data->dest.plane_pitch[3] << 16);
+
+ if (drm_rect_height(&data->roi) && drm_rect_width(&data->roi))
+ outsize = (drm_rect_height(&data->roi) << 16) | drm_rect_width(&data->roi);
+ else
+ outsize = (data->dest.height << 16) | data->dest.width;
+
+ DPU_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+ DPU_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+ DPU_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+ DPU_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+ DPU_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, outsize);
+ DPU_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+ DPU_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+}
+
+static void dpu_hw_wb_roi(struct dpu_hw_wb *ctx, struct dpu_hw_wb_cfg *wb)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 image_size, out_size, out_xy;
+
+ image_size = (wb->dest.height << 16) | wb->dest.width;
+ out_xy = 0;
+ out_size = (drm_rect_height(&wb->roi) << 16) | drm_rect_width(&wb->roi);
+
+ DPU_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+ DPU_REG_WRITE(c, WB_OUT_XY, out_xy);
+ DPU_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
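All three writes above use the same (height << 16) | width packing. For a 1920x1080 destination with a full-frame ROI at (0,0), the programmed values come out as follows (sketch):

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;
	unsigned int image_size = (height << 16) | width;
	unsigned int out_xy = 0;		/* ROI starts at (0,0) */
	unsigned int out_size = image_size;	/* full-frame ROI */

	printf("WB_OUT_IMAGE_SIZE = %#010x\n", image_size);	/* 0x04380780 */
	printf("WB_OUT_XY         = %#010x\n", out_xy);
	printf("WB_OUT_SIZE       = %#010x\n", out_size);
	return 0;
}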
+
+static void dpu_hw_wb_setup_qos_lut(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_qos_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 qos_ctrl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ DPU_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
+ DPU_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
+
+ /*
+ * For chipsets that do not use DPU_WB_QOS_8LVL but are still driven
+ * by the DPU driver, such as msm8998, the reset value of WB_CREQ_LUT
+ * is sufficient for writeback to work; SW doesn't need to explicitly
+ * program a value.
+ */
+ if (ctx->caps && test_bit(DPU_WB_QOS_8LVL, &ctx->caps->features)) {
+ DPU_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
+ DPU_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
+}
+
+static void dpu_hw_wb_setup_cdp(struct dpu_hw_wb *ctx,
+ struct dpu_hw_cdp_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->preload_ahead == DPU_WB_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
+static void dpu_hw_wb_bind_pingpong_blk(
+ struct dpu_hw_wb *ctx,
+ bool enable, const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int mux_cfg;
+
+ if (!ctx)
+ return;
+
+ c = &ctx->hw;
+
+ mux_cfg = DPU_REG_READ(c, WB_MUX);
+ mux_cfg &= ~0xf;
+
+ if (enable)
+ mux_cfg |= (pp - PINGPONG_0) & 0x7;
+ else
+ mux_cfg |= 0xf;
+
+ DPU_REG_WRITE(c, WB_MUX, mux_cfg);
+}
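The mux programming above keeps everything but the low nibble: a 0x7-masked (pp - PINGPONG_0) selects the source pingpong, while 0xf detaches the writeback. A small sketch of the read-modify-write (PINGPONG_0 = 1, as in the dpu_pingpong enum above):

#include <stdio.h>

enum dpu_pingpong { PINGPONG_0 = 1, PINGPONG_1, PINGPONG_2 };

static unsigned int wb_mux(unsigned int old, int enable, enum dpu_pingpong pp)
{
	unsigned int mux_cfg = old & ~0xfu;	/* clear the selection nibble */

	if (enable)
		mux_cfg |= (pp - PINGPONG_0) & 0x7;
	else
		mux_cfg |= 0xf;			/* 0xf == not attached */

	return mux_cfg;
}

int main(void)
{
	printf("bind PINGPONG_2: %#x\n", wb_mux(0xf, 1, PINGPONG_2));	/* 0x2 */
	printf("detach:          %#x\n", wb_mux(0x2, 0, PINGPONG_2));	/* 0xf */
	return 0;
}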
+
+static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
+ unsigned long features)
+{
+ ops->setup_outaddress = dpu_hw_wb_setup_outaddress;
+ ops->setup_outformat = dpu_hw_wb_setup_format;
+
+ if (test_bit(DPU_WB_XY_ROI_OFFSET, &features))
+ ops->setup_roi = dpu_hw_wb_roi;
+
+ if (test_bit(DPU_WB_QOS, &features))
+ ops->setup_qos_lut = dpu_hw_wb_setup_qos_lut;
+
+ if (test_bit(DPU_WB_CDP, &features))
+ ops->setup_cdp = dpu_hw_wb_setup_cdp;
+
+ if (test_bit(DPU_WB_INPUT_CTRL, &features))
+ ops->bind_pingpong_blk = dpu_hw_wb_bind_pingpong_blk;
+}
+
+struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_wb *c;
+ const struct dpu_wb_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _wb_offset(idx, m, addr, &c->hw);
+ if (IS_ERR(cfg)) {
+ WARN(1, "Unable to find wb idx=%d\n", idx);
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->mdp = &m->mdp[0];
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_wb_ops(&c->ops, c->caps->features);
+
+ return c;
+}
+
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
+{
+ kfree(hw_wb);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
new file mode 100644
index 000000000000..3ff5a48541e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+
+#ifndef _DPU_HW_WB_H
+#define _DPU_HW_WB_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_pingpong.h"
+
+struct dpu_hw_wb;
+
+struct dpu_hw_wb_cfg {
+ struct dpu_hw_fmt_layout dest;
+ enum dpu_intf_mode intf_mode;
+ struct drm_rect roi;
+ struct drm_rect crop;
+};
+
+/*
+ * CDP preload ahead address size
+ */
+enum {
+ DPU_WB_CDP_PRELOAD_AHEAD_32,
+ DPU_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_wb_qos_cfg : Writeback pipe QoS configuration
+ * @danger_lut: LUT for generate danger level based on fill level
+ * @safe_lut: LUT for generate safe level based on fill level
+ * @creq_lut: LUT for generate creq level based on fill level
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_wb_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ bool danger_safe_en;
+};
+
+/**
+ * struct dpu_hw_wb_ops : Interface to the wb hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_outaddress: setup output address from the writeback job
+ * @setup_outformat: setup output format of writeback block from writeback job
+ * @setup_qos_lut: setup qos LUT for writeback block based on input
+ * @setup_cdp: setup chroma down prefetch block for writeback block
+ * @bind_pingpong_blk: enable/disable the connection with ping-pong block
+ */
+struct dpu_hw_wb_ops {
+ void (*setup_outaddress)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_roi)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_cfg *wb);
+
+ void (*setup_qos_lut)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_wb_qos_cfg *cfg);
+
+ void (*setup_cdp)(struct dpu_hw_wb *ctx,
+ struct dpu_hw_cdp_cfg *cfg);
+
+ void (*bind_pingpong_blk)(struct dpu_hw_wb *ctx,
+ bool enable, const enum dpu_pingpong pp);
+};
+
+/**
+ * struct dpu_hw_wb : WB driver object
+ * @hw: block hardware details
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: hardware index number within type
+ * @caps: hardware capabilities
+ * @ops: function pointers
+ * @hw_mdp: MDP top level hardware block
+ */
+struct dpu_hw_wb {
+ struct dpu_hw_blk_reg_map hw;
+ const struct dpu_mdp_cfg *mdp;
+
+ /* wb path */
+ int idx;
+ const struct dpu_wb_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_wb_ops ops;
+
+ struct dpu_hw_mdp *hw_mdp;
+};
+
+/**
+ * dpu_hw_wb_init(): Initializes and returns the writeback hw driver object.
+ * @idx: wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_wb *dpu_hw_wb_init(enum dpu_wb idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_wb_destroy(): Destroy writeback hw driver object.
+ * @hw_wb: Pointer to writeback hw driver object
+ */
+void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+
+#endif /* _DPU_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index e29796c4f27b..2b9d931474e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -15,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_writeback.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -29,6 +32,7 @@
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
+#include "dpu_writeback.h"
#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
@@ -380,9 +384,13 @@ static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
struct icc_path *path0;
struct icc_path *path1;
struct drm_device *dev = dpu_kms->dev;
+ struct device *dpu_dev = dev->dev;
+ struct device *mdss_dev = dpu_dev->parent;
- path0 = of_icc_get(dev->dev, "mdp0-mem");
- path1 = of_icc_get(dev->dev, "mdp1-mem");
+ /*
+ * Interconnects are a part of MDSS device tree binding, not the
+ * MDP/DPU device.
+ */
+ path0 = of_icc_get(mdss_dev, "mdp0-mem");
+ path1 = of_icc_get(mdss_dev, "mdp1-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
@@ -565,8 +573,6 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
return PTR_ERR(encoder);
}
- priv->encoders[priv->num_encoders++] = encoder;
-
memset(&info, 0, sizeof(info));
info.intf_type = encoder->encoder_type;
@@ -582,6 +588,8 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
MSM_DISPLAY_CAP_CMD_MODE :
MSM_DISPLAY_CAP_VID_MODE;
+ info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);
+
if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
if (rc) {
@@ -629,8 +637,6 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return rc;
}
- priv->encoders[priv->num_encoders++] = encoder;
-
info.num_of_h_tiles = 1;
info.h_tile_instance[0] = i;
info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
@@ -646,6 +652,45 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
return 0;
}
+static int _dpu_kms_initialize_writeback(struct drm_device *dev,
+ struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
+ const u32 *wb_formats, int n_formats)
+{
+ struct drm_encoder *encoder = NULL;
+ struct msm_display_info info;
+ int rc;
+
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
+ if (IS_ERR(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return PTR_ERR(encoder);
+ }
+
+ memset(&info, 0, sizeof(info));
+
+ rc = dpu_writeback_init(dev, encoder, wb_formats,
+ n_formats);
+ if (rc) {
+ DPU_ERROR("dpu_writeback_init, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
+
+ info.num_of_h_tiles = 1;
+ /* use only WB idx 2 instance for DPU */
+ info.h_tile_instance[0] = WB_2;
+ info.intf_type = encoder->encoder_type;
+
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc) {
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
/**
* _dpu_kms_setup_displays - create encoders, bridges and connectors
* for underlying displays
@@ -659,6 +704,7 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
struct dpu_kms *dpu_kms)
{
int rc = 0;
+ int i;
rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
if (rc) {
@@ -672,39 +718,33 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
return rc;
}
- return rc;
-}
-
-static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
-{
- struct msm_drm_private *priv;
- int i;
-
- priv = dpu_kms->dev->dev_private;
-
- for (i = 0; i < priv->num_crtcs; i++)
- priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
- priv->num_crtcs = 0;
-
- for (i = 0; i < priv->num_planes; i++)
- priv->planes[i]->funcs->destroy(priv->planes[i]);
- priv->num_planes = 0;
-
- for (i = 0; i < priv->num_connectors; i++)
- priv->connectors[i]->funcs->destroy(priv->connectors[i]);
- priv->num_connectors = 0;
+	/* WB is not a separate driver, so check the catalog before initializing */
+ if (dpu_kms->catalog->wb_count) {
+ for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
+ if (dpu_kms->catalog->wb[i].id == WB_2) {
+ rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
+ dpu_kms->catalog->wb[i].format_list,
+ dpu_kms->catalog->wb[i].num_formats);
+ if (rc) {
+ DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+ }
- for (i = 0; i < priv->num_encoders; i++)
- priv->encoders[i]->funcs->destroy(priv->encoders[i]);
- priv->num_encoders = 0;
+ return rc;
}
+#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
struct msm_drm_private *priv;
struct dpu_mdss_cfg *catalog;
@@ -721,9 +761,13 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
*/
ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
if (ret)
- goto fail;
+ return ret;
- max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
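+	/* encoders now live on the drm_device list rather than priv->encoders */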
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
+ max_crtc_count = min(catalog->mixer_count, num_encoders);
/* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
@@ -746,9 +790,8 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
- goto fail;
+ return ret;
}
- priv->planes[priv->num_planes++] = plane;
if (type == DRM_PLANE_TYPE_CURSOR)
cursor_planes[cursor_planes_idx++] = plane;
@@ -763,19 +806,16 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- goto fail;
+ return ret;
}
priv->crtcs[priv->num_crtcs++] = crtc;
}
/* All CRTCs are compatible with all encoders */
- for (i = 0; i < priv->num_encoders; i++)
- priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+ drm_for_each_encoder(encoder, dev)
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
return 0;
-fail:
- _dpu_kms_drm_obj_destroy(dpu_kms);
- return ret;
}
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
@@ -793,8 +833,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
- if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ }
}
}
@@ -837,20 +879,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
_dpu_kms_hw_destroy(dpu_kms);
msm_kms_destroy(&dpu_kms->base);
-}
-static irqreturn_t dpu_irq(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- return dpu_core_irq(dpu_kms);
-}
-
-static void dpu_irq_preinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- dpu_core_irq_preinstall(dpu_kms);
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&dpu_kms->pdev->dev);
}
static int dpu_irq_postinstall(struct msm_kms *kms)
@@ -872,13 +903,6 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
return 0;
}
-static void dpu_irq_uninstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- dpu_core_irq_uninstall(dpu_kms);
-}
-
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
int i;
@@ -923,6 +947,11 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);
+ /* dump WB sub-blocks HW regs info */
+ for (i = 0; i < cat->wb_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
+ dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);
+
msm_disp_snapshot_add_block(disp_state, top->hw.length,
dpu_kms->mmio + top->hw.blk_off, "top");
@@ -931,10 +960,10 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
- .irq_preinstall = dpu_irq_preinstall,
+ .irq_preinstall = dpu_core_irq_preinstall,
.irq_postinstall = dpu_irq_postinstall,
- .irq_uninstall = dpu_irq_uninstall,
- .irq = dpu_irq,
+ .irq_uninstall = dpu_core_irq_uninstall,
+ .irq = dpu_core_irq,
.enable_commit = dpu_kms_enable_commit,
.disable_commit = dpu_kms_disable_commit,
.vsync_time = dpu_kms_vsync_time,
@@ -973,12 +1002,16 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
+ struct device *dpu_dev = dpu_kms->dev->dev;
+ struct device *mdss_dev = dpu_dev->parent;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return 0;
- mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+	/*
+	 * IOMMUs are a part of the MDSS device tree binding, not the
+	 * MDP/DPU device.
+	 */
+ mmu = msm_iommu_new(mdss_dev, domain);
if (IS_ERR(mmu)) {
iommu_domain_free(domain);
return PTR_ERR(mmu);
@@ -1172,37 +1205,16 @@ error:
return rc;
}
-struct msm_kms *dpu_kms_init(struct drm_device *dev)
+static int dpu_kms_init(struct drm_device *ddev)
{
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- int irq;
-
- if (!dev) {
- DPU_ERROR("drm device node invalid\n");
- return ERR_PTR(-EINVAL);
- }
-
- priv = dev->dev_private;
- dpu_kms = to_dpu_kms(priv->kms);
-
- irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
- if (irq < 0) {
- DPU_ERROR("failed to get irq: %d\n", irq);
- return ERR_PTR(irq);
- }
- dpu_kms->base.irq = irq;
-
- return &dpu_kms->base;
-}
-
-static int dpu_bind(struct device *dev, struct device *master, void *data)
-{
- struct msm_drm_private *priv = dev_get_drvdata(master);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct device *dev = ddev->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *ddev = priv->dev;
struct dpu_kms *dpu_kms;
+ int irq;
+ struct dev_pm_opp *opp;
int ret = 0;
+ unsigned long max_freq = ULONG_MAX;
dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
if (!dpu_kms)
@@ -1225,7 +1237,11 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
}
dpu_kms->num_clocks = ret;
- platform_set_drvdata(pdev, dpu_kms);
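+	/* vote for the highest frequency in the OPP table (floor of ULONG_MAX) */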
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (!IS_ERR(opp))
+ dev_pm_opp_put(opp);
+
+ dev_pm_opp_set_rate(dev, max_freq);
ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
if (ret) {
@@ -1240,31 +1256,25 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
priv->kms = &dpu_kms->base;
- return ret;
-}
-
-static void dpu_unbind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
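+	/* irq_of_parse_and_map() returns 0 on failure, not a negative errno */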
+ if (!irq) {
+ DPU_ERROR("failed to get irq\n");
+ return -EINVAL;
+ }
+ dpu_kms->base.irq = irq;
- if (dpu_kms->rpm_enabled)
- pm_runtime_disable(&pdev->dev);
+ return 0;
}
-static const struct component_ops dpu_ops = {
- .bind = dpu_bind,
- .unbind = dpu_unbind,
-};
-
static int dpu_dev_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &dpu_ops);
+ return msm_drv_probe(&pdev->dev, dpu_kms_init);
}
static int dpu_dev_remove(struct platform_device *pdev)
{
- component_del(&pdev->dev, &dpu_ops);
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
return 0;
}
@@ -1272,7 +1282,8 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
int i;
struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
/* Drop the performance state vote */
dev_pm_opp_set_rate(dev, 0);
@@ -1288,7 +1299,8 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
- struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = platform_get_drvdata(pdev);
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *encoder;
struct drm_device *ddev;
int i;
@@ -1318,9 +1330,11 @@ static const struct dev_pm_ops dpu_pm_ops = {
SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
};
-const struct of_device_id dpu_dt_match[] = {
+static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,msm8998-dpu", },
{ .compatible = "qcom,qcm2290-dpu", },
{ .compatible = "qcom,sdm845-dpu", },
@@ -1336,6 +1350,7 @@ MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
.probe = dpu_dev_probe,
.remove = dpu_dev_remove,
+ .shutdown = msm_drv_shutdown,
.driver = {
.name = "msm_dpu",
.of_match_table = dpu_dt_match,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 779e7bd01efd..832a0769f2e7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -65,18 +65,6 @@
#define DPU_NAME_SIZE 12
-/*
- * struct dpu_irq_callback - IRQ callback handlers
- * @list: list to callback
- * @func: intr handler
- * @arg: argument for the handler
- */
-struct dpu_irq_callback {
- struct list_head list;
- void (*func)(void *arg, int irq_idx);
- void *arg;
-};
-
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -145,6 +133,7 @@ struct dpu_global_state {
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
+ uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
};
struct dpu_global_state
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
deleted file mode 100644
index b10ca505f9ac..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/irqdesc.h>
-#include <linux/irqchip/chained_irq.h>
-#include "dpu_kms.h"
-
-#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
-
-#define HW_REV 0x0
-#define HW_INTR_STATUS 0x0010
-
-#define UBWC_STATIC 0x144
-#define UBWC_CTRL_2 0x150
-#define UBWC_PREDICTION_MODE 0x154
-
-/* Max BW defined in KBps */
-#define MAX_BW 6800000
-
-struct dpu_irq_controller {
- unsigned long enabled_mask;
- struct irq_domain *domain;
-};
-
-struct dpu_mdss {
- struct msm_mdss base;
- void __iomem *mmio;
- struct clk_bulk_data *clocks;
- size_t num_clocks;
- struct dpu_irq_controller irq_controller;
-};
-
-static void dpu_mdss_irq(struct irq_desc *desc)
-{
- struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 interrupts;
-
- chained_irq_enter(chip, desc);
-
- interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
-
- while (interrupts) {
- irq_hw_number_t hwirq = fls(interrupts) - 1;
- int rc;
-
- rc = generic_handle_domain_irq(dpu_mdss->irq_controller.domain,
- hwirq);
- if (rc < 0) {
- DRM_ERROR("handle irq fail: irq=%lu rc=%d\n",
- hwirq, rc);
- break;
- }
-
- interrupts &= ~(1 << hwirq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void dpu_mdss_irq_mask(struct irq_data *irqd)
-{
- struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
-
- /* memory barrier */
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
- /* memory barrier */
- smp_mb__after_atomic();
-}
-
-static void dpu_mdss_irq_unmask(struct irq_data *irqd)
-{
- struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
-
- /* memory barrier */
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
- /* memory barrier */
- smp_mb__after_atomic();
-}
-
-static struct irq_chip dpu_mdss_irq_chip = {
- .name = "dpu_mdss",
- .irq_mask = dpu_mdss_irq_mask,
- .irq_unmask = dpu_mdss_irq_unmask,
-};
-
-static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
-
-static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct dpu_mdss *dpu_mdss = domain->host_data;
-
- irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
- irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
- return irq_set_chip_data(irq, dpu_mdss);
-}
-
-static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
- .map = dpu_mdss_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
-{
- struct device *dev;
- struct irq_domain *domain;
-
- dev = dpu_mdss->base.dev;
-
- domain = irq_domain_add_linear(dev->of_node, 32,
- &dpu_mdss_irqdomain_ops, dpu_mdss);
- if (!domain) {
- DPU_ERROR("failed to add irq_domain\n");
- return -EINVAL;
- }
-
- dpu_mdss->irq_controller.enabled_mask = 0;
- dpu_mdss->irq_controller.domain = domain;
-
- return 0;
-}
-
-static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
-{
- if (dpu_mdss->irq_controller.domain) {
- irq_domain_remove(dpu_mdss->irq_controller.domain);
- dpu_mdss->irq_controller.domain = NULL;
- }
-}
-static int dpu_mdss_enable(struct msm_mdss *mdss)
-{
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
- int ret;
-
- ret = clk_bulk_prepare_enable(dpu_mdss->num_clocks, dpu_mdss->clocks);
- if (ret) {
- DPU_ERROR("clock enable failed, ret:%d\n", ret);
- return ret;
- }
-
- /*
- * ubwc config is part of the "mdss" region which is not accessible
- * from the rest of the driver. hardcode known configurations here
- */
- switch (readl_relaxed(dpu_mdss->mmio + HW_REV)) {
- case DPU_HW_VER_500:
- case DPU_HW_VER_501:
- writel_relaxed(0x420, dpu_mdss->mmio + UBWC_STATIC);
- break;
- case DPU_HW_VER_600:
- /* TODO: 0x102e for LP_DDR4 */
- writel_relaxed(0x103e, dpu_mdss->mmio + UBWC_STATIC);
- writel_relaxed(2, dpu_mdss->mmio + UBWC_CTRL_2);
- writel_relaxed(1, dpu_mdss->mmio + UBWC_PREDICTION_MODE);
- break;
- case DPU_HW_VER_620:
- writel_relaxed(0x1e, dpu_mdss->mmio + UBWC_STATIC);
- break;
- case DPU_HW_VER_720:
- writel_relaxed(0x101e, dpu_mdss->mmio + UBWC_STATIC);
- break;
- }
-
- return ret;
-}
-
-static int dpu_mdss_disable(struct msm_mdss *mdss)
-{
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
-
- clk_bulk_disable_unprepare(dpu_mdss->num_clocks, dpu_mdss->clocks);
-
- return 0;
-}
-
-static void dpu_mdss_destroy(struct msm_mdss *mdss)
-{
- struct platform_device *pdev = to_platform_device(mdss->dev);
- struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
- int irq;
-
- pm_runtime_suspend(mdss->dev);
- pm_runtime_disable(mdss->dev);
- _dpu_mdss_irq_domain_fini(dpu_mdss);
- irq = platform_get_irq(pdev, 0);
- irq_set_chained_handler_and_data(irq, NULL, NULL);
-
- if (dpu_mdss->mmio)
- devm_iounmap(&pdev->dev, dpu_mdss->mmio);
- dpu_mdss->mmio = NULL;
-}
-
-static const struct msm_mdss_funcs mdss_funcs = {
- .enable = dpu_mdss_enable,
- .disable = dpu_mdss_disable,
- .destroy = dpu_mdss_destroy,
-};
-
-int dpu_mdss_init(struct platform_device *pdev)
-{
- struct msm_drm_private *priv = platform_get_drvdata(pdev);
- struct dpu_mdss *dpu_mdss;
- int ret;
- int irq;
-
- dpu_mdss = devm_kzalloc(&pdev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
- if (!dpu_mdss)
- return -ENOMEM;
-
- dpu_mdss->mmio = msm_ioremap(pdev, "mdss");
- if (IS_ERR(dpu_mdss->mmio))
- return PTR_ERR(dpu_mdss->mmio);
-
- DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
-
- ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_mdss->clocks);
- if (ret < 0) {
- DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
- goto clk_parse_err;
- }
- dpu_mdss->num_clocks = ret;
-
- dpu_mdss->base.dev = &pdev->dev;
- dpu_mdss->base.funcs = &mdss_funcs;
-
- ret = _dpu_mdss_irq_domain_add(dpu_mdss);
- if (ret)
- goto irq_domain_error;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto irq_error;
- }
-
- irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
- dpu_mdss);
-
- priv->mdss = &dpu_mdss->base;
-
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-
-irq_error:
- _dpu_mdss_irq_domain_fini(dpu_mdss);
-irq_domain_error:
-clk_parse_err:
- if (dpu_mdss->mmio)
- devm_iounmap(&pdev->dev, dpu_mdss->mmio);
- dpu_mdss->mmio = NULL;
- return ret;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6565682fe227..9d2f0364d2c7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -280,31 +280,6 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
}
/**
- * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
- * @tbl: Pointer to LUT table
- * @total_fl: fill level
- * Return: LUT setting corresponding to the fill level
- */
-static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
- u32 total_fl)
-{
- int i;
-
- if (!tbl || !tbl->nentry || !tbl->entries)
- return 0;
-
- for (i = 0; i < tbl->nentry; i++)
- if (total_fl <= tbl->entries[i].fl)
- return tbl->entries[i].lut;
-
- /* if last fl is zero, use as default */
- if (!tbl->entries[i-1].fl)
- return tbl->entries[i-1].lut;
-
- return 0;
-}
-
-/**
* _dpu_plane_set_qos_lut - set QoS LUT of the given plane
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
@@ -333,7 +308,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
}
- qos_lut = _dpu_plane_get_qos_lut(
+ qos_lut = _dpu_hw_get_qos_lut(
&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
@@ -528,11 +503,19 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct dpu_hw_scaler3_cfg *scale_cfg,
- struct dpu_hw_pixel_ext *pixel_ext,
const struct dpu_format *fmt,
uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
{
uint32_t i;
+ bool inline_rotation = pstate->rotation & DRM_MODE_ROTATE_90;
+
+ /*
+ * For inline rotation cases, scaler config is post-rotation,
+ * so swap the dimensions here. However, pixel extension will
+ * need pre-rotation settings.
+ */
+ if (inline_rotation)
+ swap(src_w, src_h);
scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
@@ -571,11 +554,6 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
}
-
- pixel_ext->num_ext_pxls_top[i] =
- scale_cfg->src_height[i];
- pixel_ext->num_ext_pxls_left[i] =
- scale_cfg->src_width[i];
}
if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
&& (src_w == dst_w))
@@ -591,6 +569,24 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
+static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
+ struct dpu_hw_pixel_ext *pixel_ext,
+ uint32_t src_w, uint32_t src_h,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ int i;
+
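+	/*
+	 * Pixel extension uses pre-rotation dimensions; chroma planes are
+	 * scaled down by the format's subsampling factors.
+	 */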
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ src_w /= chroma_subsmpl_h;
+ src_h /= chroma_subsmpl_v;
+ }
+
+ pixel_ext->num_ext_pxls_top[i] = src_h;
+ pixel_ext->num_ext_pxls_left[i] = src_w;
+ }
+}
+
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
/* S15.16 format */
@@ -654,6 +650,10 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
struct dpu_hw_scaler3_cfg scaler3_cfg;
struct dpu_hw_pixel_ext pixel_ext;
+ u32 src_width = drm_rect_width(&pipe_cfg->src_rect);
+ u32 src_height = drm_rect_height(&pipe_cfg->src_rect);
+ u32 dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ u32 dst_height = drm_rect_height(&pipe_cfg->dst_rect);
memset(&scaler3_cfg, 0, sizeof(scaler3_cfg));
memset(&pixel_ext, 0, sizeof(pixel_ext));
@@ -661,13 +661,17 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
/* don't chroma subsample if decimating */
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
- drm_rect_width(&pipe_cfg->src_rect),
- drm_rect_height(&pipe_cfg->src_rect),
- drm_rect_width(&pipe_cfg->dst_rect),
- drm_rect_height(&pipe_cfg->dst_rect),
- &scaler3_cfg, &pixel_ext, fmt,
+ src_width,
+ src_height,
+ dst_width,
+ dst_height,
+ &scaler3_cfg, fmt,
info->hsub, info->vsub);
+	/* configure pixel extension based on scaler config */
+ _dpu_plane_setup_pixel_ext(&scaler3_cfg, &pixel_ext,
+ src_width, src_height, info->hsub, info->vsub);
+
if (pdpu->pipe_hw->ops.setup_pe)
pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
&pixel_ext);
@@ -956,6 +960,34 @@ static bool dpu_plane_validate_src(struct drm_rect *src,
drm_rect_equals(fb_rect, src);
}
+static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
+ const struct dpu_sspp_sub_blks *sblk,
+ struct drm_rect src, const struct dpu_format *fmt)
+{
+ size_t num_formats;
+ const u32 *supported_formats;
+
+ if (!sblk->rotation_cfg) {
+ DPU_ERROR("invalid rotation cfg\n");
+ return -EINVAL;
+ }
+
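+	/* with ROTATE_90 the source width becomes the post-rotation height */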
+ if (drm_rect_width(&src) > sblk->rotation_cfg->rot_maxheight) {
+ DPU_DEBUG_PLANE(pdpu, "invalid height for inline rot:%d max:%d\n",
+ src.y2, sblk->rotation_cfg->rot_maxheight);
+ return -EINVAL;
+ }
+
+ supported_formats = sblk->rotation_cfg->rot_format_list;
+ num_formats = sblk->rotation_cfg->rot_num_formats;
+
+ if (!DPU_FORMAT_IS_UBWC(fmt) ||
+ !dpu_find_format(fmt->base.pixel_format, supported_formats, num_formats))
+ return -EINVAL;
+
+ return 0;
+}
+
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -968,15 +1000,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
uint32_t min_src_size, max_linewidth;
+ unsigned int rotation;
+ uint32_t supported_rotations;
+ const struct dpu_sspp_cfg *pipe_hw_caps = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = pdpu->pipe_hw->cap->sblk;
if (new_plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
- min_scale = FRAC_16_16(1, pdpu->pipe_hw->cap->sblk->maxupscale);
+ min_scale = FRAC_16_16(1, sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- pdpu->pipe_hw->cap->sblk->maxdwnscale << 16,
+ sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -1002,8 +1038,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) ||
- !(pdpu->pipe_hw->cap->features & DPU_SSPP_CSC_ANY))) {
+ (!(pipe_hw_caps->features & DPU_SSPP_SCALER) ||
+ !(pipe_hw_caps->features & DPU_SSPP_CSC_ANY))) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
return -EINVAL;
@@ -1036,6 +1072,22 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
return -E2BIG;
}
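+	/* reflection is always supported; ROTATE_90 requires inline rotation */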
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_90;
+
+ rotation = drm_rotation_simplify(new_plane_state->rotation,
+ supported_rotations);
+
+ if ((pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION)) &&
+ (rotation & DRM_MODE_ROTATE_90)) {
+ ret = dpu_plane_check_inline_rotation(pdpu, sblk, src, fmt);
+ if (ret)
+ return ret;
+ }
+
+ pstate->rotation = rotation;
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
return 0;
@@ -1151,29 +1203,27 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->multirect_mode);
if (pdpu->pipe_hw->ops.setup_format) {
- unsigned int rotation;
+ unsigned int rotation = pstate->rotation;
src_flags = 0x0;
- rotation = drm_rotation_simplify(state->rotation,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
-
if (rotation & DRM_MODE_REFLECT_X)
src_flags |= DPU_SSPP_FLIP_LR;
if (rotation & DRM_MODE_REFLECT_Y)
src_flags |= DPU_SSPP_FLIP_UD;
+ if (rotation & DRM_MODE_ROTATE_90)
+ src_flags |= DPU_SSPP_ROT_90;
+
/* update format */
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
pstate->multirect_index);
if (pdpu->pipe_hw->ops.setup_cdp) {
- struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+ struct dpu_hw_cdp_cfg cdp_cfg;
- memset(&cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_cdp_cfg));
cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg
[DPU_PERF_CDP_USAGE_RT].rd_enable;
@@ -1411,13 +1461,9 @@ static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
- if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) {
- int i;
- for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) {
- if (format == qcom_compressed_supported_formats[i])
- return true;
- }
- }
+ if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
+ return dpu_find_format(format, qcom_compressed_supported_formats,
+ ARRAY_SIZE(qcom_compressed_supported_formats));
return false;
}
@@ -1462,6 +1508,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
uint32_t num_formats;
+ uint32_t supported_rotations;
int ret = -EINVAL;
/* create and zero local structure */
@@ -1530,12 +1577,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
+ supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
+
+ if (pdpu->pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
+ supported_rotations |= DRM_MODE_ROTATE_MASK;
+
drm_plane_create_rotation_property(plane,
- DRM_MODE_ROTATE_0,
- DRM_MODE_ROTATE_0 |
- DRM_MODE_ROTATE_180 |
- DRM_MODE_REFLECT_X |
- DRM_MODE_REFLECT_Y);
+ DRM_MODE_ROTATE_0, supported_rotations);
drm_plane_enable_fb_damage_clips(plane);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 50781e2d3577..e1463107a6fc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -26,6 +26,7 @@
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
* @needs_dirtyfb: whether attached CRTC needs pixel data explicitly flushed
+ * @rotation: simplified drm rotation hint
*/
struct dpu_plane_state {
struct drm_plane_state base;
@@ -40,6 +41,7 @@ struct dpu_plane_state {
u64 plane_clk;
bool needs_dirtyfb;
+ unsigned int rotation;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 7497538adae1..06f03e7081bc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -9,8 +9,10 @@
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_wb.h"
#include "dpu_hw_dspp.h"
#include "dpu_hw_merge3d.h"
+#include "dpu_hw_dsc.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
@@ -77,6 +79,18 @@ int dpu_rm_destroy(struct dpu_rm *rm)
for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
dpu_hw_intf_destroy(rm->hw_intf[i]);
+ for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
+ struct dpu_hw_dsc *hw;
+
+ if (rm->dsc_blks[i]) {
+ hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
+ dpu_hw_dsc_destroy(hw);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
+ dpu_hw_wb_destroy(rm->hw_wb[i]);
+
return 0;
}
@@ -176,6 +190,24 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->hw_intf[intf->id - INTF_0] = hw;
}
+ for (i = 0; i < cat->wb_count; i++) {
+ struct dpu_hw_wb *hw;
+ const struct dpu_wb_cfg *wb = &cat->wb[i];
+
+ if (wb->id < WB_0 || wb->id >= WB_MAX) {
+ DPU_ERROR("skip intf %d with invalid id\n", wb->id);
+ continue;
+ }
+
+ hw = dpu_hw_wb_init(wb->id, mmio, cat);
+ if (IS_ERR(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed wb object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->hw_wb[wb->id - WB_0] = hw;
+ }
+
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
@@ -210,6 +242,19 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
}
+ for (i = 0; i < cat->dsc_count; i++) {
+ struct dpu_hw_dsc *hw;
+ const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
+
+ hw = dpu_hw_dsc_init(dsc->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dsc object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dsc_blks[dsc->id - DSC_0] = &hw->base;
+ }
+
return 0;
fail:
@@ -441,6 +486,28 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
+static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ struct drm_encoder *enc,
+ const struct msm_display_topology *top)
+{
+ int num_dsc = top->num_dsc;
+ int i;
+
+	/* check that the required DSC blocks are not already allocated */
+ for (i = 0; i < num_dsc; i++) {
+ if (global_state->dsc_to_enc_id[i]) {
+ DPU_ERROR("DSC %d is already allocated\n", i);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < num_dsc; i++)
+ global_state->dsc_to_enc_id[i] = enc->base.id;
+
+ return 0;
+}
+
static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
@@ -462,6 +529,10 @@ static int _dpu_rm_make_reservation(
return ret;
}
+ ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -499,6 +570,8 @@ void dpu_rm_release(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->mixer_to_enc_id), enc->base.id);
_dpu_rm_clear_mapping(global_state->ctl_to_enc_id,
ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id);
+ _dpu_rm_clear_mapping(global_state->dsc_to_enc_id,
+ ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id);
}
int dpu_rm_reserve(
@@ -567,6 +640,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->dspp_to_enc_id;
max_blks = ARRAY_SIZE(rm->dspp_blks);
break;
+ case DPU_HW_BLK_DSC:
+ hw_blks = rm->dsc_blks;
+ hw_to_enc_id = global_state->dsc_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dsc_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 9b13200a050a..2f34a31d8d0d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -19,6 +19,7 @@ struct dpu_global_state;
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
+ * @hw_wb: array of wb hardware resources
* @dspp_blks: array of dspp hardware resources
*/
struct dpu_rm {
@@ -26,8 +27,10 @@ struct dpu_rm {
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
+ struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
+ struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
};
/**
@@ -95,5 +98,15 @@ static inline struct dpu_hw_intf *dpu_rm_get_intf(struct dpu_rm *rm, enum dpu_in
return rm->hw_intf[intf_idx - INTF_0];
}
+/**
+ * dpu_rm_get_wb - Return a struct dpu_hw_wb instance given its index.
+ * @rm: DPU Resource Manager handle
+ * @wb_idx: WB index
+ */
+static inline struct dpu_hw_wb *dpu_rm_get_wb(struct dpu_rm *rm, enum dpu_wb wb_idx)
+{
+ return rm->hw_wb[wb_idx - WB_0];
+}
+
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 54d74341e690..76169f406505 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -167,55 +167,46 @@ TRACE_EVENT(dpu_perf_crtc_update,
__entry->update_clk)
);
-DECLARE_EVENT_CLASS(dpu_enc_irq_template,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
- int irq_idx),
- TP_ARGS(drm_id, intr_idx, irq_idx),
+DECLARE_EVENT_CLASS(dpu_irq_template,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
TP_STRUCT__entry(
- __field( uint32_t, drm_id )
- __field( enum dpu_intr_idx, intr_idx )
__field( int, irq_idx )
),
TP_fast_assign(
- __entry->drm_id = drm_id;
- __entry->intr_idx = intr_idx;
__entry->irq_idx = irq_idx;
),
- TP_printk("id=%u, intr=%d, irq=%d",
- __entry->drm_id, __entry->intr_idx,
- __entry->irq_idx)
+ TP_printk("irq=%d", __entry->irq_idx)
);
-DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
- int irq_idx),
- TP_ARGS(drm_id, intr_idx, irq_idx)
+DEFINE_EVENT(dpu_irq_template, dpu_irq_register_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
);
-DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
- int irq_idx),
- TP_ARGS(drm_id, intr_idx, irq_idx)
+DEFINE_EVENT(dpu_irq_template, dpu_irq_unregister_success,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx)
);
TRACE_EVENT(dpu_enc_irq_wait_success,
- TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx,
+ TP_PROTO(uint32_t drm_id, void *func,
int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
- TP_ARGS(drm_id, intr_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_ARGS(drm_id, func, irq_idx, pp_idx, atomic_cnt),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
- __field( enum dpu_intr_idx, intr_idx )
+ __field( void *, func )
__field( int, irq_idx )
__field( enum dpu_pingpong, pp_idx )
__field( int, atomic_cnt )
),
TP_fast_assign(
__entry->drm_id = drm_id;
- __entry->intr_idx = intr_idx;
+ __entry->func = func;
__entry->irq_idx = irq_idx;
__entry->pp_idx = pp_idx;
__entry->atomic_cnt = atomic_cnt;
),
- TP_printk("id=%u, intr=%d, irq=%d, pp=%d, atomic_cnt=%d",
- __entry->drm_id, __entry->intr_idx,
+ TP_printk("id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->func,
__entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
);
@@ -389,20 +380,26 @@ TRACE_EVENT(dpu_enc_rc,
);
TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
- TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
- TP_ARGS(drm_id, event, intf_idx),
+ TP_PROTO(uint32_t drm_id, u32 event, char *intf_mode, enum dpu_intf intf_idx,
+ enum dpu_wb wb_idx),
+ TP_ARGS(drm_id, event, intf_mode, intf_idx, wb_idx),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( u32, event )
+ __string( intf_mode_str, intf_mode )
__field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->event = event;
+ __assign_str(intf_mode_str, intf_mode);
__entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
),
- TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
- __entry->intf_idx)
+ TP_printk("id=%u, event=%u, intf_mode=%s intf=%d wb=%d", __entry->drm_id,
+ __entry->event, __get_str(intf_mode_str),
+ __entry->intf_idx, __entry->wb_idx)
);
TRACE_EVENT(dpu_enc_frame_done_cb,
@@ -424,14 +421,16 @@ TRACE_EVENT(dpu_enc_frame_done_cb,
);
TRACE_EVENT(dpu_enc_trigger_flush,
- TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ TP_PROTO(uint32_t drm_id, char *intf_mode, enum dpu_intf intf_idx, enum dpu_wb wb_idx,
int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
u32 pending_flush_ret),
- TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ TP_ARGS(drm_id, intf_mode, intf_idx, wb_idx, pending_kickoff_cnt, ctl_idx,
extra_flush_bits, pending_flush_ret),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
+ __string( intf_mode_str, intf_mode )
__field( enum dpu_intf, intf_idx )
+ __field( enum dpu_wb, wb_idx )
__field( int, pending_kickoff_cnt )
__field( int, ctl_idx )
__field( u32, extra_flush_bits )
@@ -439,15 +438,17 @@ TRACE_EVENT(dpu_enc_trigger_flush,
),
TP_fast_assign(
__entry->drm_id = drm_id;
+ __assign_str(intf_mode_str, intf_mode);
__entry->intf_idx = intf_idx;
+ __entry->wb_idx = wb_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
__entry->ctl_idx = ctl_idx;
__entry->extra_flush_bits = extra_flush_bits;
__entry->pending_flush_ret = pending_flush_ret;
),
- TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ TP_printk("id=%u, intf_mode=%s, intf_idx=%d, wb_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
"extra_flush_bits=0x%x pending_flush_ret=0x%x",
- __entry->drm_id, __entry->intf_idx,
+ __entry->drm_id, __get_str(intf_mode_str), __entry->intf_idx, __entry->wb_idx,
__entry->pending_kickoff_cnt, __entry->ctl_idx,
__entry->extra_flush_bits, __entry->pending_flush_ret)
);
@@ -871,27 +872,31 @@ TRACE_EVENT(dpu_pp_connect_ext_te,
TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
);
-DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+TRACE_EVENT(dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, void *callback),
TP_ARGS(irq_idx, callback),
TP_STRUCT__entry(
__field( int, irq_idx )
- __field( struct dpu_irq_callback *, callback)
+ __field( void *, callback)
),
TP_fast_assign(
__entry->irq_idx = irq_idx;
__entry->callback = callback;
),
- TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ TP_printk("irq_idx:%d callback:%ps", __entry->irq_idx,
__entry->callback)
);
-DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
- TP_ARGS(irq_idx, callback)
-);
-DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
- TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
- TP_ARGS(irq_idx, callback)
+
+TRACE_EVENT(dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx),
+ TP_ARGS(irq_idx),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("irq_idx:%d", __entry->irq_idx)
);
TRACE_EVENT(dpu_core_perf_update_clk,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
new file mode 100644
index 000000000000..7620ffe55779
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dpu_writeback.h"
+
+static int dpu_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
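+	/* writeback has no EDID; report modes up to the max fb dimensions */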
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static const struct drm_connector_funcs dpu_wb_conn_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int dpu_wb_conn_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return 0;
+
+ dpu_encoder_prepare_wb_job(dpu_wb_conn->wb_enc, job);
+
+ return 0;
+}
+
+static void dpu_wb_conn_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct dpu_wb_connector *dpu_wb_conn = to_dpu_wb_conn(connector);
+
+ if (!job->fb)
+ return;
+
+ dpu_encoder_cleanup_wb_job(dpu_wb_conn->wb_enc, job);
+}
+
+static const struct drm_connector_helper_funcs dpu_wb_conn_helper_funcs = {
+ .get_modes = dpu_wb_conn_get_modes,
+ .prepare_writeback_job = dpu_wb_conn_prepare_job,
+ .cleanup_writeback_job = dpu_wb_conn_cleanup_job,
+};
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats)
+{
+ struct dpu_wb_connector *dpu_wb_conn;
+ int rc = 0;
+
+	dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
+	if (!dpu_wb_conn)
+		return -ENOMEM;
+
+ drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
+
+	/*
+	 * DPU initializes and fully sets up the encoder for writeback, so use
+	 * drm_writeback_connector_init_with_encoder(), which takes an already
+	 * initialized encoder, instead of letting the helper create one.
+	 */
+ rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
+ &dpu_wb_conn_funcs, format_list, num_formats);
+
+ if (!rc)
+ dpu_wb_conn->wb_enc = enc;
+
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
new file mode 100644
index 000000000000..5a75ea916101
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DPU_WRITEBACK_H
+#define _DPU_WRITEBACK_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_file.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_encoder_phys.h"
+
+struct dpu_wb_connector {
+ struct drm_writeback_connector base;
+ struct drm_encoder *wb_enc;
+};
+
+static inline struct dpu_wb_connector *to_dpu_wb_conn(struct drm_writeback_connector *conn)
+{
+ return container_of(conn, struct dpu_wb_connector, base);
+}
+
+int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
+ const u32 *format_list, u32 num_formats);
+
+#endif /*_DPU_WRITEBACK_H */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index aaf2f26f8505..39b8fe53c29d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
@@ -11,6 +11,8 @@
#include "mdp4_kms.h"
+#ifdef CONFIG_DRM_MSM_DSI
+
struct mdp4_dsi_encoder {
struct drm_encoder base;
struct drm_panel *panel;
@@ -170,3 +172,4 @@ fail:
return ERR_PTR(ret);
}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 3cf476c55158..fb48c8c19ec3 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -21,7 +21,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
struct drm_device *dev = mdp4_kms->dev;
u32 dmap_cfg, vg_cfg;
unsigned long clk;
- int ret = 0;
pm_runtime_get_sync(dev->dev);
@@ -72,7 +71,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
pm_runtime_put_sync(dev->dev);
- return ret;
+ return 0;
}
static void mdp4_enable_commit(struct msm_kms *kms)
@@ -229,9 +228,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
return PTR_ERR(connector);
}
- priv->encoders[priv->num_encoders++] = encoder;
- priv->connectors[priv->num_connectors++] = connector;
-
break;
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
@@ -252,8 +248,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
}
}
- priv->encoders[priv->num_encoders++] = encoder;
-
break;
case DRM_MODE_ENCODER_DSI:
/* only DSI1 supported for now */
@@ -272,7 +266,6 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* TODO: Add DMA_S later? */
encoder->possible_crtcs = 1 << DMA_P;
- priv->encoders[priv->num_encoders++] = encoder;
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
@@ -324,7 +317,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
ret = PTR_ERR(plane);
goto fail;
}
- priv->planes[priv->num_planes++] = plane;
}
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
@@ -389,7 +381,7 @@ static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
}
-struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+static int mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev);
@@ -403,8 +395,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
ret = mdp_kms_init(&mdp4_kms->base, &kms_funcs);
@@ -551,12 +542,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- return kms;
+ return 0;
fail:
if (kms)
mdp4_destroy(kms);
- return ERR_PTR(ret);
+
+ return ret;
}
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
@@ -569,3 +561,47 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
return &config;
}
+
+static const struct dev_pm_ops mdp4_pm_ops = {
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
+};
+
+static int mdp4_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, mdp4_kms_init);
+}
+
+static int mdp4_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &msm_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mdp4_dt_match[] = {
+ { .compatible = "qcom,mdp4" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mdp4_dt_match);
+
+static struct platform_driver mdp4_platform_driver = {
+ .probe = mdp4_probe,
+ .remove = mdp4_remove,
+ .shutdown = msm_drv_shutdown,
+ .driver = {
+ .name = "mdp4",
+ .of_match_table = mdp4_dt_match,
+ .pm = &mdp4_pm_ops,
+ },
+};
+
+void __init msm_mdp4_register(void)
+{
+ platform_driver_register(&mdp4_platform_driver);
+}
+
+void __exit msm_mdp4_unregister(void)
+{
+ platform_driver_unregister(&mdp4_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index ec6c7b09865e..a640af22eafc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -8,6 +8,8 @@
#include "mdp5_kms.h"
+#ifdef CONFIG_DRM_MSM_DSI
+
static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
{
struct msm_drm_private *priv = encoder->dev->dev_private;
@@ -198,3 +200,4 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return 0;
}
+#endif /* CONFIG_DRM_MSM_DSI */
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b966cd69f99d..fe2922c8d21b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -612,9 +612,15 @@ static int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
if (ret)
return ret;
- mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (ret)
+ return ret;
+
if (old_r_mixer) {
- mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ ret = mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (ret)
+ return ret;
+
if (!need_right_mixer)
pipeline->r_mixer = NULL;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3b92372e7bdf..3d5621a68f85 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -203,6 +203,8 @@ static int mdp5_set_split_display(struct msm_kms *kms,
slave_encoder);
}
+static void mdp5_destroy(struct platform_device *pdev);
+
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -221,6 +223,7 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
}
mdp_kms_destroy(&mdp5_kms->base);
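+	/* the component unbind path is gone, so release the mdp5 state here */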
+ mdp5_destroy(mdp5_kms->pdev);
}
#ifdef CONFIG_DEBUG_FS
@@ -319,7 +322,6 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
encoder = mdp5_encoder_init(dev, intf, ctl);
@@ -328,8 +330,6 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
return encoder;
}
- priv->encoders[priv->num_encoders++] = encoder;
-
return encoder;
}
@@ -434,6 +434,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
int i, ret, pi = 0, ci = 0;
struct drm_plane *primary[MAX_BASES] = { NULL };
struct drm_plane *cursor[MAX_BASES] = { NULL };
+ struct drm_encoder *encoder;
+ unsigned int num_encoders;
/*
* Construct encoders and modeset initialize connector devices
@@ -445,12 +447,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
goto fail;
}
+ num_encoders = 0;
+ drm_for_each_encoder(encoder, dev)
+ num_encoders++;
+
/*
* We should ideally have less number of encoders (set up by parsing
* the MDP5 interfaces) than the number of layer mixers present in HW,
* but let's be safe here anyway
*/
- num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
+ num_crtcs = min(num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
@@ -475,7 +481,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
- priv->planes[priv->num_planes++] = plane;
if (type == DRM_PLANE_TYPE_PRIMARY)
primary[pi++] = plane;
@@ -499,11 +504,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* Now that we know the number of crtcs we've created, set the possible
* crtcs for the encoders
*/
- for (i = 0; i < priv->num_encoders; i++) {
- struct drm_encoder *encoder = priv->encoders[i];
-
+ drm_for_each_encoder(encoder, dev)
encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
- }
return 0;
@@ -544,7 +546,9 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
-struct msm_kms *mdp5_kms_init(struct drm_device *dev)
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev);
+
+static int mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev;
@@ -555,10 +559,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
int irq, i, ret;
struct device *iommu_dev;
+	ret = mdp5_init(to_platform_device(dev->dev), dev);
+	if (ret)
+		return ret;
+
/* priv->kms would have been populated by the MDP5 driver */
kms = priv->kms;
if (!kms)
- return NULL;
+ return -ENOMEM;
mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
pdev = mdp5_kms->pdev;
@@ -570,9 +576,9 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
}
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
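+	/* a zero return means the IRQ could not be mapped */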
- if (irq < 0) {
- ret = irq;
- DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
+ if (!irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq\n");
goto fail;
}
@@ -637,11 +643,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
- return kms;
+ return 0;
fail:
if (kms)
mdp5_kms_destroy(kms);
- return ERR_PTR(ret);
+
+ return ret;
}
static void mdp5_destroy(struct platform_device *pdev)
@@ -912,35 +919,14 @@ fail:
return ret;
}
-static int mdp5_bind(struct device *dev, struct device *master, void *data)
-{
- struct msm_drm_private *priv = dev_get_drvdata(master);
- struct drm_device *ddev = priv->dev;
- struct platform_device *pdev = to_platform_device(dev);
-
- DBG("");
-
- return mdp5_init(pdev, ddev);
-}
-
-static void mdp5_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- mdp5_destroy(pdev);
-}
-
-static const struct component_ops mdp5_ops = {
- .bind = mdp5_bind,
- .unbind = mdp5_unbind,
-};
-
static int mdp5_setup_interconnect(struct platform_device *pdev)
{
- struct icc_path *path0 = of_icc_get(&pdev->dev, "mdp0-mem");
- struct icc_path *path1 = of_icc_get(&pdev->dev, "mdp1-mem");
- struct icc_path *path_rot = of_icc_get(&pdev->dev, "rotator-mem");
+	/*
+	 * Interconnects are a part of the MDSS device tree binding, not the
+	 * MDP5 device.
+	 */
+ struct device *mdss_dev = pdev->dev.parent;
+ struct icc_path *path0 = of_icc_get(mdss_dev, "mdp0-mem");
+ struct icc_path *path1 = of_icc_get(mdss_dev, "mdp1-mem");
+ struct icc_path *path_rot = of_icc_get(mdss_dev, "rotator-mem");
if (IS_ERR(path0))
return PTR_ERR(path0);
@@ -976,13 +962,13 @@ static int mdp5_dev_probe(struct platform_device *pdev)
if (ret)
return ret;
- return component_add(&pdev->dev, &mdp5_ops);
+ return msm_drv_probe(&pdev->dev, mdp5_kms_init);
}
static int mdp5_dev_remove(struct platform_device *pdev)
{
DBG("");
- component_del(&pdev->dev, &mdp5_ops);
+ component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
@@ -1008,9 +994,11 @@ static __maybe_unused int mdp5_runtime_resume(struct device *dev)
static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
+ .prepare = msm_pm_prepare,
+ .complete = msm_pm_complete,
};
-const struct of_device_id mdp5_dt_match[] = {
+static const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", },
@@ -1021,6 +1009,7 @@ MODULE_DEVICE_TABLE(of, mdp5_dt_match);
static struct platform_driver mdp5_driver = {
.probe = mdp5_dev_probe,
.remove = mdp5_dev_remove,
+ .shutdown = msm_drv_shutdown,
.driver = {
.name = "msm_mdp",
.of_match_table = mdp5_dt_match,
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
deleted file mode 100644
index 049c6784a531..000000000000
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ /dev/null
@@ -1,252 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-
-#include "msm_drv.h"
-#include "mdp5_kms.h"
-
-#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
-
-struct mdp5_mdss {
- struct msm_mdss base;
-
- void __iomem *mmio, *vbif;
-
- struct clk *ahb_clk;
- struct clk *axi_clk;
- struct clk *vsync_clk;
-
- struct {
- volatile unsigned long enabled_mask;
- struct irq_domain *domain;
- } irqcontroller;
-};
-
-static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
-{
- msm_writel(data, mdp5_mdss->mmio + reg);
-}
-
-static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
-{
- return msm_readl(mdp5_mdss->mmio + reg);
-}
-
-static irqreturn_t mdss_irq(int irq, void *arg)
-{
- struct mdp5_mdss *mdp5_mdss = arg;
- u32 intr;
-
- intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
-
- VERB("intr=%08x", intr);
-
- while (intr) {
- irq_hw_number_t hwirq = fls(intr) - 1;
-
- generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
- intr &= ~(1 << hwirq);
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
- MDSS_HW_INTR_STATUS_INTR_DSI0 | \
- MDSS_HW_INTR_STATUS_INTR_DSI1 | \
- MDSS_HW_INTR_STATUS_INTR_HDMI | \
- MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdss_hw_mask_irq(struct irq_data *irqd)
-{
- struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void mdss_hw_unmask_irq(struct irq_data *irqd)
-{
- struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip mdss_hw_irq_chip = {
- .name = "mdss",
- .irq_mask = mdss_hw_mask_irq,
- .irq_unmask = mdss_hw_unmask_irq,
-};
-
-static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct mdp5_mdss *mdp5_mdss = d->host_data;
-
- if (!(VALID_IRQS & (1 << hwirq)))
- return -EPERM;
-
- irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdp5_mdss);
-
- return 0;
-}
-
-static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
- .map = mdss_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-
-static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
-{
- struct device *dev = mdp5_mdss->base.dev;
- struct irq_domain *d;
-
- d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdp5_mdss);
- if (!d) {
- DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
- return -ENXIO;
- }
-
- mdp5_mdss->irqcontroller.enabled_mask = 0;
- mdp5_mdss->irqcontroller.domain = d;
-
- return 0;
-}
-
-static int mdp5_mdss_enable(struct msm_mdss *mdss)
-{
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
- DBG("");
-
- clk_prepare_enable(mdp5_mdss->ahb_clk);
- clk_prepare_enable(mdp5_mdss->axi_clk);
- clk_prepare_enable(mdp5_mdss->vsync_clk);
-
- return 0;
-}
-
-static int mdp5_mdss_disable(struct msm_mdss *mdss)
-{
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
- DBG("");
-
- clk_disable_unprepare(mdp5_mdss->vsync_clk);
- clk_disable_unprepare(mdp5_mdss->axi_clk);
- clk_disable_unprepare(mdp5_mdss->ahb_clk);
-
- return 0;
-}
-
-static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
-{
- struct platform_device *pdev =
- to_platform_device(mdp5_mdss->base.dev);
-
- mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdp5_mdss->ahb_clk))
- mdp5_mdss->ahb_clk = NULL;
-
- mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdp5_mdss->axi_clk))
- mdp5_mdss->axi_clk = NULL;
-
- mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdp5_mdss->vsync_clk))
- mdp5_mdss->vsync_clk = NULL;
-
- return 0;
-}
-
-static void mdp5_mdss_destroy(struct msm_mdss *mdss)
-{
- struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
-
- if (!mdp5_mdss)
- return;
-
- irq_domain_remove(mdp5_mdss->irqcontroller.domain);
- mdp5_mdss->irqcontroller.domain = NULL;
-
- pm_runtime_disable(mdss->dev);
-}
-
-static const struct msm_mdss_funcs mdss_funcs = {
- .enable = mdp5_mdss_enable,
- .disable = mdp5_mdss_disable,
- .destroy = mdp5_mdss_destroy,
-};
-
-int mdp5_mdss_init(struct platform_device *pdev)
-{
- struct msm_drm_private *priv = platform_get_drvdata(pdev);
- struct mdp5_mdss *mdp5_mdss;
- int ret;
-
- DBG("");
-
- if (!of_device_is_compatible(pdev->dev.of_node, "qcom,mdss"))
- return 0;
-
- mdp5_mdss = devm_kzalloc(&pdev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
- if (!mdp5_mdss) {
- ret = -ENOMEM;
- goto fail;
- }
-
- mdp5_mdss->base.dev = &pdev->dev;
-
- mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys");
- if (IS_ERR(mdp5_mdss->mmio)) {
- ret = PTR_ERR(mdp5_mdss->mmio);
- goto fail;
- }
-
- mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys");
- if (IS_ERR(mdp5_mdss->vbif)) {
- ret = PTR_ERR(mdp5_mdss->vbif);
- goto fail;
- }
-
- ret = msm_mdss_get_clocks(mdp5_mdss);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to get clocks: %d\n", ret);
- goto fail;
- }
-
- ret = devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdp5_mdss);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to init irq: %d\n", ret);
- goto fail;
- }
-
- ret = mdss_irq_domain_init(mdp5_mdss);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to init sub-block irqs: %d\n", ret);
- goto fail;
- }
-
- mdp5_mdss->base.funcs = &mdss_funcs;
- priv->mdss = &mdp5_mdss->base;
-
- pm_runtime_enable(&pdev->dev);
-
- return 0;
-fail:
- return ret;
-}
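The deleted mdp5_mdss.c was a small wrapper around the top-level MDSS block: clock handling plus an interrupt demultiplexer that re-dispatched the single MDSS interrupt to the sub-blocks (MDP, DSI, HDMI, eDP) through a linear irq_domain. For reference, the dispatch idiom being removed, copied from mdss_irq() above: peel off the highest pending status bit and hand it to the domain until none remain:

	while (intr) {
		irq_hw_number_t hwirq = fls(intr) - 1;

		generic_handle_domain_irq(mdp5_mdss->irqcontroller.domain, hwirq);
		intr &= ~(1 << hwirq);
	}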
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 954db683ae44..2536def2a000 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -116,21 +116,28 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
return 0;
}
-void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+int mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
struct mdp5_global_state *global_state = mdp5_get_global_state(s);
- struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
+ struct mdp5_hw_mixer_state *new_state;
if (!mixer)
- return;
+ return 0;
+
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
+
+ new_state = &global_state->hwmixer;
if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from crtc %s", mixer->name,
new_state->hwmixer_to_crtc[mixer->idx]->name);
new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+
+ return 0;
}
void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 43c9ba43ce18..545ee223b9d7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
@@ -30,7 +30,7 @@ void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
uint32_t caps, struct mdp5_hw_mixer **mixer,
struct mdp5_hw_mixer **r_mixer);
-void mdp5_mixer_release(struct drm_atomic_state *s,
- struct mdp5_hw_mixer *mixer);
+int mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
#endif /* __MDP5_LM_H__ */
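mdp5_mixer_release() (and mdp5_pipe_release() in the next file) change from void to int because mdp5_get_global_state() can now return an ERR_PTR: the global-state lookup takes a modeset lock, which may hit the -EDEADLK back-off path, and that code has to reach the atomic core so it can drop locks and retry. The resulting caller pattern, as used in the mdp5_plane.c hunks below:

	ret = mdp5_pipe_release(state->state, old_hwpipe);
	if (ret)
		return ret;	/* typically -EDEADLK; the atomic core retries */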
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ba6695963aa6..a4f5cb90f3e8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -119,18 +119,23 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
return 0;
}
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
struct mdp5_global_state *state = mdp5_get_global_state(s);
- struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
+ struct mdp5_hw_pipe_state *new_state;
if (!hwpipe)
- return;
+ return 0;
+
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwpipe;
if (WARN_ON(!new_state->hwpipe_to_plane[hwpipe->idx]))
- return;
+ return -EINVAL;
DBG("%s: release from plane %s", hwpipe->name,
new_state->hwpipe_to_plane[hwpipe->idx]->name);
@@ -141,6 +146,8 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
}
new_state->hwpipe_to_plane[hwpipe->idx] = NULL;
+
+ return 0;
}
void mdp5_pipe_destroy(struct mdp5_hw_pipe *hwpipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index 9b26d0761bd4..cca67938cab2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
@@ -37,7 +37,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
uint32_t caps, uint32_t blkcfg,
struct mdp5_hw_pipe **hwpipe,
struct mdp5_hw_pipe **r_hwpipe);
-void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
+int mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe);
struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
uint32_t reg_offset, uint32_t caps);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index c478d25f7825..e8c47a4a1d31 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -314,12 +314,24 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
mdp5_state->r_hwpipe = NULL;
- mdp5_pipe_release(state->state, old_hwpipe);
- mdp5_pipe_release(state->state, old_right_hwpipe);
+ ret = mdp5_pipe_release(state->state, old_hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, old_right_hwpipe);
+ if (ret)
+ return ret;
+
}
} else {
- mdp5_pipe_release(state->state, mdp5_state->hwpipe);
- mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+ if (ret)
+ return ret;
+
+ ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+ if (ret)
+ return ret;
+
mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
}
@@ -385,8 +397,6 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
if (!crtc_state->active)
return -EINVAL;
- mdp5_state = to_mdp5_plane_state(new_plane_state);
-
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_state->hwpipe)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 077d3b6507e7..6666783e1468 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -26,6 +26,7 @@
struct dp_audio_private {
struct platform_device *audio_pdev;
struct platform_device *pdev;
+ struct drm_device *drm_dev;
struct dp_catalog *catalog;
struct dp_panel *panel;
@@ -136,7 +137,8 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
- DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
@@ -148,7 +150,8 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
- DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
@@ -162,7 +165,8 @@ static void dp_audio_stream_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
- DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
@@ -183,8 +187,9 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
- DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
- value, parity_byte);
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
@@ -196,7 +201,8 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
- DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
@@ -209,7 +215,8 @@ static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
- DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
@@ -229,7 +236,8 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
- DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
@@ -242,7 +250,8 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
- DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
@@ -255,7 +264,8 @@ static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
- DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
new_value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
@@ -275,7 +285,8 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
- DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
@@ -288,7 +299,8 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
- DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
@@ -301,7 +313,8 @@ static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
- DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
@@ -321,7 +334,8 @@ static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
- DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
@@ -334,7 +348,8 @@ static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
parity_byte = dp_audio_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
- DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+ drm_dbg_dp(audio->drm_dev,
+ "Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
@@ -370,7 +385,7 @@ static void dp_audio_setup_acr(struct dp_audio_private *audio)
select = 3;
break;
default:
- DRM_DEBUG_DP("Unknown link rate\n");
+ drm_dbg_dp(audio->drm_dev, "Unknown link rate\n");
select = 0;
break;
}
@@ -395,7 +410,8 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
safe_to_exit_level = 5;
break;
default:
- DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n",
+ drm_dbg_dp(audio->drm_dev,
+ "setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
safe_to_exit_level = 14;
break;
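The dp_audio.c hunks, and the same pattern across the DP files that follow, are a mechanical conversion from the driver-wide DRM_DEBUG_DP() macro to the per-device drm_dbg_dp(), which is why a struct drm_device back-pointer gets added to struct dp_audio_private. drm_dbg_dp() logs under the DP debug category against a specific drm_device, so output from multiple controllers (for instance the DP and eDP instances on sc7280 below) can be told apart. Before and after, using a line from this file:

	DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
		     value, parity_byte);

	drm_dbg_dp(audio->drm_dev,
		   "Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
		   value, parity_byte);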
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 6d36f63c3338..d030a93a08c3 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -34,6 +34,7 @@ struct dp_aux_private {
bool no_send_addr;
bool no_send_stop;
bool initted;
+ bool is_edp;
u32 offset;
u32 segment;
@@ -337,6 +338,22 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
goto exit;
}
+ /*
+ * For eDP it's important to give a reasonably long wait here for HPD
+ * to be asserted. This is because the panel driver may have _just_
+ * turned on the panel and then tried to do an AUX transfer. The panel
+ * driver has no way of knowing when the panel is ready, so it's up
+ * to us to wait. For DP we never get into this situation so let's
+ * avoid ever doing the extra long wait for DP.
+ */
+ if (aux->is_edp) {
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ if (ret) {
+ DRM_DEBUG_DP("Panel not ready for aux transactions\n");
+ goto exit;
+ }
+ }
+
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
@@ -491,7 +508,8 @@ void dp_aux_unregister(struct drm_dp_aux *dp_aux)
drm_dp_aux_unregister(dp_aux);
}
-struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog)
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+ bool is_edp)
{
struct dp_aux_private *aux;
@@ -506,6 +524,7 @@ struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog)
init_completion(&aux->comp);
aux->cmd_busy = false;
+ aux->is_edp = is_edp;
mutex_init(&aux->mutex);
aux->dev = dev;
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index c64951215ab5..e930974bcb5b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -16,7 +16,8 @@ void dp_aux_init(struct drm_dp_aux *dp_aux);
void dp_aux_deinit(struct drm_dp_aux *dp_aux);
void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
-struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog);
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog,
+ bool is_edp);
void dp_aux_put(struct drm_dp_aux *aux);
#endif /*__DP_AUX_H_*/
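The new is_edp flag drives the AUX-transfer guard added above: an eDP panel may have only just been powered up by its panel driver, so before the first transfer the code waits for HPD to report connected. The wait itself is implemented in dp_catalog.c (next file) as a standard iopoll loop; roughly, with aux_base standing in for catalog->io->dp_controller.aux.base:

	/* poll the HPD status every 2 ms, time out after 500 ms */
	ret = readl_poll_timeout(aux_base + REG_DP_DP_HPD_INT_STATUS, state,
				 state & DP_DP_HPD_STATE_STATUS_CONNECTED,
				 2000, 500000);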
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index b5dd0240d1dc..7257515871a9 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -24,6 +24,8 @@
#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
+
#define DP_INTERRUPT_STATUS1 \
(DP_INTR_AUX_I2C_DONE| \
DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
@@ -47,6 +49,7 @@
struct dp_catalog_private {
struct device *dev;
+ struct drm_device *drm_dev;
struct dp_io *io;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct dp_catalog dp_catalog;
@@ -80,7 +83,7 @@ static inline void dp_write_aux(struct dp_catalog_private *catalog,
writel(data, catalog->io->dp_controller.aux.base + offset);
}
-static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset)
+static inline u32 dp_read_ahb(const struct dp_catalog_private *catalog, u32 offset)
{
return readl_relaxed(catalog->io->dp_controller.ahb.base + offset);
}
@@ -242,6 +245,19 @@ void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
phy_calibrate(phy);
}
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
+{
+ u32 state;
+ struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ /* poll for hpd connected status every 2ms and timeout after 500ms */
+ return readl_poll_timeout(catalog->io->dp_controller.aux.base +
+ REG_DP_DP_HPD_INT_STATUS,
+ state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
+ 2000, 500000);
+}
+
static void dump_regs(void __iomem *base, int len)
{
int i;
@@ -322,7 +338,7 @@ void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- DRM_DEBUG_DP("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
+ drm_dbg_dp(catalog->drm_dev, "DP_CONFIGURATION_CTRL=0x%x\n", cfg);
dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
}
@@ -350,7 +366,7 @@ void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- DRM_DEBUG_DP("enable=%d\n", enable);
+ drm_dbg_dp(catalog->drm_dev, "enable=%d\n", enable);
if (enable) {
/*
* To make sure link reg writes happens before other operation,
@@ -395,7 +411,7 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
/* Configure clock to synchronous mode */
misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
- DRM_DEBUG_DP("misc settings = 0x%x\n", misc_val);
+ drm_dbg_dp(catalog->drm_dev, "misc settings = 0x%x\n", misc_val);
dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
}
@@ -450,7 +466,7 @@ void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
if (link_rate_hbr3 == rate)
nvid *= 3;
- DRM_DEBUG_DP("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+ drm_dbg_dp(catalog->drm_dev, "mvid=0x%x, nvid=0x%x\n", mvid, nvid);
dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
@@ -465,7 +481,7 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
struct dp_catalog_private, dp_catalog);
bit = BIT(state_bit - 1);
- DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, state_bit);
+ drm_dbg_dp(catalog->drm_dev, "hw: bit=%d train=%d\n", bit, state_bit);
dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
bit = BIT(state_bit - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
@@ -483,6 +499,22 @@ int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog,
}
/**
+ * dp_catalog_hw_revision() - retrieve DP hw revision
+ *
+ * @dp_catalog: DP catalog structure
+ *
+ * Return: DP controller hw revision
+ *
+ */
+u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog)
+{
+ const struct dp_catalog_private *catalog = container_of(dp_catalog,
+ struct dp_catalog_private, dp_catalog);
+
+ return dp_read_ahb(catalog, REG_DP_HW_VERSION);
+}
+
+/**
* dp_catalog_ctrl_reset() - reset DP controller
*
* @dp_catalog: DP catalog structure
@@ -557,7 +589,8 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
config = (en ? config | intr_mask : config & ~intr_mask);
- DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config);
+ drm_dbg_dp(catalog->drm_dev, "intr_mask=%#x config=%#x\n",
+ intr_mask, config);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
config & DP_DP_HPD_INT_MASK);
}
@@ -569,10 +602,6 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
- /* enable HPD plug and unplug interrupts */
- dp_catalog_hpd_config_intr(dp_catalog,
- DP_DP_HPD_PLUG_INT_MASK | DP_DP_HPD_UNPLUG_INT_MASK, true);
-
/* Configure REFTIMER and enable it */
reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
@@ -588,7 +617,7 @@ u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog)
u32 status;
status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
- DRM_DEBUG_DP("aux status: %#x\n", status);
+ drm_dbg_dp(catalog->drm_dev, "aux status: %#x\n", status);
status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
@@ -599,13 +628,21 @@ u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- int isr = 0;
+ int isr, mask;
isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
(isr & DP_DP_HPD_INT_MASK));
+ mask = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
- return isr;
+ /*
+ * We only want to return interrupts that are unmasked to the caller.
+ * However, the interrupt status field also contains other
+ * informational bits about the HPD state status, so we only mask
+ * out the part of the register that tells us about which interrupts
+ * are pending.
+ */
+ return isr & (mask | ~DP_DP_HPD_INT_MASK);
}
int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
@@ -664,7 +701,7 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
/* Make sure to clear the current pattern before starting a new one */
dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
- DRM_DEBUG_DP("pattern: %#x\n", pattern);
+ drm_dbg_dp(catalog->drm_dev, "pattern: %#x\n", pattern);
switch (pattern) {
case DP_PHY_TEST_PATTERN_D10_2:
dp_write_link(catalog, REG_DP_STATE_CTRL,
@@ -725,7 +762,8 @@ void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
break;
default:
- DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern);
+ drm_dbg_dp(catalog->drm_dev,
+ "No valid test pattern requested: %#x\n", pattern);
break;
}
}
@@ -743,6 +781,7 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ u32 reg;
dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
dp_catalog->total);
@@ -751,7 +790,18 @@ int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
dp_catalog->width_blanking);
dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
- dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
+
+ reg = dp_read_p0(catalog, MMSS_DP_INTF_CONFIG);
+
+ if (dp_catalog->wide_bus_en)
+ reg |= DP_INTF_CONFIG_DATABUS_WIDEN;
+ else
+ reg &= ~DP_INTF_CONFIG_DATABUS_WIDEN;
+
+
+ DRM_DEBUG_DP("wide_bus_en=%d reg=%#x\n", dp_catalog->wide_bus_en, reg);
+
+ dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, reg);
return 0;
}
@@ -820,7 +870,7 @@ void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
DP_BIST_ENABLE_DPBIST_EN);
dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
DP_TIMING_ENGINE_EN_EN);
- DRM_DEBUG_DP("%s: enabled tpg\n", __func__);
+ drm_dbg_dp(catalog->drm_dev, "%s: enabled tpg\n", __func__);
}
void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
@@ -909,7 +959,8 @@ void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
select = dp_catalog->audio_data;
acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
- DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl);
+ drm_dbg_dp(catalog->drm_dev, "select: %#x, acr_ctrl: %#x\n",
+ select, acr_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
}
@@ -934,7 +985,7 @@ void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
else
audio_ctrl &= ~BIT(0);
- DRM_DEBUG_DP("dp_audio_cfg = 0x%x\n", audio_ctrl);
+ drm_dbg_dp(catalog->drm_dev, "dp_audio_cfg = 0x%x\n", audio_ctrl);
dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
/* make sure audio engine is disabled */
@@ -965,7 +1016,7 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
/* AUDIO_INFOFRAME_SDP_EN */
sdp_cfg |= BIT(20);
- DRM_DEBUG_DP("sdp_cfg = 0x%x\n", sdp_cfg);
+ drm_dbg_dp(catalog->drm_dev, "sdp_cfg = 0x%x\n", sdp_cfg);
dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
@@ -975,7 +1026,7 @@ void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
/* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
sdp_cfg2 &= ~BIT(1);
- DRM_DEBUG_DP("sdp_cfg2 = 0x%x\n", sdp_cfg2);
+ drm_dbg_dp(catalog->drm_dev, "sdp_cfg2 = 0x%x\n", sdp_cfg2);
dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
@@ -1037,7 +1088,8 @@ void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
mainlink_levels &= 0xFE0;
mainlink_levels |= safe_to_exit_level;
- DRM_DEBUG_DP("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+ drm_dbg_dp(catalog->drm_dev,
+ "mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
mainlink_levels, safe_to_exit_level);
dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
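Two details in the dp_catalog.c hunks above are easy to miss. First, dp_catalog_hpd_get_intr_status() now returns isr & (mask | ~DP_DP_HPD_INT_MASK): a pending interrupt bit survives only if it is currently unmasked, while the informational HPD status bits outside DP_DP_HPD_INT_MASK always pass through. Illustrative bit math, assuming for the example that DP_DP_HPD_INT_MASK == 0xf (the real value is not shown in this diff):

	u32 isr  = 0x21d;	/* pending bits 0x1, 0x4, 0x8 + status bits 0x210 */
	u32 mask = 0x00c;	/* only the 0x4 and 0x8 interrupts are enabled */
	u32 ret  = isr & (mask | ~0xfu);
	/* ret == 0x21c: the masked-off pending bit 0x1 is dropped, the
	 * enabled pending bits 0x4/0x8 and the status bits 0x210 remain */

Second, dp_catalog_panel_timing_cfg() stops writing a literal 0 to MMSS_DP_INTF_CONFIG and instead does a read-modify-write of just the DATABUS_WIDEN bit, so enabling or disabling wide-bus mode no longer clobbers the rest of the register.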
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 7dea1012ae66..1f717f45c115 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -70,6 +70,7 @@ struct dp_catalog {
enum dp_catalog_audio_sdp_type sdp_type;
enum dp_catalog_audio_header_type sdp_header;
u32 audio_data;
+ bool wide_bus_en;
};
/* Debug module */
@@ -84,6 +85,7 @@ int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog);
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
/* DP Controller APIs */
@@ -95,6 +97,7 @@ void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
u32 stream_rate_khz, bool fixed_nvid);
int dp_catalog_ctrl_set_pattern_state_bit(struct dp_catalog *dp_catalog, u32 pattern);
+u32 dp_catalog_hw_revision(const struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index a96f6a8fa9bd..d21971baa24c 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -70,6 +70,7 @@ struct dp_vc_tu_mapping_table {
struct dp_ctrl_private {
struct dp_ctrl dp_ctrl;
+ struct drm_device *drm_dev;
struct device *dev;
struct drm_dp_aux *aux;
struct dp_panel *panel;
@@ -114,7 +115,7 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
pr_warn("PUSH_IDLE pattern timedout\n");
- DRM_DEBUG_DP("mainlink off done\n");
+ drm_dbg_dp(ctrl->drm_dev, "mainlink off\n");
}
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
@@ -603,8 +604,9 @@ static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
}
}
-static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
- struct dp_vc_tu_mapping_table *tu_table)
+static void _dp_ctrl_calc_tu(struct dp_ctrl_private *ctrl,
+ struct dp_tu_calc_input *in,
+ struct dp_vc_tu_mapping_table *tu_table)
{
struct tu_algo_data *tu;
int compare_result_1, compare_result_2;
@@ -687,8 +689,8 @@ static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
if (tu->dsc_en && compare_result_1 && compare_result_2) {
HBLANK_MARGIN += 4;
- DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n",
- HBLANK_MARGIN);
+ drm_dbg_dp(ctrl->drm_dev,
+ "increase HBLANK_MARGIN to %d\n", HBLANK_MARGIN);
}
tu_size_calc:
@@ -722,8 +724,10 @@ tu_size_calc:
tu->n_tus += 1;
tu->even_distribution_legacy = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
- DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n",
- tu->valid_boundary_link, tu->n_tus);
+
+ drm_dbg_dp(ctrl->drm_dev,
+ "n_sym = %d, num_of_tus = %d\n",
+ tu->valid_boundary_link, tu->n_tus);
temp1_fp = drm_fixp_from_fraction(tu->tu_size_desired, 1);
temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
@@ -916,19 +920,20 @@ tu_size_calc:
tu_table->lower_boundary_count = tu->lower_boundary_count;
tu_table->tu_size_minus1 = tu->tu_size_minus1;
- DRM_DEBUG_DP("TU: valid_boundary_link: %d\n",
+ drm_dbg_dp(ctrl->drm_dev, "TU: valid_boundary_link: %d\n",
tu_table->valid_boundary_link);
- DRM_DEBUG_DP("TU: delay_start_link: %d\n",
+ drm_dbg_dp(ctrl->drm_dev, "TU: delay_start_link: %d\n",
tu_table->delay_start_link);
- DRM_DEBUG_DP("TU: boundary_moderation_en: %d\n",
+ drm_dbg_dp(ctrl->drm_dev, "TU: boundary_moderation_en: %d\n",
tu_table->boundary_moderation_en);
- DRM_DEBUG_DP("TU: valid_lower_boundary_link: %d\n",
+ drm_dbg_dp(ctrl->drm_dev, "TU: valid_lower_boundary_link: %d\n",
tu_table->valid_lower_boundary_link);
- DRM_DEBUG_DP("TU: upper_boundary_count: %d\n",
+ drm_dbg_dp(ctrl->drm_dev, "TU: upper_boundary_count: %d\n",
tu_table->upper_boundary_count);
- DRM_DEBUG_DP("TU: lower_boundary_count: %d\n",
+ drm_dbg_dp(ctrl->drm_dev, "TU: lower_boundary_count: %d\n",
tu_table->lower_boundary_count);
- DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1);
+ drm_dbg_dp(ctrl->drm_dev, "TU: tu_size_minus1: %d\n",
+ tu_table->tu_size_minus1);
kfree(tu);
}
@@ -954,7 +959,7 @@ static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
in.num_of_dsc_slices = 0;
in.compress_ratio = 100;
- _dp_ctrl_calc_tu(&in, tu_table);
+ _dp_ctrl_calc_tu(ctrl, &in, tu_table);
}
static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
@@ -1005,8 +1010,9 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
u32 voltage_swing_level = link->phy_params.v_level;
u32 pre_emphasis_level = link->phy_params.p_level;
- DRM_DEBUG_DP("voltage level: %d emphasis level: %d\n", voltage_swing_level,
- pre_emphasis_level);
+ drm_dbg_dp(ctrl->drm_dev,
+ "voltage level: %d emphasis level: %d\n",
+ voltage_swing_level, pre_emphasis_level);
ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
voltage_swing_level, pre_emphasis_level);
@@ -1014,13 +1020,15 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
return ret;
if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
- DRM_DEBUG_DP("max. voltage swing level reached %d\n",
+ drm_dbg_dp(ctrl->drm_dev,
+ "max. voltage swing level reached %d\n",
voltage_swing_level);
max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
}
if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
- DRM_DEBUG_DP("max. pre-emphasis level reached %d\n",
+ drm_dbg_dp(ctrl->drm_dev,
+ "max. pre-emphasis level reached %d\n",
pre_emphasis_level);
max_level_reached |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
}
@@ -1032,8 +1040,8 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
buf[lane] = voltage_swing_level | pre_emphasis_level
| max_level_reached;
- DRM_DEBUG_DP("sink: p|v=0x%x\n", voltage_swing_level
- | pre_emphasis_level);
+ drm_dbg_dp(ctrl->drm_dev, "sink: p|v=0x%x\n",
+ voltage_swing_level | pre_emphasis_level);
ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
buf, lane_cnt);
if (ret == lane_cnt)
@@ -1048,7 +1056,7 @@ static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
u8 buf;
int ret = 0;
- DRM_DEBUG_DP("sink: pattern=%x\n", pattern);
+ drm_dbg_dp(ctrl->drm_dev, "sink: pattern=%x\n", pattern);
buf = pattern;
@@ -1119,8 +1127,6 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
old_v_level = ctrl->link->phy_params.v_level;
}
- DRM_DEBUG_DP("clock recovery not done, adjusting vx px\n");
-
dp_link_adjust_levels(ctrl->link, link_status);
ret = dp_ctrl_update_vx_px(ctrl);
if (ret)
@@ -1151,8 +1157,10 @@ static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
break;
}
- if (!ret)
- DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
+ if (!ret) {
+ drm_dbg_dp(ctrl->drm_dev, "new rate=0x%x\n",
+ ctrl->link->link_params.rate);
+ }
return ret;
}
@@ -1271,7 +1279,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
}
/* print success info as this is a result of user initiated action */
- DRM_DEBUG_DP("link training #1 successful\n");
+ drm_dbg_dp(ctrl->drm_dev, "link training #1 successful\n");
ret = dp_ctrl_link_train_2(ctrl, training_step);
if (ret) {
@@ -1280,7 +1288,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
}
/* print success info as this is a result of user initiated action */
- DRM_DEBUG_DP("link training #2 successful\n");
+ drm_dbg_dp(ctrl->drm_dev, "link training #2 successful\n");
end:
dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
@@ -1320,7 +1328,8 @@ static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
cfg++;
}
- DRM_DEBUG_DP("setting rate=%lu on clk=%s\n", rate, name);
+ drm_dbg_dp(ctrl->drm_dev, "setting rate=%lu on clk=%s\n",
+ rate, name);
if (num)
cfg->rate = rate;
@@ -1350,7 +1359,7 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
if (ret)
DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
- DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
return ret;
@@ -1367,7 +1376,7 @@ static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
if (ret)
DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
- DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "link rate=%d pixel_clk=%d\n",
ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
return ret;
@@ -1397,7 +1406,8 @@ void dp_ctrl_phy_init(struct dp_ctrl *dp_ctrl)
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_init(phy);
- DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
+
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
@@ -1413,7 +1423,7 @@ void dp_ctrl_phy_exit(struct dp_ctrl *dp_ctrl)
dp_catalog_ctrl_phy_reset(ctrl->catalog);
phy_exit(phy);
- DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
}
@@ -1489,7 +1499,7 @@ static int dp_ctrl_deinitialize_mainlink(struct dp_ctrl_private *ctrl)
phy_exit(phy);
phy_init(phy);
- DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return 0;
}
@@ -1524,7 +1534,8 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
int ret = 0;
if (!ctrl->link->phy_params.phy_test_pattern_sel) {
- DRM_DEBUG_DP("no test pattern selected by sink\n");
+ drm_dbg_dp(ctrl->drm_dev,
+ "no test pattern selected by sink\n");
return ret;
}
@@ -1533,7 +1544,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
* running. Add the global reset just before disabling the
* link clocks and core clocks.
*/
- ret = dp_ctrl_off_link_stream(&ctrl->dp_ctrl);
+ ret = dp_ctrl_off(&ctrl->dp_ctrl);
if (ret) {
DRM_ERROR("failed to disable DP controller\n");
return ret;
@@ -1554,7 +1565,7 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
u32 pattern_sent = 0x0;
u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
- DRM_DEBUG_DP("request: 0x%x\n", pattern_requested);
+ drm_dbg_dp(ctrl->drm_dev, "request: 0x%x\n", pattern_requested);
if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
ctrl->link->phy_params.v_level,
@@ -1595,8 +1606,8 @@ static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
success = false;
}
- DRM_DEBUG_DP("%s: test->0x%x\n", success ? "success" : "failed",
- pattern_requested);
+ drm_dbg_dp(ctrl->drm_dev, "%s: test->0x%x\n",
+ success ? "success" : "failed", pattern_requested);
return success;
}
@@ -1614,7 +1625,7 @@ void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
sink_request = ctrl->link->sink_request;
if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
- DRM_DEBUG_DP("PHY_TEST_PATTERN request\n");
+ drm_dbg_dp(ctrl->drm_dev, "PHY_TEST_PATTERN request\n");
if (dp_ctrl_process_phy_test_request(ctrl)) {
DRM_ERROR("process phy_test_req failed\n");
return;
@@ -1686,7 +1697,8 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
- DRM_DEBUG_DP("using phy test link parameters\n");
+ drm_dbg_dp(ctrl->drm_dev,
+ "using phy test link parameters\n");
if (!ctrl->panel->dp_mode.drm_mode.clock)
ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
} else {
@@ -1696,12 +1708,10 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
}
- DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
- ctrl->link->link_params.rate,
- ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ ctrl->link->link_params.rate, ctrl->link->link_params.num_lanes,
+ ctrl->dp_ctrl.pixel_rate);
- ctrl->link->phy_params.p_level = 0;
- ctrl->link->phy_params.v_level = 0;
rc = dp_ctrl_enable_mainlink_clocks(ctrl);
if (rc)
@@ -1803,6 +1813,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
int ret = 0;
bool mainlink_ready = false;
struct dp_ctrl_private *ctrl;
+ unsigned long pixel_rate_orig;
if (!dp_ctrl)
return -EINVAL;
@@ -1811,7 +1822,11 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
- DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+ pixel_rate_orig = ctrl->dp_ctrl.pixel_rate;
+ if (dp_ctrl->wide_bus_en)
+ ctrl->dp_ctrl.pixel_rate >>= 1;
+
+ drm_dbg_dp(ctrl->drm_dev, "rate=%d, num_lanes=%d, pixel_rate=%d\n",
ctrl->link->link_params.rate,
ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
@@ -1823,12 +1838,6 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
}
}
- if (!dp_ctrl_channel_eq_ok(ctrl))
- dp_ctrl_link_retrain(ctrl);
-
- /* stop txing train pattern to end link training */
- dp_ctrl_clear_training_pattern(ctrl);
-
ret = dp_ctrl_enable_stream_clocks(ctrl);
if (ret) {
DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
@@ -1840,6 +1849,12 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
return 0;
}
+ if (!dp_ctrl_channel_eq_ok(ctrl))
+ dp_ctrl_link_retrain(ctrl);
+
+ /* stop txing train pattern to end link training */
+ dp_ctrl_clear_training_pattern(ctrl);
+
/*
* Set up transfer unit values and set controller state to send
* video.
@@ -1850,7 +1865,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
dp_catalog_ctrl_config_msa(ctrl->catalog,
ctrl->link->link_params.rate,
- ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
+ pixel_rate_orig, dp_ctrl_use_fixed_nvid(ctrl));
dp_ctrl_setup_tr_unit(ctrl);
@@ -1861,7 +1876,8 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
return ret;
mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
- DRM_DEBUG_DP("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+ drm_dbg_dp(ctrl->drm_dev,
+ "mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
end:
return ret;
@@ -1897,20 +1913,46 @@ int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl)
return ret;
}
- DRM_DEBUG_DP("Before, phy=%x init_count=%d power_on=%d\n",
- (u32)(uintptr_t)phy, phy->init_count, phy->power_count);
-
phy_power_off(phy);
/* aux channel down, reinit phy */
phy_exit(phy);
phy_init(phy);
- DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret;
}
+int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl)
+{
+ struct dp_ctrl_private *ctrl;
+ struct dp_io *dp_io;
+ struct phy *phy;
+ int ret;
+
+ ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+ dp_io = &ctrl->parser->io;
+ phy = dp_io->phy;
+
+ dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+ ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+ }
+
+ DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+
+ phy_power_off(phy);
+
+ DRM_DEBUG_DP("After, phy=%p init_count=%d power_on=%d\n",
+ phy, phy->init_count, phy->power_count);
+
+ return ret;
+}
+
int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
{
struct dp_ctrl_private *ctrl;
@@ -1939,7 +1981,7 @@ int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
}
phy_power_off(phy);
- DRM_DEBUG_DP("phy=%p init=%d power_on=%d\n",
+ drm_dbg_dp(ctrl->drm_dev, "phy=%p init=%d power_on=%d\n",
phy, phy->init_count, phy->power_count);
return ret;
@@ -1958,12 +2000,12 @@ void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
- DRM_DEBUG_DP("dp_video_ready\n");
+ drm_dbg_dp(ctrl->drm_dev, "dp_video_ready\n");
complete(&ctrl->video_comp);
}
if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
- DRM_DEBUG_DP("idle_patterns_sent\n");
+ drm_dbg_dp(ctrl->drm_dev, "idle_patterns_sent\n");
complete(&ctrl->idle_comp);
}
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 2433edbc70a6..0745fde01b45 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -17,11 +17,13 @@ struct dp_ctrl {
bool orientation;
atomic_t aborted;
u32 pixel_rate;
+ bool wide_bus_en;
};
int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl);
int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
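dp_ctrl_on_stream() now separates the stream pixel rate from the rate the pixel clock is programmed to: with wide_bus_en set the interface moves two pixels per pixel-clock cycle (that is the understanding the shift encodes), so dp_ctrl.pixel_rate is halved for clock setup while pixel_rate_orig keeps the true stream rate for the software MVID/NVID (MSA) programming. Rough numbers under that assumption:

	/* e.g. 4096x2160@60 needs roughly 594000 kHz worth of pixels */
	unsigned long pixel_rate_orig = 594000;		 /* kHz, used for MSA */
	unsigned long pixel_rate = pixel_rate_orig >> 1; /* 297000 kHz pixel clock */

The same function also moves the channel-EQ check and dp_ctrl_clear_training_pattern() to after the stream clocks are running, and dp_ctrl_process_phy_test_request() switches from dp_ctrl_off_link_stream() to the full dp_ctrl_off().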
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 2f9c943f12d5..5e35033ba3e4 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -44,8 +44,6 @@ static int dp_debug_show(struct seq_file *seq, void *p)
drm_mode = &debug->panel->dp_mode.drm_mode;
seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
- seq_printf(seq, "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
- debug->panel->max_pclk_khz);
seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
debug->panel->link_info.rate);
seq_printf(seq, "\t\tnum_lanes = %u\n",
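The dp_display.c changes below rework the HPD event state machine: ST_CONNECT_PENDING becomes ST_MAINLINK_READY, and the EV_CONNECT_PENDING_TIMEOUT / EV_DISCONNECT_PENDING_TIMEOUT sentinel events with their 5-second timeout are removed, since each transition is now completed directly in the plug/unplug handlers. The transitions, as they read from the handlers (a derived sketch, not an authoritative diagram; the step into ST_CONNECTED happens in the enable path outside these hunks):

	/*
	 * ST_DISCONNECTED   --plug, link trained-->  ST_MAINLINK_READY
	 * ST_MAINLINK_READY --display enabled----->  ST_CONNECTED
	 * ST_MAINLINK_READY --unplug-------------->  dp_ctrl_off_link(),
	 *                                            PHY exit, ST_DISCONNECTED
	 * ST_CONNECTED      --unplug-------------->  ST_DISCONNECT_PENDING
	 *                                            (uevent finishes teardown)
	 */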
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a42732b67349..09174c2a9827 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -10,7 +10,7 @@
#include <linux/component.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
-#include <drm/drm_panel.h>
+#include <drm/display/drm_dp_aux_bus.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -42,7 +42,7 @@ enum {
/* event thread connection state */
enum {
ST_DISCONNECTED,
- ST_CONNECT_PENDING,
+ ST_MAINLINK_READY,
ST_CONNECTED,
ST_DISCONNECT_PENDING,
ST_DISPLAY_OFF,
@@ -57,14 +57,11 @@ enum {
EV_IRQ_HPD_INT,
EV_HPD_UNPLUG_INT,
EV_USER_NOTIFICATION,
- EV_CONNECT_PENDING_TIMEOUT,
- EV_DISCONNECT_PENDING_TIMEOUT,
};
#define EVENT_TIMEOUT (HZ/10) /* 100ms */
#define DP_EVENT_Q_MAX 8
-#define DP_TIMEOUT_5_SECOND (5000/EVENT_TIMEOUT)
#define DP_TIMEOUT_NONE 0
#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
@@ -87,6 +84,7 @@ struct dp_display_private {
bool hpd_irq_on;
bool audio_supported;
+ struct drm_device *drm_dev;
struct platform_device *pdev;
struct dentry *root;
@@ -113,15 +111,19 @@ struct dp_display_private {
u32 hpd_state;
u32 event_pndx;
u32 event_gndx;
+ struct task_struct *ev_tsk;
struct dp_event event_list[DP_EVENT_Q_MAX];
spinlock_t event_lock;
+ bool wide_bus_en;
+
struct dp_audio *audio;
};
struct msm_dp_desc {
phys_addr_t io_start;
unsigned int connector_type;
+ bool wide_bus_en;
};
struct msm_dp_config {
@@ -138,8 +140,8 @@ static const struct msm_dp_config sc7180_dp_cfg = {
static const struct msm_dp_config sc7280_dp_cfg = {
.descs = (const struct msm_dp_desc[]) {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
},
.num_descs = 2,
};
@@ -249,6 +251,8 @@ void dp_display_signal_audio_complete(struct msm_dp *dp_display)
complete_all(&dp->audio_comp);
}
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv);
+
static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
@@ -260,14 +264,14 @@ static int dp_display_bind(struct device *dev, struct device *master,
dp->dp_display.drm_dev = drm;
priv->dp[dp->id] = &dp->dp_display;
- rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type);
+ rc = dp->parser->parse(dp->parser);
if (rc) {
DRM_ERROR("device tree parsing failed\n");
goto end;
}
- dp->dp_display.next_bridge = dp->parser->next_bridge;
+ dp->drm_dev = drm;
dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
@@ -282,9 +286,18 @@ static int dp_display_bind(struct device *dev, struct device *master,
}
rc = dp_register_audio_driver(dev, dp->audio);
- if (rc)
+ if (rc) {
DRM_ERROR("Audio registration Dp failed\n");
+ goto end;
+ }
+
+ rc = dp_hpd_event_thread_start(dp);
+ if (rc) {
+ DRM_ERROR("Event thread create failed\n");
+ goto end;
+ }
+ return 0;
end:
return rc;
}
@@ -295,6 +308,11 @@ static void dp_display_unbind(struct device *dev, struct device *master,
struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv = dev_get_drvdata(master);
+ /* disable all HPD interrupts */
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+
+ kthread_stop(dp->ev_tsk);
+
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
priv->dp[dp->id] = NULL;
@@ -313,7 +331,8 @@ static bool dp_display_is_ds_bridge(struct dp_panel *panel)
static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{
- DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
+ drm_dbg_dp(dp->drm_dev, "present=%#x sink_count=%d\n",
+ dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
dp->link->sink_count);
return dp_display_is_ds_bridge(dp->panel) &&
(dp->link->sink_count == 0);
@@ -336,7 +355,8 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
{
if ((hpd && dp->dp_display.is_connected) ||
(!hpd && !dp->dp_display.is_connected)) {
- DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+ (hpd ? "on" : "off"));
return 0;
}
@@ -346,7 +366,8 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
dp->dp_display.is_connected = hpd;
- DRM_DEBUG_DP("hpd=%d\n", hpd);
+ drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
+ dp->dp_display.connector_type, hpd);
dp_display_send_hpd_event(&dp->dp_display);
return 0;
@@ -370,7 +391,6 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
dp->audio_supported = drm_detect_monitor_audio(edid);
dp_panel_handle_sink_request(dp->panel);
- dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
/*
@@ -394,7 +414,7 @@ end:
static void dp_display_host_phy_init(struct dp_display_private *dp)
{
- DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
@@ -406,7 +426,7 @@ static void dp_display_host_phy_init(struct dp_display_private *dp)
static void dp_display_host_phy_exit(struct dp_display_private *dp)
{
- DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
@@ -418,7 +438,7 @@ static void dp_display_host_phy_exit(struct dp_display_private *dp)
static void dp_display_host_init(struct dp_display_private *dp)
{
- DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
@@ -430,7 +450,7 @@ static void dp_display_host_init(struct dp_display_private *dp)
static void dp_display_host_deinit(struct dp_display_private *dp)
{
- DRM_DEBUG_DP("type=%d core_init=%d phy_init=%d\n",
+ drm_dbg_dp(dp->drm_dev, "type=%d core_init=%d phy_init=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized);
@@ -451,6 +471,11 @@ static int dp_display_usbpd_configure_cb(struct device *dev)
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
+ return 0;
+}
+
+static int dp_display_notify_disconnect(struct device *dev)
+{
struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
@@ -471,14 +496,14 @@ static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
int rc = 0;
if (dp_display_is_sink_count_zero(dp)) {
- DRM_DEBUG_DP("sink count is zero, nothing to do\n");
+ drm_dbg_dp(dp->drm_dev, "sink count is zero, nothing to do\n");
if (dp->hpd_state != ST_DISCONNECTED) {
dp->hpd_state = ST_DISCONNECT_PENDING;
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
}
} else {
if (dp->hpd_state == ST_DISCONNECTED) {
- dp->hpd_state = ST_CONNECT_PENDING;
+ dp->hpd_state = ST_MAINLINK_READY;
rc = dp_display_process_hpd_high(dp);
if (rc)
dp->hpd_state = ST_DISCONNECTED;
@@ -492,10 +517,11 @@ static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
{
u32 sink_request = dp->link->sink_request;
- DRM_DEBUG_DP("%d\n", sink_request);
+ drm_dbg_dp(dp->drm_dev, "%d\n", sink_request);
if (dp->hpd_state == ST_DISCONNECTED) {
if (sink_request & DP_LINK_STATUS_UPDATED) {
- DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request);
+ drm_dbg_dp(dp->drm_dev, "Disconnected sink_request: %d\n",
+ sink_request);
DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
return -EINVAL;
}
@@ -519,7 +545,8 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
rc = dp_link_process_request(dp->link);
if (!rc) {
sink_request = dp->link->sink_request;
- DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request);
+ drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
+ dp->hpd_state, sink_request);
if (sink_request & DS_PORT_STATUS_CHANGED)
rc = dp_display_handle_port_ststus_changed(dp);
else
@@ -533,7 +560,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
{
struct dp_usbpd *hpd = dp->usbpd;
u32 state;
- u32 tout = DP_TIMEOUT_5_SECOND;
int ret;
if (!hpd)
@@ -542,7 +568,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- DRM_DEBUG_DP("Before, type=%d hpd_state=%d\n",
+ drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
@@ -550,7 +576,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- if (state == ST_CONNECT_PENDING || state == ST_CONNECTED) {
+ if (state == ST_MAINLINK_READY || state == ST_CONNECTED) {
mutex_unlock(&dp->event_mutex);
return 0;
}
@@ -562,21 +588,18 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- dp->hpd_state = ST_CONNECT_PENDING;
-
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
if (ret) { /* link train failed */
dp->hpd_state = ST_DISCONNECTED;
} else {
- /* start sentinel checking in case of missing uevent */
- dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+ dp->hpd_state = ST_MAINLINK_READY;
}
/* enable HDP irq_hpd/replug interrupt */
dp_catalog_hpd_config_intr(dp->catalog,
DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
- DRM_DEBUG_DP("After, type=%d hpd_state=%d\n",
+ drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
@@ -593,23 +616,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
static int dp_display_enable(struct dp_display_private *dp, u32 data);
static int dp_display_disable(struct dp_display_private *dp, u32 data);
-static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
-{
- u32 state;
-
- mutex_lock(&dp->event_mutex);
-
- state = dp->hpd_state;
- if (state == ST_CONNECT_PENDING) {
- dp->hpd_state = ST_CONNECTED;
- DRM_DEBUG_DP("type=%d\n", dp->dp_display.connector_type);
- }
-
- mutex_unlock(&dp->event_mutex);
-
- return 0;
-}
-
static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
bool plugged)
{
@@ -636,7 +642,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
state = dp->hpd_state;
- DRM_DEBUG_DP("Before, type=%d hpd_state=%d\n",
+ drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
/* disable irq_hpd/replug interrupts */
@@ -651,24 +657,21 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
if (dp->link->sink_count == 0) {
dp_display_host_phy_exit(dp);
}
+ dp_display_notify_disconnect(&dp->pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
- }
-
- if (state == ST_DISCONNECT_PENDING) {
+ } else if (state == ST_DISCONNECT_PENDING) {
mutex_unlock(&dp->event_mutex);
return 0;
- }
-
- if (state == ST_CONNECT_PENDING) {
- /* wait until CONNECTED */
- dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 1); /* delay = 1 */
+ } else if (state == ST_MAINLINK_READY) {
+ dp_ctrl_off_link(dp->ctrl);
+ dp_display_host_phy_exit(dp);
+ dp->hpd_state = ST_DISCONNECTED;
+ dp_display_notify_disconnect(&dp->pdev->dev);
mutex_unlock(&dp->event_mutex);
return 0;
}
- dp->hpd_state = ST_DISCONNECT_PENDING;
-
/* disable HPD plug interrupts */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
@@ -676,18 +679,22 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
*/
- dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
+ dp_display_notify_disconnect(&dp->pdev->dev);
- /* start sentinel checking in case of missing uevent */
- dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+ if (state == ST_DISPLAY_OFF) {
+ dp->hpd_state = ST_DISCONNECTED;
+ } else {
+ dp->hpd_state = ST_DISCONNECT_PENDING;
+ }
/* signal the disconnect event early to ensure proper teardown */
dp_display_handle_plugged_change(&dp->dp_display, false);
/* enable HPD plug interrupt to prepare for the next plug-in */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
+ if (!dp->dp_display.is_edp)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
- DRM_DEBUG_DP("After, type=%d hpd_state=%d\n",
+ drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
/* uevent will complete disconnection part */
@@ -695,23 +702,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
return 0;
}
-static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data)
-{
- u32 state;
-
- mutex_lock(&dp->event_mutex);
-
- state = dp->hpd_state;
- if (state == ST_DISCONNECT_PENDING) {
- dp->hpd_state = ST_DISCONNECTED;
- DRM_DEBUG_DP("type=%d\n", dp->dp_display.connector_type);
- }
-
- mutex_unlock(&dp->event_mutex);
-
- return 0;
-}
-
static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
{
u32 state;
@@ -720,7 +710,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
/* irq_hpd can happen at either connected or disconnected state */
state = dp->hpd_state;
- DRM_DEBUG_DP("Before, type=%d hpd_state=%d\n",
+ drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
@@ -728,14 +718,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
- if (state == ST_CONNECT_PENDING) {
- /* wait until ST_CONNECTED */
- dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
- mutex_unlock(&dp->event_mutex);
- return 0;
- }
-
- if (state == ST_CONNECT_PENDING || state == ST_DISCONNECT_PENDING) {
+ if (state == ST_MAINLINK_READY || state == ST_DISCONNECT_PENDING) {
/* wait until ST_CONNECTED */
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
@@ -744,7 +727,7 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
dp_display_usbpd_attention_cb(&dp->pdev->dev);
- DRM_DEBUG_DP("After, type=%d hpd_state=%d\n",
+ drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
@@ -806,7 +789,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error;
}
- dp->aux = dp_aux_get(dev, dp->catalog);
+ dp->aux = dp_aux_get(dev, dp->catalog, dp->dp_display.is_edp);
if (IS_ERR(dp->aux)) {
rc = PTR_ERR(dp->aux);
DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
@@ -851,6 +834,10 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
goto error_ctrl;
}
+ /* propagate wide_bus_en to the different layers */
+ dp->ctrl->wide_bus_en = dp->wide_bus_en;
+ dp->catalog->wide_bus_en = dp->wide_bus_en;
+
return rc;
error_ctrl:
@@ -885,9 +872,9 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data)
int rc = 0;
struct msm_dp *dp_display = &dp->dp_display;
- DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
+ drm_dbg_dp(dp->drm_dev, "sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) {
- DRM_DEBUG_DP("Link already setup, return\n");
+ drm_dbg_dp(dp->drm_dev, "Link already setup, return\n");
return 0;
}
@@ -952,7 +939,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
dp_display->power_on = false;
- DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count);
+ drm_dbg_dp(dp->drm_dev, "sink count: %d\n", dp->link->sink_count);
return 0;
}
@@ -974,18 +961,42 @@ int dp_display_set_plugged_cb(struct msm_dp *dp_display,
return 0;
}
-int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz)
+/**
+ * dp_bridge_mode_valid - callback to determine if specified mode is valid
+ * @bridge: Pointer to drm bridge structure
+ * @info: display info
+ * @mode: Pointer to drm mode structure
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
{
const u32 num_components = 3, default_bpp = 24;
struct dp_display_private *dp_display;
struct dp_link_info *link_info;
u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+ struct msm_dp *dp;
+ int mode_pclk_khz = mode->clock;
+
+ dp = to_dp_bridge(bridge)->dp_display;
if (!dp || !mode_pclk_khz || !dp->connector) {
DRM_ERROR("invalid params\n");
return -EINVAL;
}
+ /*
+ * The eDP controller currently does not have a reliable way of
+ * enabling panel power to read sink capabilities. So, we rely
+ * on the panel driver to populate only supported modes for now.
+ */
+ if (dp->is_edp)
+ return MODE_OK;
+
+ if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+ return MODE_BAD;
+
dp_display = container_of(dp, struct dp_display_private, dp_display);
link_info = &dp_display->panel->link_info;
@@ -1005,11 +1016,9 @@ int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz)
return MODE_OK;
}
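The unchanged tail of this function compares the mode's data rate against the link capacity. A sketch of that arithmetic with a worked example (variable names are approximate, not the literal driver code):

    /* mode_rate: pixel clock (kHz) * bits per pixel -> payload in kbit/s.
     * supported_rate: lanes * link symbol rate (kHz, as returned by
     * drm_dp_bw_code_to_link_rate()) * 8 data bits per 8b/10b symbol.
     * Worked example: 4k60 at 24 bpp on 4-lane HBR2.
     */
    u32 mode_rate_khz = mode->clock * mode_bpp;      /* 594000 * 24 = 14256000 */
    u32 supported_rate_khz = num_lanes * rate * 8;   /* 4 * 540000 * 8 = 17280000 */

    if (mode_rate_khz > supported_rate_khz)
            return MODE_BAD;
    return MODE_OK;                                  /* 4k60 fits on 4-lane HBR2 */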
-int dp_display_get_modes(struct msm_dp *dp,
- struct dp_display_mode *dp_mode)
+int dp_display_get_modes(struct msm_dp *dp)
{
struct dp_display_private *dp_display;
- int ret = 0;
if (!dp) {
DRM_ERROR("invalid params\n");
@@ -1018,11 +1027,8 @@ int dp_display_get_modes(struct msm_dp *dp,
dp_display = container_of(dp, struct dp_display_private, dp_display);
- ret = dp_panel_get_modes(dp_display->panel,
- dp->connector, dp_mode);
- if (dp_mode->drm_mode.clock)
- dp->max_pclk_khz = dp_mode->drm_mode.clock;
- return ret;
+ return dp_panel_get_modes(dp_display->panel,
+ dp->connector);
}
bool dp_display_check_video_test(struct msm_dp *dp)
@@ -1080,6 +1086,13 @@ static void dp_display_config_hpd(struct dp_display_private *dp)
dp_display_host_init(dp);
dp_catalog_ctrl_hpd_config(dp->catalog);
+ /* Enable plug and unplug interrupts only for external DisplayPort */
+ if (!dp->dp_display.is_edp)
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_HPD_PLUG_INT_MASK |
+ DP_DP_HPD_UNPLUG_INT_MASK,
+ true);
+
/* Enable the interrupt only the first time:
 * we leave DP clocks on during disconnect
 * and never disable the interrupt
@@ -1099,12 +1112,17 @@ static int hpd_event_thread(void *data)
while (1) {
if (timeout_mode) {
wait_event_timeout(dp_priv->event_q,
- (dp_priv->event_pndx == dp_priv->event_gndx),
- EVENT_TIMEOUT);
+ (dp_priv->event_pndx == dp_priv->event_gndx) ||
+ kthread_should_stop(), EVENT_TIMEOUT);
} else {
wait_event_interruptible(dp_priv->event_q,
- (dp_priv->event_pndx != dp_priv->event_gndx));
+ (dp_priv->event_pndx != dp_priv->event_gndx) ||
+ kthread_should_stop());
}
+
+ if (kthread_should_stop())
+ break;
+
spin_lock_irqsave(&dp_priv->event_lock, flag);
todo = &dp_priv->event_list[dp_priv->event_gndx];
if (todo->delay) {
@@ -1158,14 +1176,6 @@ static int hpd_event_thread(void *data)
dp_display_send_hpd_notification(dp_priv,
todo->data);
break;
- case EV_CONNECT_PENDING_TIMEOUT:
- dp_connect_pending_timeout(dp_priv,
- todo->data);
- break;
- case EV_DISCONNECT_PENDING_TIMEOUT:
- dp_disconnect_pending_timeout(dp_priv,
- todo->data);
- break;
default:
break;
}
@@ -1174,12 +1184,17 @@ static int hpd_event_thread(void *data)
return 0;
}
-static void dp_hpd_event_setup(struct dp_display_private *dp_priv)
+static int dp_hpd_event_thread_start(struct dp_display_private *dp_priv)
{
- init_waitqueue_head(&dp_priv->event_q);
- spin_lock_init(&dp_priv->event_lock);
+ /* set event q to empty */
+ dp_priv->event_gndx = 0;
+ dp_priv->event_pndx = 0;
+
+ dp_priv->ev_tsk = kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ if (IS_ERR(dp_priv->ev_tsk))
+ return PTR_ERR(dp_priv->ev_tsk);
- kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+ return 0;
}
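The conversion to kthread_run()/kthread_stop() follows the standard stoppable-kthread pattern: kthread_should_stop() must be part of every wait condition so that kthread_stop() can wake the thread and have it exit cleanly. A minimal sketch (struct my_ctx and its fields are hypothetical):

    static int worker(void *data)
    {
            struct my_ctx *ctx = data;

            while (!kthread_should_stop()) {
                    wait_event_interruptible(ctx->wq,
                                    ctx->pending || kthread_should_stop());
                    if (kthread_should_stop())
                            break;
                    /* ... consume one pending event ... */
            }
            return 0;
    }

    /* teardown: kthread_stop() wakes the waiter and joins the thread */
    kthread_stop(ctx->task);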
static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
@@ -1196,15 +1211,13 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
if (hpd_isr_status & 0x0F) {
- DRM_DEBUG_DP("type=%d isr=0x%x\n",
+ drm_dbg_dp(dp->drm_dev, "type=%d isr=0x%x\n",
dp->dp_display.connector_type, hpd_isr_status);
/* hpd related interrupts */
if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
- /* stop sentinel connect pending checking */
- dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
}
@@ -1239,10 +1252,9 @@ int dp_display_request_irq(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
- if (dp->irq < 0) {
- rc = dp->irq;
- DRM_ERROR("failed to get irq: %d\n", rc);
- return rc;
+ if (!dp->irq) {
+ DRM_ERROR("failed to get irq\n");
+ return -EINVAL;
}
rc = devm_request_irq(&dp->pdev->dev, dp->irq,
@@ -1302,6 +1314,9 @@ static int dp_display_probe(struct platform_device *pdev)
dp->pdev = pdev;
dp->name = "drm_dp";
dp->dp_display.connector_type = desc->connector_type;
+ dp->wide_bus_en = desc->wide_bus_en;
+ dp->dp_display.is_edp =
+ (dp->dp_display.connector_type == DRM_MODE_CONNECTOR_eDP);
rc = dp_init_sub_modules(dp);
if (rc) {
@@ -1309,7 +1324,10 @@ static int dp_display_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ /* setup event q */
mutex_init(&dp->event_mutex);
+ init_waitqueue_head(&dp->event_q);
+ spin_lock_init(&dp->event_lock);
/* Store DP audio handle inside DP display */
dp->dp_display.dp_audio = dp->audio;
@@ -1350,7 +1368,8 @@ static int dp_pm_resume(struct device *dev)
mutex_lock(&dp->event_mutex);
- DRM_DEBUG_DP("Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
+ drm_dbg_dp(dp->drm_dev,
+ "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
@@ -1363,6 +1382,12 @@ static int dp_pm_resume(struct device *dev)
dp_catalog_ctrl_hpd_config(dp->catalog);
+ if (!dp->dp_display.is_edp)
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_HPD_PLUG_INT_MASK |
+ DP_DP_HPD_UNPLUG_INT_MASK,
+ true);
+
if (dp_catalog_link_is_connected(dp->catalog)) {
/*
* set sink to normal operation mode -- D0
@@ -1391,8 +1416,8 @@ static int dp_pm_resume(struct device *dev)
dp_display_handle_plugged_change(dp_display, false);
}
- DRM_DEBUG_DP("After, type=%d sink_count=%d is_connected=%d \
- core_inited=%d phy_inited=%d power_on=%d\n",
+ drm_dbg_dp(dp->drm_dev,
+ "After, type=%d sink=%d conn=%d core_init=%d phy_init=%d power=%d\n",
dp->dp_display.connector_type, dp->link->sink_count,
dp->dp_display.is_connected, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
@@ -1412,7 +1437,8 @@ static int dp_pm_suspend(struct device *dev)
mutex_lock(&dp->event_mutex);
- DRM_DEBUG_DP("Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
+ drm_dbg_dp(dp->drm_dev,
+ "Before, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
@@ -1427,7 +1453,8 @@ static int dp_pm_suspend(struct device *dev)
dp->hpd_state = ST_SUSPENDED;
- DRM_DEBUG_DP("After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
+ drm_dbg_dp(dp->drm_dev,
+ "After, type=%d core_inited=%d phy_inited=%d power_on=%d\n",
dp->dp_display.connector_type, dp->core_initialized,
dp->phy_initialized, dp_display->power_on);
@@ -1489,9 +1516,17 @@ void msm_dp_irq_postinstall(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp_hpd_event_setup(dp);
+ if (!dp_display->is_edp)
+ dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+}
+
+bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
- dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+ return dp->wide_bus_en;
}
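msm_dp_wide_bus_available() lets the rest of the display driver query the per-SoC wide-bus setting without reaching into dp_display_private. A hypothetical consumer (the encoder helper name is illustrative, not from this patch):

    /* sketch: gate encoder-side wide-bus configuration on the DP setting;
     * dpu_encoder_set_widebus() is a hypothetical helper
     */
    if (msm_dp_wide_bus_available(dp_display))
            dpu_encoder_set_widebus(encoder, true);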
void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
@@ -1513,6 +1548,64 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
}
}
+static int dp_display_get_next_bridge(struct msm_dp *dp)
+{
+ int rc;
+ struct dp_display_private *dp_priv;
+ struct device_node *aux_bus;
+ struct device *dev;
+
+ dp_priv = container_of(dp, struct dp_display_private, dp_display);
+ dev = &dp_priv->pdev->dev;
+ aux_bus = of_get_child_by_name(dev->of_node, "aux-bus");
+
+ if (aux_bus && dp->is_edp) {
+ dp_display_host_init(dp_priv);
+ dp_catalog_ctrl_hpd_config(dp_priv->catalog);
+ dp_display_host_phy_init(dp_priv);
+ enable_irq(dp_priv->irq);
+
+ /*
+ * The code below assumes that the panel will finish probing
+ * by the time devm_of_dp_aux_populate_ep_devices() returns.
+ * This isn't a great assumption since it will fail if the
+ * panel driver is probed asynchronously but is the best we
+ * can do without a bigger driver reorganization.
+ */
+ rc = devm_of_dp_aux_populate_ep_devices(dp_priv->aux);
+ of_node_put(aux_bus);
+ if (rc)
+ goto error;
+ } else if (dp->is_edp) {
+ DRM_ERROR("eDP aux_bus not found\n");
+ return -ENODEV;
+ }
+
+ /*
+ * External bridges are mandatory for eDP interfaces: one has to
+ * provide at least an eDP panel (which gets wrapped into panel-bridge).
+ *
+ * For DisplayPort interfaces external bridges are optional, so
+ * silently ignore an error if one is not present (-ENODEV).
+ */
+ rc = dp_parser_find_next_bridge(dp_priv->parser);
+ if (!dp->is_edp && rc == -ENODEV)
+ return 0;
+
+ if (!rc) {
+ dp->next_bridge = dp_priv->parser->next_bridge;
+ return 0;
+ }
+
+error:
+ if (dp->is_edp) {
+ disable_irq(dp_priv->irq);
+ dp_display_host_phy_exit(dp_priv);
+ dp_display_host_deinit(dp_priv);
+ }
+ return rc;
+}
+
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
@@ -1536,20 +1629,11 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
dp_display->encoder = encoder;
- dp_display->connector = dp_drm_connector_init(dp_display);
- if (IS_ERR(dp_display->connector)) {
- ret = PTR_ERR(dp_display->connector);
- DRM_DEV_ERROR(dev->dev,
- "failed to create dp connector: %d\n", ret);
- dp_display->connector = NULL;
+ ret = dp_display_get_next_bridge(dp_display);
+ if (ret)
return ret;
- }
-
- dp_priv->panel->connector = dp_display->connector;
- priv->connectors[priv->num_connectors++] = dp_display->connector;
-
- dp_display->bridge = msm_dp_bridge_init(dp_display, dev, encoder);
+ dp_display->bridge = dp_bridge_init(dp_display, dev, encoder);
if (IS_ERR(dp_display->bridge)) {
ret = PTR_ERR(dp_display->bridge);
DRM_DEV_ERROR(dev->dev,
@@ -1560,11 +1644,24 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
priv->bridges[priv->num_bridges++] = dp_display->bridge;
+ dp_display->connector = dp_drm_connector_init(dp_display);
+ if (IS_ERR(dp_display->connector)) {
+ ret = PTR_ERR(dp_display->connector);
+ DRM_DEV_ERROR(dev->dev,
+ "failed to create dp connector: %d\n", ret);
+ dp_display->connector = NULL;
+ return ret;
+ }
+
+ dp_priv->panel->connector = dp_display->connector;
+
return 0;
}
-int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+void dp_bridge_enable(struct drm_bridge *drm_bridge)
{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
int rc = 0;
struct dp_display_private *dp_display;
u32 state;
@@ -1572,26 +1669,32 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
dp_display = container_of(dp, struct dp_display_private, dp_display);
if (!dp_display->dp_mode.drm_mode.clock) {
DRM_ERROR("invalid params\n");
- return -EINVAL;
+ return;
}
+ if (dp->is_edp)
+ dp_hpd_plug_handle(dp_display, 0);
+
mutex_lock(&dp_display->event_mutex);
- /* stop sentinel checking */
- dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
+ state = dp_display->hpd_state;
+ if (state != ST_DISPLAY_OFF && state != ST_MAINLINK_READY) {
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
rc = dp_display_set_mode(dp, &dp_display->dp_mode);
if (rc) {
DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
mutex_unlock(&dp_display->event_mutex);
- return rc;
+ return;
}
rc = dp_display_prepare(dp);
if (rc) {
DRM_ERROR("DP display prepare failed, rc=%d\n", rc);
mutex_unlock(&dp_display->event_mutex);
- return rc;
+ return;
}
state = dp_display->hpd_state;
@@ -1615,34 +1718,41 @@ int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
/* completed connection */
dp_display->hpd_state = ST_CONNECTED;
+ drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
mutex_unlock(&dp_display->event_mutex);
-
- return rc;
}
-int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+void dp_bridge_disable(struct drm_bridge *drm_bridge)
{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
dp_ctrl_push_idle(dp_display->ctrl);
-
- return 0;
}
-int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
int rc = 0;
u32 state;
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
+ if (dp->is_edp)
+ dp_hpd_unplug_handle(dp_display, 0);
+
mutex_lock(&dp_display->event_mutex);
- /* stop sentinel checking */
- dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
+ state = dp_display->hpd_state;
+ if (state != ST_DISCONNECT_PENDING && state != ST_CONNECTED) {
+ mutex_unlock(&dp_display->event_mutex);
+ return;
+ }
dp_display_disable(dp_display, 0);
@@ -1658,14 +1768,16 @@ int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
dp_display->hpd_state = ST_DISPLAY_OFF;
}
+ drm_dbg_dp(dp->drm_dev, "type=%d Done\n", dp->connector_type);
mutex_unlock(&dp_display->event_mutex);
- return rc;
}
-void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
+void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(drm_bridge);
+ struct msm_dp *dp = dp_bridge->dp_display;
struct dp_display_private *dp_display;
dp_display = container_of(dp, struct dp_display_private, dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 7af2b186d2d9..4f9fe4d7610b 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -21,10 +21,11 @@ struct msm_dp {
bool audio_enabled;
bool power_on;
unsigned int connector_type;
+ bool is_edp;
hdmi_codec_plugged_cb plugged_cb;
- u32 max_pclk_khz;
+ bool wide_bus_en;
u32 max_dp_lanes;
struct dp_audio *dp_audio;
@@ -32,9 +33,7 @@ struct msm_dp {
int dp_display_set_plugged_cb(struct msm_dp *dp_display,
hdmi_codec_plugged_cb fn, struct device *codec_dev);
-int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz);
-int dp_display_get_modes(struct msm_dp *dp_display,
- struct dp_display_mode *dp_mode);
+int dp_display_get_modes(struct msm_dp *dp_display);
int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 80f59cf99089..62d58b9c4647 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -6,40 +6,25 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "dp_drm.h"
-
-struct msm_dp_bridge {
- struct drm_bridge bridge;
- struct msm_dp *dp_display;
-};
-
-#define to_dp_display(x) container_of((x), struct msm_dp_bridge, bridge)
-
-struct dp_connector {
- struct drm_connector base;
- struct msm_dp *dp_display;
-};
-#define to_dp_connector(x) container_of(x, struct dp_connector, base)
-
/**
- * dp_connector_detect - callback to determine if connector is connected
- * @conn: Pointer to drm connector structure
- * @force: Force detect setting from drm framework
- * Returns: Connector 'is connected' status
+ * dp_bridge_detect - callback to determine if connector is connected
+ * @bridge: Pointer to drm bridge structure
+ * Returns: Bridge's 'is connected' status
*/
-static enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
- bool force)
+static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge)
{
struct msm_dp *dp;
- dp = to_dp_connector(conn)->dp_display;
+ dp = to_dp_bridge(bridge)->dp_display;
- DRM_DEBUG_DP("is_connected = %s\n",
+ drm_dbg_dp(dp->drm_dev, "is_connected = %s\n",
(dp->is_connected) ? "true" : "false");
return (dp->is_connected) ? connector_status_connected :
@@ -47,173 +32,45 @@ static enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
}
/**
- * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @bridge: Pointer to drm bridge
* @connector: Pointer to drm connector structure
* Returns: Number of modes added
*/
-static int dp_connector_get_modes(struct drm_connector *connector)
+static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector)
{
int rc = 0;
struct msm_dp *dp;
- struct dp_display_mode *dp_mode = NULL;
- struct drm_display_mode *m, drm_mode;
if (!connector)
return 0;
- dp = to_dp_connector(connector)->dp_display;
-
- dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL);
- if (!dp_mode)
- return 0;
+ dp = to_dp_bridge(bridge)->dp_display;
/* the pluggable case assumes the EDID is read at HPD time */
if (dp->is_connected) {
- /*
- *The get_modes() function might return one mode that is stored
- * in dp_mode when compliance test is in progress. If not, the
- * return value is equal to the total number of modes supported
- * by the sink
- */
- rc = dp_display_get_modes(dp, dp_mode);
+ rc = dp_display_get_modes(dp);
if (rc <= 0) {
DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
- kfree(dp_mode);
return rc;
}
- if (dp_mode->drm_mode.clock) { /* valid DP mode */
- memset(&drm_mode, 0x0, sizeof(drm_mode));
- drm_mode_copy(&drm_mode, &dp_mode->drm_mode);
- m = drm_mode_duplicate(connector->dev, &drm_mode);
- if (!m) {
- DRM_ERROR("failed to add mode %ux%u\n",
- drm_mode.hdisplay,
- drm_mode.vdisplay);
- kfree(dp_mode);
- return 0;
- }
- drm_mode_probed_add(connector, m);
- }
} else {
- DRM_DEBUG_DP("No sink connected\n");
+ drm_dbg_dp(connector->dev, "No sink connected\n");
}
- kfree(dp_mode);
return rc;
}
-/**
- * dp_connector_mode_valid - callback to determine if specified mode is valid
- * @connector: Pointer to drm connector structure
- * @mode: Pointer to drm mode structure
- * Returns: Validity status for specified mode
- */
-static enum drm_mode_status dp_connector_mode_valid(
- struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct msm_dp *dp_disp;
-
- dp_disp = to_dp_connector(connector)->dp_display;
-
- if ((dp_disp->max_pclk_khz <= 0) ||
- (dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) ||
- (mode->clock > dp_disp->max_pclk_khz))
- return MODE_BAD;
-
- return dp_display_validate_mode(dp_disp, mode->clock);
-}
-
-static const struct drm_connector_funcs dp_connector_funcs = {
- .detect = dp_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs dp_connector_helper_funcs = {
- .get_modes = dp_connector_get_modes,
- .mode_valid = dp_connector_mode_valid,
-};
-
-/* connector initialization */
-struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
-{
- struct drm_connector *connector = NULL;
- struct dp_connector *dp_connector;
- int ret;
-
- dp_connector = devm_kzalloc(dp_display->drm_dev->dev,
- sizeof(*dp_connector),
- GFP_KERNEL);
- if (!dp_connector)
- return ERR_PTR(-ENOMEM);
-
- dp_connector->dp_display = dp_display;
-
- connector = &dp_connector->base;
-
- ret = drm_connector_init(dp_display->drm_dev, connector,
- &dp_connector_funcs,
- dp_display->connector_type);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &dp_connector_helper_funcs);
-
- /*
- * Enable HPD to let hpd event is handled when cable is connected.
- */
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_attach_encoder(connector, dp_display->encoder);
-
- return connector;
-}
-
-static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
-
- msm_dp_display_mode_set(dp_display, drm_bridge->encoder, mode, adjusted_mode);
-}
-
-static void dp_bridge_enable(struct drm_bridge *drm_bridge)
-{
- struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
-
- msm_dp_display_enable(dp_display, drm_bridge->encoder);
-}
-
-static void dp_bridge_disable(struct drm_bridge *drm_bridge)
-{
- struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
-
- msm_dp_display_pre_disable(dp_display, drm_bridge->encoder);
-}
-
-static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
-{
- struct msm_dp_bridge *dp_bridge = to_dp_display(drm_bridge);
- struct msm_dp *dp_display = dp_bridge->dp_display;
-
- msm_dp_display_disable(dp_display, drm_bridge->encoder);
-}
-
static const struct drm_bridge_funcs dp_bridge_ops = {
.enable = dp_bridge_enable,
.disable = dp_bridge_disable,
.post_disable = dp_bridge_post_disable,
.mode_set = dp_bridge_mode_set,
+ .mode_valid = dp_bridge_mode_valid,
+ .get_modes = dp_bridge_get_modes,
+ .detect = dp_bridge_detect,
};
-struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
{
int rc;
@@ -228,11 +85,33 @@ struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_devi
bridge = &dp_bridge->bridge;
bridge->funcs = &dp_bridge_ops;
- bridge->encoder = encoder;
+ bridge->type = dp_display->connector_type;
+
+ /*
+ * Many ops only make sense for DP. Why?
+ * - Detect/HPD are used by DRM to know if a display is _physically_
+ * there, not whether the display is powered on / finished initting.
+ * On eDP we assume the display is always there because you can't
+ * know until power is applied. If we don't implement the ops DRM will
+ * assume our display is always there.
+ * - Currently eDP mode reading is driven by the panel driver. This
+ * allows the panel driver to properly power itself on to read the
+ * modes.
+ */
+ if (!dp_display->is_edp) {
+ bridge->ops =
+ DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_HPD |
+ DRM_BRIDGE_OP_MODES;
+ }
+
+ drm_bridge_add(bridge);
rc = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (rc) {
DRM_ERROR("failed to attach bridge, rc=%d\n", rc);
+ drm_bridge_remove(bridge);
+
return ERR_PTR(rc);
}
@@ -249,3 +128,17 @@ struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display, struct drm_devi
return bridge;
}
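The DRM_BRIDGE_OP_* bits matter because drm_bridge_connector_init() walks the bridge chain and wires the connector's detect/modes/HPD paths only to bridges that advertise the matching op, roughly like this (simplified sketch of the drm_bridge_connector logic):

    drm_for_each_bridge_in_chain(encoder, bridge) {
            if (bridge->ops & DRM_BRIDGE_OP_DETECT)
                    bridge_connector->bridge_detect = bridge;
            if (bridge->ops & DRM_BRIDGE_OP_MODES)
                    bridge_connector->bridge_modes = bridge;
            if (bridge->ops & DRM_BRIDGE_OP_HPD)
                    bridge_connector->bridge_hpd = bridge;
    }
    /* with no DETECT bridge, a fixed panel type such as eDP is simply
     * reported as always connected
     */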
+
+/* connector initialization */
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
+{
+ struct drm_connector *connector = NULL;
+
+ connector = drm_bridge_connector_init(dp_display->drm_dev, dp_display->encoder);
+ if (IS_ERR(connector))
+ return connector;
+
+ drm_connector_attach_encoder(connector, dp_display->encoder);
+
+ return connector;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index c27bfceefdf0..f4b1ed1e24f7 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -7,12 +7,30 @@
#define _DP_DRM_H_
#include <linux/types.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_bridge.h>
#include "msm_drv.h"
#include "dp_display.h"
+struct msm_dp_bridge {
+ struct drm_bridge bridge;
+ struct msm_dp *dp_display;
+};
+
+#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
+
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
+ struct drm_encoder *encoder);
+
+void dp_bridge_enable(struct drm_bridge *drm_bridge);
+void dp_bridge_disable(struct drm_bridge *drm_bridge);
+void dp_bridge_post_disable(struct drm_bridge *drm_bridge);
+enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode);
+void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index d4d31e5bda07..36f0af02749f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -36,6 +36,7 @@ struct dp_link_request {
struct dp_link_private {
u32 prev_sink_count;
struct device *dev;
+ struct drm_device *drm_dev;
struct drm_dp_aux *aux;
struct dp_link dp_link;
@@ -128,14 +129,14 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
goto exit;
req->test_audio_period_ch_1 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_1 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_1 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_2 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_2 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_2 = 0x%x\n", ret);
/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
@@ -143,42 +144,42 @@ static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
goto exit;
req->test_audio_period_ch_3 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_3 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_3 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_4 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_4 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_4 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_5 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_5 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_5 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_6 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_6 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_6 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_7 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_7 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_7 = 0x%x\n", ret);
ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
if (ret == -EINVAL)
goto exit;
req->test_audio_period_ch_8 = ret;
- DRM_DEBUG_DP("test_audio_period_ch_8 = 0x%x\n", ret);
+ drm_dbg_dp(link->drm_dev, "test_audio_period_ch_8 = 0x%x\n", ret);
exit:
return ret;
}
@@ -205,7 +206,7 @@ static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
}
link->dp_link.test_audio.test_audio_pattern_type = data;
- DRM_DEBUG_DP("audio pattern type = 0x%x\n", data);
+ drm_dbg_dp(link->drm_dev, "audio pattern type = 0x%x\n", data);
exit:
return ret;
}
@@ -246,8 +247,9 @@ static int dp_link_parse_audio_mode(struct dp_link_private *link)
link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
link->dp_link.test_audio.test_audio_channel_count = channel_count;
- DRM_DEBUG_DP("sampling_rate = 0x%x, channel_count = 0x%x\n",
- sampling_rate, channel_count);
+ drm_dbg_dp(link->drm_dev,
+ "sampling_rate = 0x%x, channel_count = 0x%x\n",
+ sampling_rate, channel_count);
exit:
return ret;
}
@@ -486,7 +488,8 @@ static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
return ret;
}
- DRM_DEBUG_DP("link video pattern = 0x%x\n"
+ drm_dbg_dp(link->drm_dev,
+ "link video pattern = 0x%x\n"
"link dynamic range = 0x%x\n"
"link bit depth = 0x%x\n"
"TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n"
@@ -543,7 +546,8 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
}
link->request.test_link_rate = bp;
- DRM_DEBUG_DP("link rate = 0x%x\n", link->request.test_link_rate);
+ drm_dbg_dp(link->drm_dev, "link rate = 0x%x\n",
+ link->request.test_link_rate);
rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
if (rlen < 0) {
@@ -558,7 +562,8 @@ static int dp_link_parse_link_training_params(struct dp_link_private *link)
}
link->request.test_lane_count = bp;
- DRM_DEBUG_DP("lane count = 0x%x\n", link->request.test_lane_count);
+ drm_dbg_dp(link->drm_dev, "lane count = 0x%x\n",
+ link->request.test_lane_count);
return 0;
}
@@ -583,7 +588,7 @@ static int dp_link_parse_phy_test_params(struct dp_link_private *link)
link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
- DRM_DEBUG_DP("phy_test_pattern_sel = 0x%x\n", data);
+ drm_dbg_dp(link->drm_dev, "phy_test_pattern_sel = 0x%x\n", data);
switch (data) {
case DP_PHY_TEST_PATTERN_SEL_MASK:
@@ -639,10 +644,10 @@ static int dp_link_parse_request(struct dp_link_private *link)
return rlen;
}
- DRM_DEBUG_DP("device service irq vector = 0x%x\n", data);
+ drm_dbg_dp(link->drm_dev, "device service irq vector = 0x%x\n", data);
if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
- DRM_DEBUG_DP("no test requested\n");
+ drm_dbg_dp(link->drm_dev, "no test requested\n");
return 0;
}
@@ -657,11 +662,11 @@ static int dp_link_parse_request(struct dp_link_private *link)
}
if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
- DRM_DEBUG_DP("link 0x%x not supported\n", data);
+ drm_dbg_dp(link->drm_dev, "link 0x%x not supported\n", data);
goto end;
}
- DRM_DEBUG_DP("Test:(0x%x) requested\n", data);
+ drm_dbg_dp(link->drm_dev, "Test:(0x%x) requested\n", data);
link->request.test_requested = data;
if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
ret = dp_link_parse_phy_test_params(link);
@@ -732,8 +737,8 @@ static int dp_link_parse_sink_count(struct dp_link *dp_link)
link->dp_link.sink_count =
DP_GET_SINK_COUNT(link->dp_link.sink_count);
- DRM_DEBUG_DP("sink_count = 0x%x, cp_ready = 0x%x\n",
- link->dp_link.sink_count, cp_ready);
+ drm_dbg_dp(link->drm_dev, "sink_count = 0x%x, cp_ready = 0x%x\n",
+ link->dp_link.sink_count, cp_ready);
return 0;
}
@@ -774,7 +779,8 @@ static int dp_link_process_link_training_request(struct dp_link_private *link)
if (link->request.test_requested != DP_TEST_LINK_TRAINING)
return -EINVAL;
- DRM_DEBUG_DP("Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+ drm_dbg_dp(link->drm_dev,
+ "Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
DP_TEST_LINK_TRAINING,
link->request.test_link_rate,
link->request.test_lane_count);
@@ -852,13 +858,13 @@ bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
static void dp_link_parse_vx_px(struct dp_link_private *link)
{
- DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
+ drm_dbg_dp(link->drm_dev, "vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_voltage(link->link_status, 0),
drm_dp_get_adjust_request_voltage(link->link_status, 1),
drm_dp_get_adjust_request_voltage(link->link_status, 2),
drm_dp_get_adjust_request_voltage(link->link_status, 3));
- DRM_DEBUG_DP("px: 0=%d, 1=%d, 2=%d, 3=%d\n",
+ drm_dbg_dp(link->drm_dev, "px: 0=%d, 1=%d, 2=%d, 3=%d\n",
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
@@ -868,7 +874,8 @@ static void dp_link_parse_vx_px(struct dp_link_private *link)
* Update the voltage and pre-emphasis levels as per DPCD request
* vector.
*/
- DRM_DEBUG_DP("Current: v_level = 0x%x, p_level = 0x%x\n",
+ drm_dbg_dp(link->drm_dev,
+ "Current: v_level = 0x%x, p_level = 0x%x\n",
link->dp_link.phy_params.v_level,
link->dp_link.phy_params.p_level);
link->dp_link.phy_params.v_level =
@@ -878,7 +885,8 @@ static void dp_link_parse_vx_px(struct dp_link_private *link)
link->dp_link.phy_params.p_level >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
- DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
+ drm_dbg_dp(link->drm_dev,
+ "Requested: v_level = 0x%x, p_level = 0x%x\n",
link->dp_link.phy_params.v_level,
link->dp_link.phy_params.p_level);
}
@@ -895,7 +903,7 @@ static int dp_link_process_phy_test_pattern_request(
struct dp_link_private *link)
{
if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
- DRM_DEBUG_DP("no phy test\n");
+ drm_dbg_dp(link->drm_dev, "no phy test\n");
return -EINVAL;
}
@@ -907,11 +915,13 @@ static int dp_link_process_phy_test_pattern_request(
return -EINVAL;
}
- DRM_DEBUG_DP("Current: rate = 0x%x, lane count = 0x%x\n",
+ drm_dbg_dp(link->drm_dev,
+ "Current: rate = 0x%x, lane count = 0x%x\n",
link->dp_link.link_params.rate,
link->dp_link.link_params.num_lanes);
- DRM_DEBUG_DP("Requested: rate = 0x%x, lane count = 0x%x\n",
+ drm_dbg_dp(link->drm_dev,
+ "Requested: rate = 0x%x, lane count = 0x%x\n",
link->request.test_link_rate,
link->request.test_lane_count);
@@ -942,17 +952,18 @@ static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
*/
static int dp_link_process_link_status_update(struct dp_link_private *link)
{
- bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
- link->dp_link.link_params.num_lanes);
+ bool channel_eq_done = drm_dp_channel_eq_ok(link->link_status,
+ link->dp_link.link_params.num_lanes);
- bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
- link->dp_link.link_params.num_lanes);
+ bool clock_recovery_done = drm_dp_clock_recovery_ok(link->link_status,
+ link->dp_link.link_params.num_lanes);
- DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
+ drm_dbg_dp(link->drm_dev,
+ "channel_eq_done = %d, clock_recovery_done = %d\n",
channel_eq_done, clock_recovery_done);
- if (channel_eq_done && clock_recovery_done)
- return -EINVAL;
+ if (channel_eq_done && clock_recovery_done)
+ return -EINVAL;
return 0;
@@ -1058,7 +1069,8 @@ int dp_link_process_request(struct dp_link *dp_link)
}
}
- DRM_DEBUG_DP("sink request=%#x", dp_link->sink_request);
+ drm_dbg_dp(link->drm_dev, "sink request=%#x",
+ dp_link->sink_request);
return ret;
}
@@ -1090,18 +1102,22 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
{
int i;
int v_max = 0, p_max = 0;
+ struct dp_link_private *link;
if (!dp_link) {
DRM_ERROR("invalid input\n");
return -EINVAL;
}
+ link = container_of(dp_link, struct dp_link_private, dp_link);
+
/* use the max level across lanes */
for (i = 0; i < dp_link->link_params.num_lanes; i++) {
u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
i);
- DRM_DEBUG_DP("lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
+ drm_dbg_dp(link->drm_dev,
+ "lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
i, data_v, data_p);
if (v_max < data_v)
v_max = data_v;
@@ -1117,14 +1133,16 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
* the allowable range.
*/
if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
- DRM_DEBUG_DP("Requested vSwingLevel=%d, change to %d\n",
+ drm_dbg_dp(link->drm_dev,
+ "Requested vSwingLevel=%d, change to %d\n",
dp_link->phy_params.v_level,
DP_TRAIN_VOLTAGE_SWING_MAX);
dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
}
if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
- DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+ drm_dbg_dp(link->drm_dev,
+ "Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
DP_TRAIN_PRE_EMPHASIS_MAX);
dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
@@ -1133,13 +1151,14 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
&& (dp_link->phy_params.v_level ==
DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
- DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+ drm_dbg_dp(link->drm_dev,
+ "Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
DP_TRAIN_PRE_EMPHASIS_LVL_1);
dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
}
- DRM_DEBUG_DP("adjusted: v_level=%d, p_level=%d\n",
+ drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
dp_link->phy_params.v_level, dp_link->phy_params.p_level);
return 0;
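In summary, dp_link_adjust_levels() applies three clamps to the drive levels requested by the sink; as pseudocode (max_over_lanes() is shorthand for the per-lane loop above, not a real helper):

    v = max_over_lanes(req_voltage_swing);
    p = max_over_lanes(req_pre_emphasis);
    v = min(v, DP_TRAIN_VOLTAGE_SWING_MAX);
    p = min(p, DP_TRAIN_PRE_EMPHASIS_MAX);
    if (v == DP_TRAIN_VOLTAGE_SWING_LVL_2 && p > DP_TRAIN_PRE_EMPHASIS_LVL_1)
            p = DP_TRAIN_PRE_EMPHASIS_LVL_1;    /* combined electrical limit */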
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 26c3653c99ec..2be1733534a9 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -11,6 +11,7 @@
struct dp_panel_private {
struct device *dev;
+ struct drm_device *drm_dev;
struct dp_panel dp_panel;
struct drm_dp_aux *aux;
struct dp_link *link;
@@ -50,7 +51,8 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
/* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
if (temp & BIT(7)) {
- DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+ drm_dbg_dp(panel->drm_dev,
+ "using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
offset = DPRX_EXTENDED_DPCD_FIELD;
}
@@ -80,9 +82,9 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
- DRM_DEBUG_DP("version: %d.%d\n", major, minor);
- DRM_DEBUG_DP("link_rate=%d\n", link_info->rate);
- DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes);
+ drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
+ drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
+ drm_dbg_dp(panel->drm_dev, "lane_count=%d\n", link_info->num_lanes);
if (drm_dp_enhanced_frame_cap(dpcd))
link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
@@ -220,7 +222,8 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
}
if (panel->aux_cfg_update_done) {
- DRM_DEBUG_DP("read DPCD with updated AUX config\n");
+ drm_dbg_dp(panel->drm_dev,
+ "read DPCD with updated AUX config\n");
rc = dp_panel_read_dpcd(dp_panel);
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (rc || !is_link_rate_valid(bw_code) ||
@@ -259,7 +262,7 @@ u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
}
int dp_panel_get_modes(struct dp_panel *dp_panel,
- struct drm_connector *connector, struct dp_display_mode *mode)
+ struct drm_connector *connector)
{
if (!dp_panel) {
DRM_ERROR("invalid input\n");
@@ -334,7 +337,8 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
catalog = panel->catalog;
if (!panel->panel_on) {
- DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n");
+ drm_dbg_dp(panel->drm_dev,
+ "DP panel not enabled, handle TPG on next on\n");
return;
}
@@ -343,7 +347,7 @@ void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
return;
}
- DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__);
+ drm_dbg_dp(panel->drm_dev, "calling catalog tpg_enable\n");
dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
}
@@ -369,12 +373,12 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
catalog = panel->catalog;
drm_mode = &panel->dp_panel.dp_mode.drm_mode;
- DRM_DEBUG_DP("width=%d hporch= %d %d %d\n",
+ drm_dbg_dp(panel->drm_dev, "width=%d hporch= %d %d %d\n",
drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->hsync_end - drm_mode->hsync_start);
- DRM_DEBUG_DP("height=%d vporch= %d %d %d\n",
+ drm_dbg_dp(panel->drm_dev, "height=%d vporch= %d %d %d\n",
drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
drm_mode->vsync_start - drm_mode->vdisplay,
drm_mode->vsync_end - drm_mode->vsync_start);
@@ -418,30 +422,37 @@ int dp_panel_timing_cfg(struct dp_panel *dp_panel)
int dp_panel_init_panel_info(struct dp_panel *dp_panel)
{
struct drm_display_mode *drm_mode;
+ struct dp_panel_private *panel;
drm_mode = &dp_panel->dp_mode.drm_mode;
+ panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
/*
* print resolution info as this is a result
* of a user-initiated cable connection
*/
- DRM_DEBUG_DP("SET NEW RESOLUTION:\n");
- DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay,
- drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
- DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n",
+ drm_dbg_dp(panel->drm_dev, "SET NEW RESOLUTION:\n");
+ drm_dbg_dp(panel->drm_dev, "%dx%d@%dfps\n",
+ drm_mode->hdisplay, drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
+ drm_dbg_dp(panel->drm_dev,
+ "h_porches(back|front|width) = (%d|%d|%d)\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->hsync_end - drm_mode->hsync_start);
- DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n",
+ drm_dbg_dp(panel->drm_dev,
+ "v_porches(back|front|width) = (%d|%d|%d)\n",
drm_mode->vtotal - drm_mode->vsync_end,
drm_mode->vsync_start - drm_mode->vdisplay,
drm_mode->vsync_end - drm_mode->vsync_start);
- DRM_DEBUG_DP("pixel clock (KHz)=(%d)\n", drm_mode->clock);
- DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp);
+ drm_dbg_dp(panel->drm_dev, "pixel clock (KHz)=(%d)\n",
+ drm_mode->clock);
+ drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
dp_panel->dp_mode.bpp = max_t(u32, 18,
- min_t(u32, dp_panel->dp_mode.bpp, 30));
- DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
+ min_t(u32, dp_panel->dp_mode.bpp, 30));
+ drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
+ dp_panel->dp_mode.bpp);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 99739ea679a7..acb1987fa45f 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -49,7 +49,6 @@ struct dp_panel {
bool video_test;
u32 vic;
- u32 max_pclk_khz;
u32 max_dp_lanes;
u32 max_bw_code;
@@ -65,7 +64,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
u32 mode_pclk_khz);
int dp_panel_get_modes(struct dp_panel *dp_panel,
- struct drm_connector *connector, struct dp_display_mode *mode);
+ struct drm_connector *connector);
void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index 1056b8d5755b..8f9fed9fdafc 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -260,12 +260,10 @@ static int dp_parser_clock(struct dp_parser *parser)
}
}
- DRM_DEBUG_DP("clock parsing successful\n");
-
return 0;
}
-static int dp_parser_find_next_bridge(struct dp_parser *parser)
+int dp_parser_find_next_bridge(struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
struct drm_bridge *bridge;
@@ -279,7 +277,7 @@ static int dp_parser_find_next_bridge(struct dp_parser *parser)
return 0;
}
-static int dp_parser_parse(struct dp_parser *parser, int connector_type)
+static int dp_parser_parse(struct dp_parser *parser)
{
int rc = 0;
@@ -300,25 +298,6 @@ static int dp_parser_parse(struct dp_parser *parser, int connector_type)
if (rc)
return rc;
- /*
- * External bridges are mandatory for eDP interfaces: one has to
- * provide at least an eDP panel (which gets wrapped into panel-bridge).
- *
- * For DisplayPort interfaces external bridges are optional, so
- * silently ignore an error if one is not present (-ENODEV).
- */
- rc = dp_parser_find_next_bridge(parser);
- if (rc == -ENODEV) {
- if (connector_type == DRM_MODE_CONNECTOR_eDP) {
- DRM_ERROR("eDP: next bridge is not present\n");
- return rc;
- }
- } else if (rc) {
- if (rc != -EPROBE_DEFER)
- DRM_ERROR("DP: error parsing next bridge: %d\n", rc);
- return rc;
- }
-
/* Map the corresponding regulator information according to the
* version. Currently, since we only have one supported platform,
* map the regulator directly.
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index d371bae1c968..3a4d7972c069 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -125,7 +125,7 @@ struct dp_parser {
u32 max_dp_lanes;
struct drm_bridge *next_bridge;
- int (*parse)(struct dp_parser *parser, int connector_type);
+ int (*parse)(struct dp_parser *parser);
};
/**
@@ -141,4 +141,16 @@ struct dp_parser {
*/
struct dp_parser *dp_parser_get(struct platform_device *pdev);
+/**
+ * dp_parser_find_next_bridge() - find an additional bridge to DP
+ *
+ * @parser: dp_parser data from client
+ *
+ * This function is used to find any additional bridge attached to
+ * the DP controller. The eDP interface requires a panel bridge.
+ *
+ * Return: 0 if able to get the bridge, otherwise negative errno for failure.
+ */
+int dp_parser_find_next_bridge(struct dp_parser *parser);
+
#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index b48b45e92bfa..d9e011775ad8 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -16,6 +16,7 @@ struct dp_power_private {
struct dp_parser *parser;
struct platform_device *pdev;
struct device *dev;
+ struct drm_device *drm_dev;
struct clk *link_clk_src;
struct clk *pixel_provider;
struct clk *link_provider;
@@ -208,7 +209,12 @@ static int dp_power_clk_set_rate(struct dp_power_private *power,
int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
{
- DRM_DEBUG_DP("core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
+ struct dp_power_private *power;
+
+ power = container_of(dp_power, struct dp_power_private, dp_power);
+
+ drm_dbg_dp(power->drm_dev,
+ "core_clk_on=%d link_clk_on=%d stream_clk_on=%d\n",
dp_power->core_clks_on, dp_power->link_clks_on, dp_power->stream_clks_on);
if (pm_type == DP_CORE_PM)
@@ -240,22 +246,26 @@ int dp_power_clk_enable(struct dp_power *dp_power,
if (enable) {
if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
- DRM_DEBUG_DP("core clks already enabled\n");
+ drm_dbg_dp(power->drm_dev,
+ "core clks already enabled\n");
return 0;
}
if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
- DRM_DEBUG_DP("links clks already enabled\n");
+ drm_dbg_dp(power->drm_dev,
+ "links clks already enabled\n");
return 0;
}
if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
- DRM_DEBUG_DP("pixel clks already enabled\n");
+ drm_dbg_dp(power->drm_dev,
+ "pixel clks already enabled\n");
return 0;
}
if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
- DRM_DEBUG_DP("Enable core clks before link clks\n");
+ drm_dbg_dp(power->drm_dev,
+ "Enable core clks before link clks\n");
rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
if (rc) {
@@ -282,10 +292,11 @@ int dp_power_clk_enable(struct dp_power *dp_power,
else
dp_power->link_clks_on = enable;
- DRM_DEBUG_DP("%s clocks for %s\n",
+ drm_dbg_dp(power->drm_dev, "%s clocks for %s\n",
enable ? "enable" : "disable",
dp_parser_pm_name(pm_type));
- DRM_DEBUG_DP("strem_clks:%s link_clks:%s core_clks:%s\n",
+ drm_dbg_dp(power->drm_dev,
+ "strem_clks:%s link_clks:%s core_clks:%s\n",
dp_power->stream_clks_on ? "on" : "off",
dp_power->link_clks_on ? "on" : "off",
dp_power->core_clks_on ? "on" : "off");
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index c12e66aa42a3..1625328fa430 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -21,6 +21,11 @@ bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi)
return !(host_flags & MIPI_DSI_MODE_VIDEO);
}
+struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+{
+ return msm_dsi_host_get_dsc_config(msm_dsi->host);
+}
+
static int dsi_get_phy(struct msm_dsi *msm_dsi)
{
struct platform_device *pdev = msm_dsi->pdev;
@@ -273,7 +278,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
}
priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
- priv->connectors[priv->num_connectors++] = msm_dsi->connector;
return 0;
fail:
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index c8dedc95428c..580a1e6358bf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -114,6 +114,8 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
const struct drm_display_mode *mode);
+enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
+ const struct drm_display_mode *mode);
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host);
unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
@@ -152,6 +154,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi);
void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host);
void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host);
+struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 4dee6f0bdda6..d1b2a17b0a66 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -704,5 +704,85 @@ static inline uint32_t DSI_VERSION_MAJOR(uint32_t val)
#define REG_DSI_CPHY_MODE_CTRL 0x000002d4
+#define REG_DSI_VIDEO_COMPRESSION_MODE_CTRL 0x0000029c
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK 0xffff0000
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT 16
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_WC__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK 0x00003f00
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT 8
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK 0x000000c0
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT 6
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK 0x00000030
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT 4
+static inline uint32_t DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(uint32_t val)
+{
+ return ((val) << DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__SHIFT) & DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM__MASK;
+}
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL_EN 0x00000001
+
+#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x000002a4
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK 0x3f000000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT 24
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_DATATYPE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK 0x00c00000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT 22
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_PKT_PER_LINE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK 0x00300000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT 20
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EOL_BYTE_NUM__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM1_EN 0x00010000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK 0x00003f00
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT 8
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK 0x000000c0
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT 6
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_PKT_PER_LINE__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK 0x00000030
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT 4
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EOL_BYTE_NUM__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_EN 0x00000001
+
+#define REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x000002a8
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK 0xffff0000
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT 16
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM1_SLICE_WIDTH__MASK;
+}
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK 0x0000ffff
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT 0
+static inline uint32_t DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(uint32_t val)
+{
+ return ((val) << DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__SHIFT) & DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
+}
#endif /* DSI_XML */
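
The new register definitions above follow dsi.xml.h's generated-header convention: each field gets a __MASK/__SHIFT pair plus an inline helper that shifts a value into position and masks it so it cannot spill into neighbouring fields. A minimal sketch of composing a register word from these helpers, illustrative only and assuming the definitions above are in scope:

	/* Sketch, not part of the patch: building a VIDEO_COMPRESSION word. */
	#include <stdint.h>

	static inline uint32_t pack_video_compression_ctrl(uint32_t wc,
							   uint32_t datatype)
	{
		uint32_t reg = 0;

		reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(wc);             /* bits 31:16 */
		reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(datatype); /* bits 13:8 */
		reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN;                 /* bit 0 */
		return reg;
	}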
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index d51e70fab93d..a95d5df52653 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -31,6 +31,8 @@
#define DSI_RESET_TOGGLE_DELAY_MS 20
+static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc);
+
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
u32 ver;
@@ -157,6 +159,7 @@ struct msm_dsi_host {
struct regmap *sfpb;
struct drm_display_mode *mode;
+ struct msm_display_dsc_config *dsc;
/* connected device info */
struct device_node *device_node;
@@ -909,6 +912,68 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0));
}
+static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay)
+{
+ struct msm_display_dsc_config *dsc = msm_host->dsc;
+ u32 reg, intf_width, reg_ctrl, reg_ctrl2;
+ u32 slice_per_intf, total_bytes_per_intf;
+ u32 pkt_per_line;
+ u32 bytes_in_slice;
+ u32 eol_byte_num;
+
+ /* first calculate the DSC parameters and then program
+ * the compression mode registers
+ */
+ intf_width = hdisplay;
+ slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width);
+
+ /* If slice_count is smaller than slice_per_intf
+ * then default to 1 slice per packet. This can happen
+ * during partial update.
+ */
+ if (slice_per_intf > dsc->drm->slice_count)
+ dsc->drm->slice_count = 1;
+
+ slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width);
+ bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8);
+
+ dsc->drm->slice_chunk_size = bytes_in_slice;
+
+ total_bytes_per_intf = bytes_in_slice * slice_per_intf;
+
+ eol_byte_num = total_bytes_per_intf % 3;
+ pkt_per_line = slice_per_intf / dsc->drm->slice_count;
+
+ if (is_cmd_mode) /* packet data type */
+ reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE);
+ else
+ reg = DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(MIPI_DSI_COMPRESSED_PIXEL_STREAM);
+
+ /* The low 16 bits of DSI_VIDEO_COMPRESSION_MODE_CTRL and
+ * DSI_COMMAND_COMPRESSION_MODE_CTRL share the same field layout,
+ * so the common code below uses the DSI_VIDEO_COMPRESSION_MODE_XXXX
+ * helpers for both
+ */
+ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1);
+ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num);
+ reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN;
+
+ if (is_cmd_mode) {
+ reg_ctrl = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL);
+ reg_ctrl2 = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2);
+
+ reg_ctrl &= ~0xffff;
+ reg_ctrl |= reg;
+
+ reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK;
+ reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(bytes_in_slice);
+
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
+ dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+ } else {
+ dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
+ }
+}
+
static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
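
The arithmetic in dsi_update_dsc_timing() above is easiest to sanity-check with concrete numbers. The following standalone sketch reruns the field computations for an assumed 1080-pixel-wide interface split into two 540-pixel slices at 8 bpp (illustrative values, not taken from the patch):

	/* Sketch: DSC timing math with assumed inputs. */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int hdisplay = 1080, slice_width = 540;
		unsigned int slice_count = 2, bpp = 8;

		unsigned int slice_per_intf = DIV_ROUND_UP(hdisplay, slice_width); /* 2 */
		unsigned int bytes_in_slice = DIV_ROUND_UP(slice_width * bpp, 8);  /* 540 */
		unsigned int total = bytes_in_slice * slice_per_intf;              /* 1080 */

		printf("eol_byte_num=%u pkt_per_line=%u\n",
		       total % 3, slice_per_intf / slice_count); /* 0 and 1 */
		return 0;
	}

The patch then programs the PKT_PER_LINE register field with pkt_per_line >> 1.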
@@ -941,7 +1006,38 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
hdisplay /= 2;
}
+ if (msm_host->dsc) {
+ struct msm_display_dsc_config *dsc = msm_host->dsc;
+
+ /* update dsc params with timing params */
+ if (!dsc || !mode->hdisplay || !mode->vdisplay) {
+ pr_err("DSI: invalid input: pic_width: %d pic_height: %d\n",
+ mode->hdisplay, mode->vdisplay);
+ return;
+ }
+
+ dsc->drm->pic_width = mode->hdisplay;
+ dsc->drm->pic_height = mode->vdisplay;
+ DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height);
+
+ /* we do the DSC parameter calculations here so that the
+ * panel driver can use these parameters
+ */
+ dsi_populate_dsc_params(dsc);
+
+ /* Divide the display by 3 but keep the back/front porch
+ * and pulse width the same
+ */
+ h_total -= hdisplay;
+ hdisplay /= 3;
+ h_total += hdisplay;
+ ha_end = ha_start + hdisplay;
+ }
+
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ if (msm_host->dsc)
+ dsi_update_dsc_timing(msm_host, false, mode->hdisplay);
+
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
DSI_ACTIVE_H_END(ha_end));
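
The divide-by-3 in the block above reflects the 3:1 ratio of the 8 bpc to 8 bpp DSC configuration this patch supports (24 bit/px uncompressed down to 8 bit/px): only the active picture width shrinks, while the porches and sync pulse keep their uncompressed widths. The same bookkeeping, sketched with assumed numbers:

	/* Sketch: compressed active width, assumed 1200/1080 timings. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int h_total = 1200, hdisplay = 1080, ha_start = 60;

		h_total -= hdisplay;   /* porches + pulse only: 120 */
		hdisplay /= 3;         /* compressed active width: 360 */
		h_total += hdisplay;   /* new total: 480 */
		printf("h_total=%u ha_end=%u\n", h_total, ha_start + hdisplay);
		return 0;
	}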
@@ -960,8 +1056,14 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
+ if (msm_host->dsc)
+ dsi_update_dsc_timing(msm_host, true, mode->hdisplay);
+
/* image data and 1 byte write_memory_start cmd */
- wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ if (!msm_host->dsc)
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ else
+ wc = mode->hdisplay / 2 + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
@@ -1341,10 +1443,10 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
dsi_get_bpp(msm_host->format) / 8;
len = dsi_cmd_dma_add(msm_host, msg);
- if (!len) {
+ if (len < 0) {
pr_err("%s: failed to add cmd type = 0x%x\n",
__func__, msg->type);
- return -EINVAL;
+ return len;
}
/* for video mode, do not send cmds more than
@@ -1363,10 +1465,14 @@ static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
}
ret = dsi_cmd_dma_tx(msm_host, len);
- if (ret < len) {
- pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
- __func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
- return -ECOMM;
+ if (ret < 0) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret);
+ return ret;
+ } else if (ret < len) {
+ pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n",
+ __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len);
+ return -EIO;
}
return len;
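
The rework above separates two failure cases the old code conflated: a negative return from the DMA helpers is a genuine transport error and is now propagated unchanged, while a short but otherwise successful transfer gets its own errno. A generic standalone sketch of the pattern (names illustrative):

	/* Sketch: negative errno vs. short-transfer split. */
	#include <errno.h>

	static int check_tx(int ret, int len)
	{
		if (ret < 0)
			return ret;   /* transport error: keep the real errno */
		if (ret < len)
			return -EIO;  /* transferred, but truncated */
		return len;           /* full length went out */
	}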
@@ -1722,6 +1828,133 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
return -EINVAL;
}
+static u32 dsi_dsc_rc_buf_thresh[DSC_NUM_BUF_RANGES - 1] = {
+ 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62,
+ 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e
+};
+
+/* only 8bpc, 8bpp added */
+static char min_qp[DSC_NUM_BUF_RANGES] = {
+ 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13
+};
+
+static char max_qp[DSC_NUM_BUF_RANGES] = {
+ 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15
+};
+
+static char bpg_offset[DSC_NUM_BUF_RANGES] = {
+ 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
+};
+
+static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc)
+{
+ int mux_words_size;
+ int groups_per_line, groups_total;
+ int min_rate_buffer_size;
+ int hrd_delay;
+ int pre_num_extra_mux_bits, num_extra_mux_bits;
+ int slice_bits;
+ int target_bpp_x16;
+ int data;
+ int final_value, final_scale;
+ int i;
+
+ dsc->drm->rc_model_size = 8192;
+ dsc->drm->first_line_bpg_offset = 12;
+ dsc->drm->rc_edge_factor = 6;
+ dsc->drm->rc_tgt_offset_high = 3;
+ dsc->drm->rc_tgt_offset_low = 3;
+ dsc->drm->simple_422 = 0;
+ dsc->drm->convert_rgb = 1;
+ dsc->drm->vbr_enable = 0;
+
+ /* handle only bpp = bpc = 8 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++)
+ dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i];
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ dsc->drm->rc_range_params[i].range_min_qp = min_qp[i];
+ dsc->drm->rc_range_params[i].range_max_qp = max_qp[i];
+ dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i];
+ }
+
+ dsc->drm->initial_offset = 6144; /* bpp = 8 */
+ if (dsc->drm->bits_per_pixel != 8)
+ dsc->drm->initial_offset = 2048; /* bpp != 8, e.g. bpp = 12 */
+
+ mux_words_size = 48; /* bpc == 8/10 */
+ if (dsc->drm->bits_per_component == 12)
+ mux_words_size = 64;
+
+ dsc->drm->initial_xmit_delay = 512;
+ dsc->drm->initial_scale_value = 32;
+ dsc->drm->first_line_bpg_offset = 12;
+ dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1;
+
+ /* bpc 8 */
+ dsc->drm->flatness_min_qp = 3;
+ dsc->drm->flatness_max_qp = 12;
+ dsc->drm->rc_quant_incr_limit0 = 11;
+ dsc->drm->rc_quant_incr_limit1 = 11;
+ dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+
+ /* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of
+ * params are calculated
+ */
+ groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3);
+ dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8;
+ if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8)
+ dsc->drm->slice_chunk_size++;
+
+ /* rbs-min */
+ min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset +
+ dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel +
+ groups_per_line * dsc->drm->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel);
+
+ dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay;
+
+ dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size /
+ (dsc->drm->rc_model_size - dsc->drm->initial_offset);
+
+ slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height;
+
+ groups_total = groups_per_line * dsc->drm->slice_height;
+
+ data = dsc->drm->first_line_bpg_offset * 2048;
+
+ dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1));
+
+ pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2);
+
+ num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size -
+ ((slice_bits - pre_num_extra_mux_bits) % mux_words_size));
+
+ data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits);
+ dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total);
+
+ /* bpp * 16 + 0.5 */
+ data = dsc->drm->bits_per_pixel * 16;
+ data *= 2;
+ data++;
+ data /= 2;
+ target_bpp_x16 = data;
+
+ data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16;
+ final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits;
+ dsc->drm->final_offset = final_value;
+
+ final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value);
+
+ data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset);
+ dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data;
+
+ dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8);
+
+ return 0;
+}
+
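To make the rate-control seeding above concrete, here is a standalone sketch that reruns the arithmetic with assumed inputs (slice_width = 540, bpp = 8, plus the constants hard-coded by dsi_populate_dsc_params()):

	/* Sketch: DSC rate-control seed values, assumed inputs. */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int rc_model_size = 8192, initial_offset = 6144;
		int initial_xmit_delay = 512, bpp = 8, slice_width = 540;
		int first_line_bpg_offset = 12;

		int groups_per_line = DIV_ROUND_UP(slice_width, 3);        /* 180 */
		int min_rbs = rc_model_size - initial_offset +
			      initial_xmit_delay * bpp +
			      groups_per_line * first_line_bpg_offset;     /* 8304 */
		int hrd_delay = DIV_ROUND_UP(min_rbs, bpp);                /* 1038 */

		printf("initial_dec_delay=%d initial_scale_value=%d\n",
		       hrd_delay - initial_xmit_delay,                        /* 526 */
		       8 * rc_model_size / (rc_model_size - initial_offset)); /* 32 */
		return 0;
	}
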
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
@@ -1931,9 +2164,24 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct drm_panel *panel;
int ret;
msm_host->dev = dev;
+ panel = msm_dsi_host_get_panel(&msm_host->base);
+
+ if (!IS_ERR(panel) && panel->dsc) {
+ struct msm_display_dsc_config *dsc = msm_host->dsc;
+
+ if (!dsc) {
+ dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL);
+ if (!dsc)
+ return -ENOMEM;
+ dsc->drm = panel->dsc;
+ msm_host->dsc = dsc;
+ }
+ }
+
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
@@ -2092,9 +2340,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
}
ret = dsi_cmds2buf_tx(msm_host, msg);
- if (ret < msg->tx_len) {
+ if (ret < 0) {
pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
return ret;
+ } else if (ret < msg->tx_len) {
+ pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret);
+ return -ECOMM;
}
/*
@@ -2409,6 +2660,32 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
return 0;
}
+enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host,
+ const struct drm_display_mode *mode)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ struct msm_display_dsc_config *dsc = msm_host->dsc;
+ int pic_width = mode->hdisplay;
+ int pic_height = mode->vdisplay;
+
+ if (!msm_host->dsc)
+ return MODE_OK;
+
+ if (pic_width % dsc->drm->slice_width) {
+ pr_err("DSI: pic_width %d has to be a multiple of slice width %d\n",
+ pic_width, dsc->drm->slice_width);
+ return MODE_H_ILLEGAL;
+ }
+
+ if (pic_height % dsc->drm->slice_height) {
+ pr_err("DSI: pic_height %d has to be a multiple of slice height %d\n",
+ pic_height, dsc->drm->slice_height);
+ return MODE_V_ILLEGAL;
+ }
+
+ return MODE_OK;
+}
+
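msm_dsi_host_check_dsc() above enforces that the active area is an integer number of slices in both dimensions before a mode is accepted. A quick standalone illustration with assumed slice dimensions:

	/* Sketch: slice-alignment rule, assumed 540x8 slices. */
	#include <stdio.h>

	int main(void)
	{
		int slice_w = 540, slice_h = 8;
		int hdisplay = 1080, vdisplay = 2340;

		/* 1080 % 540 == 0, but 2340 % 8 == 4: vertical reject */
		printf("h ok: %d, v ok: %d\n",
		       hdisplay % slice_w == 0, vdisplay % slice_h == 0);
		return 0;
	}
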
struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host)
{
return of_drm_find_panel(to_msm_dsi_host(host)->device_node);
@@ -2498,3 +2775,10 @@ void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host)
dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER,
DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER);
}
+
+struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ return msm_host->dsc;
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 9f6af0f0fe00..be8ef9bb6edc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -573,6 +573,17 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
dsi_mgr_bridge_power_on(bridge);
}
+static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ int id = dsi_mgr_bridge_get_id(bridge);
+ struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+ struct mipi_dsi_host *host = msm_dsi->host;
+
+ return msm_dsi_host_check_dsc(host, mode);
+}
+
static const struct drm_connector_funcs dsi_mgr_connector_funcs = {
.detect = dsi_mgr_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -593,6 +604,7 @@ static const struct drm_bridge_funcs dsi_mgr_bridge_funcs = {
.disable = dsi_mgr_bridge_disable,
.post_disable = dsi_mgr_bridge_post_disable,
.mode_set = dsi_mgr_bridge_mode_set,
+ .mode_valid = dsi_mgr_bridge_mode_valid,
};
/* initialize connector when we're connected to a drm_panel */
@@ -665,6 +677,8 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
+ drm_bridge_add(bridge);
+
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
@@ -735,6 +749,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
{
+ drm_bridge_remove(bridge);
}
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 75557ac99adf..8199c53567f4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -1062,7 +1062,7 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
- .io_start = { 0xc994400, 0xc996000 },
+ .io_start = { 0xc994400, 0xc996400 },
.num_dsi_phy = 2,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 6e506feb111f..66ed1919a1db 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -586,7 +586,7 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{
char clk_name[32], parent[32], vco_name[32];
- char parent2[32], parent3[32], parent4[32];
+ char parent2[32];
struct clk_init_data vco_init = {
.parent_data = &(const struct clk_parent_data) {
.fw_name = "ref",
@@ -687,15 +687,13 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
- snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
- snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
hw = devm_clk_hw_register_mux(dev, clk_name,
((const char *[]){
- parent, parent2, parent3, parent4
- }), 4, 0, pll_7nm->phy->base +
+ parent, parent2,
+ }), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 2, 0, NULL);
+ 0, 1, 0, NULL);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
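
The mux fix above drops two parents the hardware cannot actually select and, correspondingly, shrinks the field width passed to devm_clk_hw_register_mux() from 2 bits to 1: the width argument is the size of the select field and must cover the parent count. The relationship, sketched:

	/* Sketch: select-field width needed for a given parent count. */
	static unsigned int mux_width_for(unsigned int num_parents)
	{
		unsigned int width = 0;

		while ((1u << width) < num_parents)
			width++;   /* 2 parents -> 1 bit, 4 parents -> 2 bits */
		return width;
	}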
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
deleted file mode 100644
index 14b0ef02287e..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __EDP_CONNECTOR_H__
-#define __EDP_CONNECTOR_H__
-
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <drm/display/drm_dp_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-
-#include "msm_drv.h"
-
-#define edp_read(offset) msm_readl((offset))
-#define edp_write(offset, data) msm_writel((data), (offset))
-
-struct edp_ctrl;
-struct edp_aux;
-struct edp_phy;
-
-struct msm_edp {
- struct drm_device *dev;
- struct platform_device *pdev;
-
- struct drm_connector *connector;
- struct drm_bridge *bridge;
-
- /* the encoder we are hooked to (outside of eDP block) */
- struct drm_encoder *encoder;
-
- struct edp_ctrl *ctrl;
-
- int irq;
-};
-
-/* eDP bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
-void edp_bridge_destroy(struct drm_bridge *bridge);
-
-/* eDP connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
-
-/* AUX */
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux);
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
-
-/* Phy */
-bool msm_edp_phy_ready(struct edp_phy *phy);
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);
-
-/* Ctrl */
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
-int msm_edp_ctrl_init(struct msm_edp *edp);
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid);
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info);
-/* @pixel_rate is in kHz */
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn);
-
-#endif /* __EDP_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
deleted file mode 100644
index 9ac1963c679e..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ /dev/null
@@ -1,1374 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/display/drm_dp_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define VDDA_UA_ON_LOAD 100000 /* uA units */
-#define VDDA_UA_OFF_LOAD 100 /* uA units */
-
-#define DPCD_LINK_VOLTAGE_MAX 4
-#define DPCD_LINK_PRE_EMPHASIS_MAX 4
-
-#define EDP_LINK_BW_MAX DP_LINK_BW_2_7
-
-/* Link training return value */
-#define EDP_TRAIN_FAIL -1
-#define EDP_TRAIN_SUCCESS 0
-#define EDP_TRAIN_RECONFIG 1
-
-#define EDP_CLK_MASK_AHB BIT(0)
-#define EDP_CLK_MASK_AUX BIT(1)
-#define EDP_CLK_MASK_LINK BIT(2)
-#define EDP_CLK_MASK_PIXEL BIT(3)
-#define EDP_CLK_MASK_MDP_CORE BIT(4)
-#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL)
-#define EDP_CLK_MASK_AUX_CHAN \
- (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE)
-#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN)
-
-#define EDP_BACKLIGHT_MAX 255
-
-#define EDP_INTR_STATUS1 \
- (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \
- EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \
- EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR)
-#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
-#define EDP_INTR_STATUS2 \
- (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \
- EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \
- EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED)
-#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
-
-struct edp_ctrl {
- struct platform_device *pdev;
-
- void __iomem *base;
-
- /* regulators */
- struct regulator *vdda_vreg; /* 1.8 V */
- struct regulator *lvl_vreg;
-
- /* clocks */
- struct clk *aux_clk;
- struct clk *pixel_clk;
- struct clk *ahb_clk;
- struct clk *link_clk;
- struct clk *mdp_core_clk;
-
- /* gpios */
- struct gpio_desc *panel_en_gpio;
- struct gpio_desc *panel_hpd_gpio;
-
- /* completion and mutex */
- struct completion idle_comp;
- struct mutex dev_mutex; /* To protect device power status */
-
- /* work queue */
- struct work_struct on_work;
- struct work_struct off_work;
- struct workqueue_struct *workqueue;
-
- /* Interrupt register lock */
- spinlock_t irq_lock;
-
- bool edp_connected;
- bool power_on;
-
- /* edid raw data */
- struct edid *edid;
-
- struct drm_dp_aux *drm_aux;
-
- /* dpcd raw data */
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
-
- /* Link status */
- u8 link_rate;
- u8 lane_cnt;
- u8 v_level;
- u8 p_level;
-
- /* Timing status */
- u8 interlaced;
- u32 pixel_rate; /* in kHz */
- u32 color_depth;
-
- struct edp_aux *aux;
- struct edp_phy *phy;
-};
-
-struct edp_pixel_clk_div {
- u32 rate; /* in kHz */
- u32 m;
- u32 n;
-};
-
-#define EDP_PIXEL_CLK_NUM 8
-static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
- { /* Link clock = 162MHz, source clock = 810MHz */
- {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 60}, /* FHD 1920x1080@60Hz */
- {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 33, 193}, /* AUO B116HAN03.0 Panel */
- {141400, 48, 275}, /* AUO B133HTN01.2 Panel */
- },
- { /* Link clock = 270MHz, source clock = 675MHz */
- {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 50}, /* FHD 1920x1080@60Hz */
- {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 63, 307}, /* AUO B116HAN03.0 Panel */
- {141400, 53, 253}, /* AUO B133HTN01.2 Panel */
- },
-};
-
-static int edp_clk_init(struct edp_ctrl *ctrl)
-{
- struct platform_device *pdev = ctrl->pdev;
- int ret;
-
- ctrl->aux_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(ctrl->aux_clk)) {
- ret = PTR_ERR(ctrl->aux_clk);
- pr_err("%s: Can't find core clock, %d\n", __func__, ret);
- ctrl->aux_clk = NULL;
- return ret;
- }
-
- ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(ctrl->pixel_clk)) {
- ret = PTR_ERR(ctrl->pixel_clk);
- pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
- ctrl->pixel_clk = NULL;
- return ret;
- }
-
- ctrl->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(ctrl->ahb_clk)) {
- ret = PTR_ERR(ctrl->ahb_clk);
- pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
- ctrl->ahb_clk = NULL;
- return ret;
- }
-
- ctrl->link_clk = msm_clk_get(pdev, "link");
- if (IS_ERR(ctrl->link_clk)) {
- ret = PTR_ERR(ctrl->link_clk);
- pr_err("%s: Can't find link clock, %d\n", __func__, ret);
- ctrl->link_clk = NULL;
- return ret;
- }
-
- /* need mdp core clock to receive irq */
- ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
- if (IS_ERR(ctrl->mdp_core_clk)) {
- ret = PTR_ERR(ctrl->mdp_core_clk);
- pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
- ctrl->mdp_core_clk = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- int ret;
-
- DBG("mask=%x", clk_mask);
- /* ahb_clk should be enabled first */
- if (clk_mask & EDP_CLK_MASK_AHB) {
- ret = clk_prepare_enable(ctrl->ahb_clk);
- if (ret) {
- pr_err("%s: Failed to enable ahb clk\n", __func__);
- goto f0;
- }
- }
- if (clk_mask & EDP_CLK_MASK_AUX) {
- ret = clk_set_rate(ctrl->aux_clk, 19200000);
- if (ret) {
- pr_err("%s: Failed to set rate aux clk\n", __func__);
- goto f1;
- }
- ret = clk_prepare_enable(ctrl->aux_clk);
- if (ret) {
- pr_err("%s: Failed to enable aux clk\n", __func__);
- goto f1;
- }
- }
- /* Need to set rate and enable link_clk prior to pixel_clk */
- if (clk_mask & EDP_CLK_MASK_LINK) {
- DBG("edp->link_clk, set_rate %ld",
- (unsigned long)ctrl->link_rate * 27000000);
- ret = clk_set_rate(ctrl->link_clk,
- (unsigned long)ctrl->link_rate * 27000000);
- if (ret) {
- pr_err("%s: Failed to set rate to link clk\n",
- __func__);
- goto f2;
- }
-
- ret = clk_prepare_enable(ctrl->link_clk);
- if (ret) {
- pr_err("%s: Failed to enable link clk\n", __func__);
- goto f2;
- }
- }
- if (clk_mask & EDP_CLK_MASK_PIXEL) {
- DBG("edp->pixel_clk, set_rate %ld",
- (unsigned long)ctrl->pixel_rate * 1000);
- ret = clk_set_rate(ctrl->pixel_clk,
- (unsigned long)ctrl->pixel_rate * 1000);
- if (ret) {
- pr_err("%s: Failed to set rate to pixel clk\n",
- __func__);
- goto f3;
- }
-
- ret = clk_prepare_enable(ctrl->pixel_clk);
- if (ret) {
- pr_err("%s: Failed to enable pixel clk\n", __func__);
- goto f3;
- }
- }
- if (clk_mask & EDP_CLK_MASK_MDP_CORE) {
- ret = clk_prepare_enable(ctrl->mdp_core_clk);
- if (ret) {
- pr_err("%s: Failed to enable mdp core clk\n", __func__);
- goto f4;
- }
- }
-
- return 0;
-
-f4:
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
-f3:
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
-f2:
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
-f1:
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-f0:
- return ret;
-}
-
-static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- if (clk_mask & EDP_CLK_MASK_MDP_CORE)
- clk_disable_unprepare(ctrl->mdp_core_clk);
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-}
-
-static int edp_regulator_init(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- DBG("");
- ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
- ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
- ret);
- ctrl->vdda_vreg = NULL;
- return ret;
- }
- ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
- ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
- if (ret) {
- pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
- ret);
- ctrl->lvl_vreg = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_regulator_enable(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
- if (ret < 0) {
- pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
- goto vdda_set_fail;
- }
-
- ret = regulator_enable(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
- goto vdda_enable_fail;
- }
-
- ret = regulator_enable(ctrl->lvl_vreg);
- if (ret) {
- pr_err("Failed to enable lvl-vdd reg regulator, %d", ret);
- goto lvl_enable_fail;
- }
-
- DBG("exit");
- return 0;
-
-lvl_enable_fail:
- regulator_disable(ctrl->vdda_vreg);
-vdda_enable_fail:
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-vdda_set_fail:
- return ret;
-}
-
-static void edp_regulator_disable(struct edp_ctrl *ctrl)
-{
- regulator_disable(ctrl->lvl_vreg);
- regulator_disable(ctrl->vdda_vreg);
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-}
-
-static int edp_gpio_config(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
- if (IS_ERR(ctrl->panel_hpd_gpio)) {
- ret = PTR_ERR(ctrl->panel_hpd_gpio);
- ctrl->panel_hpd_gpio = NULL;
- pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
- if (IS_ERR(ctrl->panel_en_gpio)) {
- ret = PTR_ERR(ctrl->panel_en_gpio);
- ctrl->panel_en_gpio = NULL;
- pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- DBG("gpio on");
-
- return 0;
-}
-
-static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable)
-{
- unsigned long flags;
-
- DBG("%d", enable);
- spin_lock_irqsave(&ctrl->irq_lock, flags);
- if (enable) {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2);
- } else {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0);
- }
- spin_unlock_irqrestore(&ctrl->irq_lock, flags);
- DBG("exit");
-}
-
-static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
-{
- u32 prate;
- u32 lrate;
- u32 bpp;
- u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
- u8 lane;
-
- prate = ctrl->pixel_rate;
- bpp = ctrl->color_depth * 3;
-
- /*
- * By default, use the maximum link rate and minimum lane count,
- * so that we can do rate down shift during link training.
- */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- lrate = 270000; /* in kHz */
- lrate *= ctrl->link_rate;
- lrate /= 10; /* in kByte, 10 bits --> 8 bits */
-
- for (lane = 1; lane <= max_lane; lane <<= 1) {
- if (lrate >= prate)
- break;
- lrate <<= 1;
- }
-
- ctrl->lane_cnt = lane;
- DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
-}
-
-static void edp_config_ctrl(struct edp_ctrl *ctrl)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
-
- depth = EDP_6BIT;
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
-
- data |= EDP_CONFIGURATION_CTRL_COLOR(depth);
-
- if (!ctrl->interlaced) /* progressive */
- data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;
-
- data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
- EDP_CONFIGURATION_CTRL_STATIC_MVID);
-
- edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
-}
-
-static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
-{
- edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
- /* Make sure H/W status is set */
- wmb();
-}
-
-static int edp_lane_set_write(struct edp_ctrl *ctrl,
- u8 voltage_level, u8 pre_emphasis_level)
-{
- int i;
- u8 buf[4];
-
- if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
- voltage_level |= 0x04;
-
- if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
- pre_emphasis_level |= 0x04;
-
- pre_emphasis_level <<= 3;
-
- for (i = 0; i < 4; i++)
- buf[i] = voltage_level | pre_emphasis_level;
-
- DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
- if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
- pr_err("%s: Set sw/pe to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
-{
- u8 p = pattern;
-
- DBG("pattern=%x", p);
- if (drm_dp_dpcd_write(ctrl->drm_aux,
- DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
- pr_err("%s: Set training pattern to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
- const u8 *link_status)
-{
- int i;
- u8 max = 0;
- u8 data;
-
- /* use the max level across lanes */
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_voltage(link_status, i);
- DBG("lane=%d req_voltage_swing=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
-
- /* use the max level across lanes */
- max = 0;
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
- DBG("lane=%d req_pre_emphasis=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
- DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
-}
-
-static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
-{
- int cnt = 10;
- u32 data;
- u32 shift = train - 1;
-
- DBG("train=%d", train);
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
- while (--cnt) {
- data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
- if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
- break;
- }
-
- if (cnt == 0)
- pr_err("%s: set link_train=%d failed\n", __func__, train);
-}
-
-static const u8 vm_pre_emphasis[4][4] = {
- {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
-};
-
-/* voltage swing, 0.2v and 1.0v are not support */
-static const u8 vm_voltage_swing[4][4] = {
- {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
- {0x1E, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */
-};
-
-static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
-{
- u32 value0;
- u32 value1;
-
- DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);
-
- value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
- value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
-
- /* Configure host and panel only if both values are allowed */
- if (value0 != 0xFF && value1 != 0xFF) {
- msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
- return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
- }
-
- return -EINVAL;
-}
-
-static int edp_start_link_train_1(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- u8 old_v_level;
- int tries;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- tries = 0;
- old_v_level = ctrl->v_level;
- while (1) {
- drm_dp_link_train_clock_recovery_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
- break;
- }
-
- if (old_v_level == ctrl->v_level) {
- tries++;
- if (tries >= 5) {
- ret = -1;
- break;
- }
- } else {
- tries = 0;
- old_v_level = ctrl->v_level;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_start_link_train_2(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- int tries = 0;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
-
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- while (1) {
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- tries++;
- if (tries > 10) {
- ret = -1;
- break;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
-{
- u32 prate, lrate, bpp;
- u8 rate, lane, max_lane;
- int changed = 0;
-
- rate = ctrl->link_rate;
- lane = ctrl->lane_cnt;
- max_lane = drm_dp_max_lane_count(ctrl->dpcd);
-
- bpp = ctrl->color_depth * 3;
- prate = ctrl->pixel_rate;
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
- rate -= 4; /* reduce rate */
- changed++;
- }
-
- if (changed) {
- if (lane >= 1 && lane < max_lane)
- lane <<= 1; /* increase lane */
-
- lrate = 270000; /* in kHz */
- lrate *= rate;
- lrate /= 10; /* kByte, 10 bits --> 8 bits */
- lrate *= lane;
-
- DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
- lrate, prate, rate, lane,
- ctrl->pixel_rate,
- bpp);
-
- if (lrate > prate) {
- ctrl->link_rate = rate;
- ctrl->lane_cnt = lane;
- DBG("new rate=%d %d", rate, lane);
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = edp_train_pattern_set_write(ctrl, 0);
-
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- return ret;
-}
-
-static int edp_do_link_train(struct edp_ctrl *ctrl)
-{
- u8 values[2];
- int ret;
-
- DBG("");
- /*
- * Set the current link rate and lane cnt to panel. They may have been
- * adjusted and the values are different from them in DPCD CAP
- */
- values[0] = ctrl->lane_cnt;
- values[1] = ctrl->link_rate;
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
- sizeof(values)) < 0)
- return EDP_TRAIN_FAIL;
-
- ctrl->v_level = 0; /* start from default level */
- ctrl->p_level = 0;
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_1(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
- pr_err("%s: Training 1 failed", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 1 completed successfully");
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_2(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
- pr_err("%s: Training 2 failed", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 2 completed successfully");
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
-clear:
- edp_clear_training_pattern(ctrl);
-
- return ret;
-}
-
-static void edp_clock_synchrous(struct edp_ctrl *ctrl, int sync)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);
-
- if (sync)
- data |= EDP_MISC1_MISC0_SYNC;
- else
- data &= ~EDP_MISC1_MISC0_SYNC;
-
- /* only legacy rgb mode supported */
- depth = EDP_6BIT; /* Default */
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
- else if (ctrl->color_depth == 10)
- depth = EDP_10BIT;
- else if (ctrl->color_depth == 12)
- depth = EDP_12BIT;
- else if (ctrl->color_depth == 16)
- depth = EDP_16BIT;
-
- data |= EDP_MISC1_MISC0_COLOR(depth);
-
- edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
-}
-
-static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
-{
- u32 n_multi, m_multi = 5;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- n_multi = 1;
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- n_multi = 2;
- } else {
- pr_err("%s: Invalid link rate, %d\n", __func__,
- ctrl->link_rate);
- return -EINVAL;
- }
-
- edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
- edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);
-
- return 0;
-}
-
-static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
-{
- u32 data = 0;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
-
- if (enable)
- data |= EDP_MAINLINK_CTRL_ENABLE;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
-}
-
-static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
-{
- if (enable) {
- edp_regulator_enable(ctrl);
- edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- msm_edp_phy_ctrl(ctrl->phy, 1);
- msm_edp_aux_ctrl(ctrl->aux, 1);
- gpiod_set_value(ctrl->panel_en_gpio, 1);
- } else {
- gpiod_set_value(ctrl->panel_en_gpio, 0);
- msm_edp_aux_ctrl(ctrl->aux, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- edp_regulator_disable(ctrl);
- }
-}
-
-static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
-{
- u32 m, n;
-
- if (enable) {
- /* Enable link channel clocks */
- edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);
-
- msm_edp_phy_vm_pe_init(ctrl->phy);
-
- /* Make sure phy is programed */
- wmb();
- msm_edp_phy_ready(ctrl->phy);
-
- edp_config_ctrl(ctrl);
- msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
- edp_sw_mvid_nvid(ctrl, m, n);
- edp_mainlink_ctrl(ctrl, 1);
- } else {
- edp_mainlink_ctrl(ctrl, 0);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
- }
-}
-
-static int edp_ctrl_training(struct edp_ctrl *ctrl)
-{
- int ret;
-
- /* Do link training only when power is on */
- if (!ctrl->power_on)
- return -EINVAL;
-
-train_start:
- ret = edp_do_link_train(ctrl);
- if (ret == EDP_TRAIN_RECONFIG) {
- /* Re-configure main link */
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
-
- /* Make sure link is fully disabled */
- wmb();
- usleep_range(500, 1000);
-
- msm_edp_phy_ctrl(ctrl->phy, 1);
- edp_ctrl_link_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- goto train_start;
- }
-
- return ret;
-}
-
-static void edp_ctrl_on_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, on_work);
- u8 value;
- int ret;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->power_on) {
- DBG("already on");
- goto unlock_ret;
- }
-
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_link_enable(ctrl, 1);
-
- edp_ctrl_irq_enable(ctrl, 1);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret < 0)
- goto fail;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- if (ret < 0)
- goto fail;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
- * Table 5-52, "Sink Control Field" (register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- ctrl->power_on = true;
-
- /* Start link training */
- ret = edp_ctrl_training(ctrl);
- if (ret != EDP_TRAIN_SUCCESS)
- goto fail;
-
- DBG("DONE");
- goto unlock_ret;
-
-fail:
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- ctrl->power_on = false;
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-static void edp_ctrl_off_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, off_work);
- unsigned long time_left;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (!ctrl->power_on) {
- DBG("already off");
- goto unlock_ret;
- }
-
- reinit_completion(&ctrl->idle_comp);
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
-
- time_left = wait_for_completion_timeout(&ctrl->idle_comp,
- msecs_to_jiffies(500));
- if (!time_left)
- DBG("%s: idle pattern timedout\n", __func__);
-
- edp_state_ctrl(ctrl, 0);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- u8 value;
- int ret;
-
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret > 0) {
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- }
- }
-
- edp_ctrl_irq_enable(ctrl, 0);
-
- edp_ctrl_link_enable(ctrl, 0);
-
- edp_ctrl_phy_aux_enable(ctrl, 0);
-
- ctrl->power_on = false;
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
-{
- u32 isr1, isr2, mask1, mask2;
- u32 ack;
-
- DBG("");
- spin_lock(&ctrl->irq_lock);
- isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
- isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);
-
- mask1 = isr1 & EDP_INTR_MASK1;
- mask2 = isr2 & EDP_INTR_MASK2;
-
- isr1 &= ~mask1; /* remove masks bit */
- isr2 &= ~mask2;
-
- DBG("isr=%x mask=%x isr2=%x mask2=%x",
- isr1, mask1, isr2, mask2);
-
- ack = isr1 & EDP_INTR_STATUS1;
- ack <<= 1; /* ack bits */
- ack |= mask1;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack);
-
- ack = isr2 & EDP_INTR_STATUS2;
- ack <<= 1; /* ack bits */
- ack |= mask2;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack);
- spin_unlock(&ctrl->irq_lock);
-
- if (isr1 & EDP_INTERRUPT_REG_1_HPD)
- DBG("edp_hpd");
-
- if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO)
- DBG("edp_video_ready");
-
- if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) {
- DBG("idle_patterns_sent");
- complete(&ctrl->idle_comp);
- }
-
- msm_edp_aux_irq(ctrl->aux, isr1);
-
- return IRQ_HANDLED;
-}
-
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
-{
- if (on)
- queue_work(ctrl->workqueue, &ctrl->on_work);
- else
- queue_work(ctrl->workqueue, &ctrl->off_work);
-}
-
-int msm_edp_ctrl_init(struct msm_edp *edp)
-{
- struct edp_ctrl *ctrl = NULL;
- struct device *dev;
- int ret;
-
- if (!edp) {
- pr_err("%s: edp is NULL!\n", __func__);
- return -EINVAL;
- }
-
- dev = &edp->pdev->dev;
- ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- edp->ctrl = ctrl;
- ctrl->pdev = edp->pdev;
-
- ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
- if (IS_ERR(ctrl->base))
- return PTR_ERR(ctrl->base);
-
- /* Get regulator, clock, gpio, pwm */
- ret = edp_regulator_init(ctrl);
- if (ret) {
- pr_err("%s:regulator init fail\n", __func__);
- return ret;
- }
- ret = edp_clk_init(ctrl);
- if (ret) {
- pr_err("%s:clk init fail\n", __func__);
- return ret;
- }
- ret = edp_gpio_config(ctrl);
- if (ret) {
- pr_err("%s:failed to configure GPIOs: %d", __func__, ret);
- return ret;
- }
-
- /* Init aux and phy */
- ctrl->aux = msm_edp_aux_init(edp, ctrl->base, &ctrl->drm_aux);
- if (!ctrl->aux || !ctrl->drm_aux) {
- pr_err("%s:failed to init aux\n", __func__);
- return -ENOMEM;
- }
-
- ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
- if (!ctrl->phy) {
- pr_err("%s:failed to init phy\n", __func__);
- ret = -ENOMEM;
- goto err_destory_aux;
- }
-
- spin_lock_init(&ctrl->irq_lock);
- mutex_init(&ctrl->dev_mutex);
- init_completion(&ctrl->idle_comp);
-
- /* setup workqueue */
- ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
- INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
- INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);
-
- return 0;
-
-err_destory_aux:
- msm_edp_aux_destroy(dev, ctrl->aux);
- ctrl->aux = NULL;
- return ret;
-}
-
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
-{
- if (!ctrl)
- return;
-
- if (ctrl->workqueue) {
- destroy_workqueue(ctrl->workqueue);
- ctrl->workqueue = NULL;
- }
-
- if (ctrl->aux) {
- msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux);
- ctrl->aux = NULL;
- }
-
- kfree(ctrl->edid);
- ctrl->edid = NULL;
-
- mutex_destroy(&ctrl->dev_mutex);
-}
-
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
-{
- mutex_lock(&ctrl->dev_mutex);
- DBG("connect status = %d", ctrl->edp_connected);
- if (ctrl->edp_connected) {
- mutex_unlock(&ctrl->dev_mutex);
- return true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd,
- DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) {
- pr_err("%s: AUX channel is NOT ready\n", __func__);
- memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE);
- } else {
- ctrl->edp_connected = true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-
- DBG("exit: connect status=%d", ctrl->edp_connected);
-
- mutex_unlock(&ctrl->dev_mutex);
-
- return ctrl->edp_connected;
-}
-
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid)
-{
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->edid) {
- if (edid) {
- DBG("Just return edid buffer");
- *edid = ctrl->edid;
- }
- goto unlock_ret;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- /* Initialize link rate as panel max link rate */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
- if (!ctrl->edid) {
- pr_err("%s: edid read fail\n", __func__);
- goto disable_ret;
- }
-
- if (edid)
- *edid = ctrl->edid;
-
-disable_ret:
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return 0;
-}
-
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info)
-{
- u32 hstart_from_sync, vstart_from_sync;
- u32 data;
- int ret = 0;
-
- mutex_lock(&ctrl->dev_mutex);
- /*
- * Need to keep color depth, pixel rate and
- * interlaced information in ctrl context
- */
- ctrl->color_depth = info->bpc;
- ctrl->pixel_rate = mode->clock;
- ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
-
- /* Fill initial link config based on passed in timing */
- edp_fill_link_cfg(ctrl);
-
- if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) {
- pr_err("%s, fail to prepare enable ahb clk\n", __func__);
- ret = -EINVAL;
- goto unlock_ret;
- }
- edp_clock_synchrous(ctrl, 1);
-
- /* Configure eDP timing to HW */
- edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER,
- EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) |
- EDP_TOTAL_HOR_VER_VERT(mode->vtotal));
-
- vstart_from_sync = mode->vtotal - mode->vsync_start;
- hstart_from_sync = mode->htotal - mode->hsync_start;
- edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC,
- EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) |
- EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync));
-
- data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(
- mode->vsync_end - mode->vsync_start);
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(
- mode->hsync_end - mode->hsync_start);
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC;
- edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data);
-
- edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER,
- EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) |
- EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay));
-
- edp_clk_disable(ctrl, EDP_CLK_MASK_AHB);
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return ret;
-}
-
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn)
-{
- const struct edp_pixel_clk_div *divs;
- u32 err = 1; /* 1% error tolerance */
- u32 clk_err;
- int i;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- divs = clk_divs[0];
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- divs = clk_divs[1];
- } else {
- pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate);
- return false;
- }
-
- for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) {
- clk_err = abs(divs[i].rate - pixel_rate);
- if ((divs[i].rate * err / 100) >= clk_err) {
- if (pm)
- *pm = divs[i].m;
- if (pn)
- *pn = divs[i].n;
- return true;
- }
- }
-
- DBG("pixel clock %d(kHz) not supported", pixel_rate);
-
- return false;
-}
-
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ec324352e862..cf24e68864ba 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -142,6 +142,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
/* HDCP needs physical address of hdmi register */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
config->mmio_name);
+ if (!res) {
+ ret = -EINVAL;
+ goto fail;
+ }
hdmi->mmio_phy_addr = res->start;
hdmi->qfprom_mmio = msm_ioremap(pdev, config->qfprom_mmio_name);
@@ -298,9 +302,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
drm_connector_attach_encoder(hdmi->connector, hdmi->encoder);
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (hdmi->irq < 0) {
- ret = hdmi->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
+ if (!hdmi->irq) {
+ ret = -EINVAL;
+ DRM_DEV_ERROR(dev->dev, "failed to get irq\n");
goto fail;
}
@@ -322,7 +326,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
}
priv->bridges[priv->num_bridges++] = hdmi->bridge;
- priv->connectors[priv->num_connectors++] = hdmi->connector;
platform_set_drvdata(pdev, hdmi);
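
The IRQ fix above rests on irq_of_parse_and_map() returning 0 on failure rather than a negative errno, which made the old irq < 0 test dead code. The corrected pattern, sketched in isolation:

	/* Sketch: irq_of_parse_and_map() signals failure with 0, not < 0. */
	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;   /* no valid mapping found */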
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 10ebe2089cb6..97c24010c4d1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -15,6 +15,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge)
struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
msm_hdmi_hpd_disable(hdmi_bridge);
+ drm_bridge_remove(bridge);
}
static void msm_hdmi_power_on(struct drm_bridge *bridge)
@@ -349,6 +350,8 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
+ drm_bridge_add(bridge);
+
ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto fail;
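
With connectors now built via drm_bridge_connector, the internal bridge must be registered with the framework so it can be enumerated and torn down; drm_bridge_add() at init pairs with drm_bridge_remove() at destroy (msm_drm_uninit() additionally sweeps priv->bridges, as shown later in this patch). The lifecycle, compressed:

/* init path */
drm_bridge_add(bridge);		/* publish on the global bridge list */
ret = drm_bridge_attach(encoder, bridge, NULL,
			DRM_BRIDGE_ATTACH_NO_CONNECTOR);

/* destroy path: must mirror drm_bridge_add() */
drm_bridge_remove(bridge);
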
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index affa95eb05fc..4a3dda23e3e0 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -232,6 +233,9 @@ static int msm_drm_uninit(struct device *dev)
drm_mode_config_cleanup(ddev);
+ for (i = 0; i < priv->num_bridges; i++)
+ drm_bridge_remove(priv->bridges[i]);
+
pm_runtime_get_sync(dev);
msm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
@@ -256,17 +260,6 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
-#define KMS_MDP4 4
-#define KMS_MDP5 5
-#define KMS_DPU 3
-
-static int get_mdp_ver(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
-
- return (int) (unsigned long) of_device_get_match_data(dev);
-}
-
#include <linux/of_address.h>
bool msm_use_mmu(struct drm_device *dev)
@@ -353,7 +346,6 @@ static int msm_init_vram(struct drm_device *dev)
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
- struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
struct msm_kms *kms;
@@ -401,30 +393,18 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
msm_gem_shrinker_init(ddev);
- switch (get_mdp_ver(pdev)) {
- case KMS_MDP4:
- kms = mdp4_kms_init(ddev);
- priv->kms = kms;
- break;
- case KMS_MDP5:
- kms = mdp5_kms_init(ddev);
- break;
- case KMS_DPU:
- kms = dpu_kms_init(ddev);
- priv->kms = kms;
- break;
- default:
+ if (priv->kms_init) {
+ ret = priv->kms_init(ddev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
+ priv->kms = NULL;
+ goto err_msm_uninit;
+ }
+ kms = priv->kms;
+ } else {
/* valid only for the dummy headless case, where of_node=NULL */
WARN_ON(dev->of_node);
kms = NULL;
- break;
- }
-
- if (IS_ERR(kms)) {
- DRM_DEV_ERROR(dev, "failed to load kms\n");
- ret = PTR_ERR(kms);
- priv->kms = NULL;
- goto err_msm_uninit;
}
/* Enable normalization of plane zpos */
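
Instead of switching on an MDP version baked into OF match data, the driver now stores a kms_init callback chosen by whichever KMS sub-driver probed the device. A hedged sketch of the sub-driver side of that contract (names as used elsewhere in this series):

/* sketch: a KMS sub-driver hands its init hook to the shared probe */
static int dpu_dev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, dpu_kms_init);
}

The headless GPU-only path passes NULL instead, which is why the WARN_ON(dev->of_node) branch above tolerates a missing KMS.
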
@@ -613,7 +593,7 @@ static int msm_ioctl_get_param(struct drm_device *dev, void *data,
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
- if (args->pipe != MSM_PIPE_3D0)
+ if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
return -EINVAL;
gpu = priv->gpu;
@@ -622,7 +602,7 @@ static int msm_ioctl_get_param(struct drm_device *dev, void *data,
return -ENXIO;
return gpu->funcs->get_param(gpu, file->driver_priv,
- args->param, &args->value);
+ args->param, &args->value, &args->len);
}
static int msm_ioctl_set_param(struct drm_device *dev, void *data,
@@ -632,7 +612,7 @@ static int msm_ioctl_set_param(struct drm_device *dev, void *data,
struct drm_msm_param *args = data;
struct msm_gpu *gpu;
- if (args->pipe != MSM_PIPE_3D0)
+ if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
return -EINVAL;
gpu = priv->gpu;
@@ -641,7 +621,7 @@ static int msm_ioctl_set_param(struct drm_device *dev, void *data,
return -ENXIO;
return gpu->funcs->set_param(gpu, file->driver_priv,
- args->param, args->value);
+ args->param, args->value, args->len);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
@@ -722,6 +702,23 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
return msm_gem_get_iova(obj, ctx->aspace, iova);
}
+static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
+ struct drm_file *file, struct drm_gem_object *obj,
+ uint64_t iova)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+
+ if (!priv->gpu)
+ return -EINVAL;
+
+ /* Only supported if per-process address space is supported: */
+ if (priv->gpu->aspace == ctx->aspace)
+ return -EOPNOTSUPP;
+
+ return msm_gem_set_iova(obj, ctx->aspace, iova);
+}
+
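
From userspace, MSM_INFO_SET_IOVA is the write-side counterpart of MSM_INFO_GET_IOVA: the address travels in `value`, and a value of zero releases the mapping (see msm_gem_set_iova() later in this patch). A hedged usage sketch, assuming the drm_msm_gem_info uapi layout from this series:

struct drm_msm_gem_info req = {
	.handle = bo_handle,
	.info = MSM_INFO_SET_IOVA,
	.value = 0x100000000ull,	/* hypothetical GPU address */
};

/* pin the BO at a caller-chosen GPU virtual address */
drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);

/* later: an iova of zero clears the vma again */
req.value = 0;
drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
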
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -736,6 +733,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
switch (args->info) {
case MSM_INFO_GET_OFFSET:
case MSM_INFO_GET_IOVA:
+ case MSM_INFO_SET_IOVA:
/* value returned as immediate, not pointer, so len==0: */
if (args->len)
return -EINVAL;
@@ -760,6 +758,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
case MSM_INFO_GET_IOVA:
ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
break;
+ case MSM_INFO_SET_IOVA:
+ ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
+ break;
case MSM_INFO_SET_NAME:
/* length check should leave room for terminating null: */
if (args->len >= sizeof(msm_obj->name)) {
@@ -973,50 +974,7 @@ static const struct drm_driver msm_driver = {
.patchlevel = MSM_VERSION_PATCHLEVEL,
};
-static int __maybe_unused msm_runtime_suspend(struct device *dev)
-{
- struct msm_drm_private *priv = dev_get_drvdata(dev);
- struct msm_mdss *mdss = priv->mdss;
-
- DBG("");
-
- if (mdss && mdss->funcs)
- return mdss->funcs->disable(mdss);
-
- return 0;
-}
-
-static int __maybe_unused msm_runtime_resume(struct device *dev)
-{
- struct msm_drm_private *priv = dev_get_drvdata(dev);
- struct msm_mdss *mdss = priv->mdss;
-
- DBG("");
-
- if (mdss && mdss->funcs)
- return mdss->funcs->enable(mdss);
-
- return 0;
-}
-
-static int __maybe_unused msm_pm_suspend(struct device *dev)
-{
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return msm_runtime_suspend(dev);
-}
-
-static int __maybe_unused msm_pm_resume(struct device *dev)
-{
- if (pm_runtime_suspended(dev))
- return 0;
-
- return msm_runtime_resume(dev);
-}
-
-static int __maybe_unused msm_pm_prepare(struct device *dev)
+int msm_pm_prepare(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
@@ -1027,7 +985,7 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
return drm_mode_config_helper_suspend(ddev);
}
-static void __maybe_unused msm_pm_complete(struct device *dev)
+void msm_pm_complete(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
@@ -1039,8 +997,6 @@ static void __maybe_unused msm_pm_complete(struct device *dev)
}
static const struct dev_pm_ops msm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
- SET_RUNTIME_PM_OPS(msm_runtime_suspend, msm_runtime_resume, NULL)
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
};
@@ -1055,25 +1011,11 @@ static const struct dev_pm_ops msm_pm_ops = {
* is no external component that we need to add since LVDS is within MDP4
* itself.
*/
-static int add_components_mdp(struct device *mdp_dev,
+static int add_components_mdp(struct device *master_dev,
struct component_match **matchptr)
{
- struct device_node *np = mdp_dev->of_node;
+ struct device_node *np = master_dev->of_node;
struct device_node *ep_node;
- struct device *master_dev;
-
- /*
- * on MDP4 based platforms, the MDP platform device is the component
- * master that adds other display interface components to itself.
- *
- * on MDP5 based platforms, the MDSS platform device is the component
- * master that adds MDP5 and other display interface components to
- * itself.
- */
- if (of_device_is_compatible(np, "qcom,mdp4"))
- master_dev = mdp_dev;
- else
- master_dev = mdp_dev->parent;
for_each_endpoint_of_node(np, ep_node) {
struct device_node *intf;
@@ -1082,7 +1024,7 @@ static int add_components_mdp(struct device *mdp_dev,
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret) {
- DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
+ DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
of_node_put(ep_node);
return ret;
}
@@ -1114,60 +1056,6 @@ static int add_components_mdp(struct device *mdp_dev,
return 0;
}
-static int find_mdp_node(struct device *dev, void *data)
-{
- return of_match_node(dpu_dt_match, dev->of_node) ||
- of_match_node(mdp5_dt_match, dev->of_node);
-}
-
-static int add_display_components(struct platform_device *pdev,
- struct component_match **matchptr)
-{
- struct device *mdp_dev;
- struct device *dev = &pdev->dev;
- int ret;
-
- /*
- * MDP5/DPU based devices don't have a flat hierarchy. There is a top
- * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
- * Populate the children devices, find the MDP5/DPU node, and then add
- * the interfaces to our components list.
- */
- switch (get_mdp_ver(pdev)) {
- case KMS_MDP5:
- case KMS_DPU:
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to populate children devices\n");
- return ret;
- }
-
- mdp_dev = device_find_child(dev, NULL, find_mdp_node);
- if (!mdp_dev) {
- DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
- of_platform_depopulate(dev);
- return -ENODEV;
- }
-
- put_device(mdp_dev);
-
- /* add the MDP component itself */
- drm_of_component_match_add(dev, matchptr, component_compare_of,
- mdp_dev->of_node);
- break;
- case KMS_MDP4:
- /* MDP4 */
- mdp_dev = dev;
- break;
- }
-
- ret = add_components_mdp(mdp_dev, matchptr);
- if (ret)
- of_platform_depopulate(dev);
-
- return ret;
-}
-
/*
* We don't know what the best binding is to link the gpu with the drm device.
* For now, we just hunt for all the possible gpus that we support, and add them
@@ -1208,90 +1096,68 @@ static void msm_drm_unbind(struct device *dev)
msm_drm_uninit(dev);
}
-static const struct component_master_ops msm_drm_ops = {
+const struct component_master_ops msm_drm_ops = {
.bind = msm_drm_bind,
.unbind = msm_drm_unbind,
};
-/*
- * Platform driver:
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
+int msm_drv_probe(struct device *master_dev,
+ int (*kms_init)(struct drm_device *dev))
{
- struct component_match *match = NULL;
struct msm_drm_private *priv;
+ struct component_match *match = NULL;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
+ priv->kms_init = kms_init;
+ dev_set_drvdata(master_dev, priv);
- switch (get_mdp_ver(pdev)) {
- case KMS_MDP5:
- ret = mdp5_mdss_init(pdev);
- break;
- case KMS_DPU:
- ret = dpu_mdss_init(pdev);
- break;
- default:
- ret = 0;
- break;
- }
- if (ret) {
- platform_set_drvdata(pdev, NULL);
- return ret;
- }
-
- if (get_mdp_ver(pdev)) {
- ret = add_display_components(pdev, &match);
+ /* Add mdp components if we have KMS. */
+ if (kms_init) {
+ ret = add_components_mdp(master_dev, &match);
if (ret)
- goto fail;
+ return ret;
}
- ret = add_gpu_components(&pdev->dev, &match);
+ ret = add_gpu_components(master_dev, &match);
if (ret)
- goto fail;
+ return ret;
/* on all devices that I am aware of, iommu's which can map
* any address the cpu can see are used:
*/
- ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ ret = dma_set_mask_and_coherent(master_dev, ~0);
if (ret)
- goto fail;
+ return ret;
- ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
if (ret)
- goto fail;
+ return ret;
return 0;
+}
-fail:
- of_platform_depopulate(&pdev->dev);
-
- if (priv->mdss && priv->mdss->funcs)
- priv->mdss->funcs->destroy(priv->mdss);
+/*
+ * Platform driver:
+ * Used only for headless GPU instances
+ */
- return ret;
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+ return msm_drv_probe(&pdev->dev, NULL);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
- struct msm_drm_private *priv = platform_get_drvdata(pdev);
- struct msm_mdss *mdss = priv->mdss;
-
component_master_del(&pdev->dev, &msm_drm_ops);
- of_platform_depopulate(&pdev->dev);
-
- if (mdss && mdss->funcs)
- mdss->funcs->destroy(mdss);
return 0;
}
-static void msm_pdev_shutdown(struct platform_device *pdev)
+void msm_drv_shutdown(struct platform_device *pdev)
{
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *drm = priv ? priv->dev : NULL;
@@ -1302,28 +1168,12 @@ static void msm_pdev_shutdown(struct platform_device *pdev)
drm_atomic_helper_shutdown(drm);
}
-static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
- { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
- { .compatible = "qcom,msm8998-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,qcm2290-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,sc7280-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,sc8180x-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,sm8150-mdss", .data = (void *)KMS_DPU },
- { .compatible = "qcom,sm8250-mdss", .data = (void *)KMS_DPU },
- {}
-};
-MODULE_DEVICE_TABLE(of, dt_match);
-
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
- .shutdown = msm_pdev_shutdown,
+ .shutdown = msm_drv_shutdown,
.driver = {
.name = "msm",
- .of_match_table = dt_match,
.pm = &msm_pm_ops,
},
};
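
The platform driver left here is intentionally minimal: without an of_match_table it only binds the headless "msm" device, and msm_pdev_probe() passes a NULL kms_init, so msm_drv_probe() skips the display component match and only hunts for GPUs. Condensed, the aggregate-device assembly looks like this (intf_node stands in for an endpoint's remote DT node):

/* sketch: match list built from DT, then handed to the component core */
struct component_match *match = NULL;

drm_of_component_match_add(dev, &match, component_compare_of, intf_node);
return component_master_add_with_match(dev, &msm_drm_ops, match);
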
@@ -1340,6 +1190,8 @@ static int __init msm_drm_register(void)
msm_hdmi_register();
msm_dp_register();
adreno_register();
+ msm_mdp4_register();
+ msm_mdss_register();
return platform_driver_register(&msm_platform_driver);
}
@@ -1347,6 +1199,8 @@ static void __exit msm_drm_unregister(void)
{
DBG("fini");
platform_driver_unregister(&msm_platform_driver);
+ msm_mdss_unregister();
+ msm_mdp4_unregister();
msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d661debb50f1..08388d742d65 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -30,6 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
@@ -46,20 +47,10 @@ struct msm_gem_vma;
struct msm_disp_state;
#define MAX_CRTCS 8
-#define MAX_PLANES 20
-#define MAX_ENCODERS 8
#define MAX_BRIDGES 8
-#define MAX_CONNECTORS 8
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
-enum msm_mdp_plane_property {
- PLANE_PROP_ZPOS,
- PLANE_PROP_ALPHA,
- PLANE_PROP_PREMULTIPLIED,
- PLANE_PROP_MAX_NUM
-};
-
enum msm_dp_controller {
MSM_DP_CONTROLLER_0,
MSM_DP_CONTROLLER_1,
@@ -74,14 +65,10 @@ enum msm_dp_controller {
* enum msm_display_caps - features/capabilities supported by displays
* @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
* @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
- * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
- * @MSM_DISPLAY_CAP_EDID: EDID supported
*/
enum msm_display_caps {
MSM_DISPLAY_CAP_VID_MODE = BIT(0),
MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
- MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
- MSM_DISPLAY_CAP_EDID = BIT(3),
};
/**
@@ -101,12 +88,15 @@ enum msm_event_wait {
* @num_lm: number of layer mixers used
* @num_enc: number of compression encoder blocks used
* @num_intf: number of interfaces the panel is mounted on
+ * @num_dspp: number of dspp blocks used
+ * @num_dsc: number of Display Stream Compression (DSC) blocks used
*/
struct msm_display_topology {
u32 num_lm;
u32 num_enc;
u32 num_intf;
u32 num_dspp;
+ u32 num_dsc;
};
/* Commit/Event thread specific structure */
@@ -116,18 +106,21 @@ struct msm_drm_thread {
struct kthread_worker *worker;
};
+/* DSC config */
+struct msm_display_dsc_config {
+ struct drm_dsc_config *drm;
+};
+
struct msm_drm_private {
struct drm_device *dev;
struct msm_kms *kms;
+ int (*kms_init)(struct drm_device *dev);
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* top level MDSS wrapper device (for MDP5/DPU only) */
- struct msm_mdss *mdss;
-
/* possibly this should be in the kms component, but it is
* shared by both mdp4 and mdp5..
*/
@@ -184,26 +177,14 @@ struct msm_drm_private {
struct workqueue_struct *wq;
- unsigned int num_planes;
- struct drm_plane *planes[MAX_PLANES];
-
unsigned int num_crtcs;
struct drm_crtc *crtcs[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
- unsigned int num_encoders;
- struct drm_encoder *encoders[MAX_ENCODERS];
-
unsigned int num_bridges;
struct drm_bridge *bridges[MAX_BRIDGES];
- unsigned int num_connectors;
- struct drm_connector *connectors[MAX_CONNECTORS];
-
- /* Properties */
- struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
-
/* VRAM carveout, used when no IOMMU: */
struct {
unsigned long size;
@@ -250,29 +231,6 @@ void msm_atomic_state_free(struct drm_atomic_state *state);
int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
-int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages,
- u64 range_start, u64 range_end);
-void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma);
-void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma);
-int msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int prot,
- struct sg_table *sgt, int npages);
-void msm_gem_close_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma);
-
-
-struct msm_gem_address_space *
-msm_gem_address_space_get(struct msm_gem_address_space *aspace);
-
-void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
-
-struct msm_gem_address_space *
-msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
- u64 va_start, u64 size);
-
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
@@ -313,10 +271,20 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
void msm_fbdev_free(struct drm_device *dev);
struct hdmi;
+#ifdef CONFIG_DRM_MSM_HDMI
int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
+#else
+static inline int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ return -EINVAL;
+}
+static inline void __init msm_hdmi_register(void) {}
+static inline void __exit msm_hdmi_unregister(void) {}
+#endif
struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
@@ -330,6 +298,7 @@ void msm_dsi_snapshot(struct msm_disp_state *disp_state, struct msm_dsi *msm_dsi
bool msm_dsi_is_cmd_mode(struct msm_dsi *msm_dsi);
bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
+struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@@ -358,6 +327,11 @@ static inline bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
return false;
}
+
+static inline struct msm_display_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_DRM_MSM_DP
@@ -365,20 +339,11 @@ int __init msm_dp_register(void);
void __exit msm_dp_unregister(void);
int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
-int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
-int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
-int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
-void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode);
-
-struct drm_bridge *msm_dp_bridge_init(struct msm_dp *dp_display,
- struct drm_device *dev,
- struct drm_encoder *encoder);
void msm_dp_irq_postinstall(struct msm_dp *dp_display);
void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp_display);
void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
+bool msm_dp_wide_bus_available(const struct msm_dp *dp_display);
#else
static inline int __init msm_dp_register(void)
@@ -394,27 +359,6 @@ static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
{
return -EINVAL;
}
-static inline int msm_dp_display_enable(struct msm_dp *dp,
- struct drm_encoder *encoder)
-{
- return -EINVAL;
-}
-static inline int msm_dp_display_disable(struct msm_dp *dp,
- struct drm_encoder *encoder)
-{
- return -EINVAL;
-}
-static inline int msm_dp_display_pre_disable(struct msm_dp *dp,
- struct drm_encoder *encoder)
-{
- return -EINVAL;
-}
-static inline void msm_dp_display_mode_set(struct msm_dp *dp,
- struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
-}
static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
{
@@ -429,12 +373,44 @@ static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
{
}
+static inline bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
+{
+ return false;
+}
+
+#endif
+
+#ifdef CONFIG_DRM_MSM_MDP4
+void msm_mdp4_register(void);
+void msm_mdp4_unregister(void);
+#else
+static inline void msm_mdp4_register(void) {}
+static inline void msm_mdp4_unregister(void) {}
+#endif
+
+#ifdef CONFIG_DRM_MSM_MDP5
+void msm_mdp_register(void);
+void msm_mdp_unregister(void);
+#else
+static inline void msm_mdp_register(void) {}
+static inline void msm_mdp_unregister(void) {}
#endif
-void __init msm_mdp_register(void);
-void __exit msm_mdp_unregister(void);
-void __init msm_dpu_register(void);
-void __exit msm_dpu_unregister(void);
+#ifdef CONFIG_DRM_MSM_DPU
+void msm_dpu_register(void);
+void msm_dpu_unregister(void);
+#else
+static inline void msm_dpu_register(void) {}
+static inline void msm_dpu_unregister(void) {}
+#endif
+
+#ifdef CONFIG_DRM_MSM_MDSS
+void msm_mdss_register(void);
+void msm_mdss_unregister(void);
+#else
+static inline void msm_mdss_register(void) {}
+static inline void msm_mdss_unregister(void) {}
+#endif
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
@@ -534,4 +510,16 @@ static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
return clamp(remaining_jiffies, 0LL, (s64)INT_MAX);
}
+/* Driver helpers */
+
+extern const struct component_master_ops msm_drm_ops;
+
+int msm_pm_prepare(struct device *dev);
+void msm_pm_complete(struct device *dev);
+
+int msm_drv_probe(struct device *dev,
+ int (*kms_init)(struct drm_device *dev));
+void msm_drv_shutdown(struct platform_device *pdev);
+
+
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 7137492fe78e..362775ae50af 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -21,6 +21,10 @@ struct msm_framebuffer {
/* Count of # of attached planes which need dirtyfb: */
refcount_t dirtyfb;
+
+ /* Framebuffer per-plane address, if pinned, else zero: */
+ uint64_t iova[DRM_FORMAT_MAX_PLANES];
+ atomic_t prepare_count;
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
@@ -76,14 +80,16 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
- uint64_t iova;
if (needs_dirtyfb)
refcount_inc(&msm_fb->dirtyfb);
+ atomic_inc(&msm_fb->prepare_count);
+
for (i = 0; i < n; i++) {
- ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
- drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
+ drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+ fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
}
@@ -103,14 +109,16 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
for (i = 0; i < n; i++)
msm_gem_unpin_iova(fb->obj[i], aspace);
+
+ if (!atomic_dec_return(&msm_fb->prepare_count))
+ memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- if (!fb->obj[plane])
- return 0;
- return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->iova[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index f2cece542c3f..3df255402a33 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -15,6 +15,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
const char *name)
{
struct msm_fence_context *fctx;
+ static int index = 0;
fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -23,6 +24,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
fctx->dev = dev;
strncpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
+ fctx->index = index++;
fctx->fenceptr = fenceptr;
spin_lock_init(&fctx->spinlock);
@@ -34,7 +36,7 @@ void msm_fence_context_free(struct msm_fence_context *fctx)
kfree(fctx);
}
-static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
+bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
/*
* Note: Check completed_fence first, as fenceptr is in a write-combine
@@ -76,7 +78,7 @@ static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
- return fence_completed(f->fctx, f->base.seqno);
+ return msm_fence_completed(f->fctx, f->base.seqno);
}
static const struct dma_fence_ops msm_fence_ops = {
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 17ee3822b423..7f1798c54cd1 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -21,6 +21,8 @@ struct msm_fence_context {
char name[32];
/** context: see dma_fence_context_alloc() */
unsigned context;
+ /** index: similar to context, but local to msm_fence_context's */
+ unsigned index;
/**
* last_fence:
@@ -56,6 +58,7 @@ struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
volatile uint32_t *fenceptr, const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);
+bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8f492656c9ad..97d5b4d8b9b0 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -376,39 +376,40 @@ put_iova_vmas(struct drm_gem_object *obj)
}
}
-static int get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova,
+static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace,
u64 range_start, u64 range_end)
{
struct msm_gem_vma *vma;
- int ret = 0;
GEM_WARN_ON(!msm_gem_is_locked(obj));
vma = lookup_vma(obj, aspace);
if (!vma) {
+ int ret;
+
vma = add_vma(obj, aspace);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return vma;
- ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+ ret = msm_gem_init_vma(aspace, vma, obj->size,
range_start, range_end);
if (ret) {
del_vma(vma);
- return ret;
+ return ERR_PTR(ret);
}
+ } else {
+ GEM_WARN_ON(vma->iova < range_start);
+ GEM_WARN_ON((vma->iova + obj->size) > range_end);
}
- *iova = vma->iova;
- return 0;
+ return vma;
}
-static int msm_gem_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
struct page **pages;
int ret, prot = IOMMU_READ;
@@ -426,16 +427,11 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
return -EBUSY;
- vma = lookup_vma(obj, aspace);
- if (GEM_WARN_ON(!vma))
- return -EINVAL;
-
pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- ret = msm_gem_map_vma(aspace, vma, prot,
- msm_obj->sgt, obj->size >> PAGE_SHIFT);
+ ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
if (!ret)
msm_obj->pin_count++;
@@ -443,23 +439,42 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
return ret;
}
+void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ GEM_WARN_ON(!msm_gem_is_locked(obj));
+
+ msm_gem_unpin_vma(vma);
+
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+
+ update_inactive(msm_obj);
+}
+
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ return get_vma_locked(obj, aspace, 0, U64_MAX);
+}
+
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end)
{
- u64 local;
+ struct msm_gem_vma *vma;
int ret;
GEM_WARN_ON(!msm_gem_is_locked(obj));
- ret = get_iova_locked(obj, aspace, &local,
- range_start, range_end);
-
- if (!ret)
- ret = msm_gem_pin_iova(obj, aspace);
+ vma = get_vma_locked(obj, aspace, range_start, range_end);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+ ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret)
- *iova = local;
+ *iova = vma->iova;
return ret;
}
@@ -481,12 +496,6 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
return ret;
}
-int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
-{
- return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
-}
-
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
@@ -501,52 +510,67 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
- int ret;
+ struct msm_gem_vma *vma;
+ int ret = 0;
msm_gem_lock(obj);
- ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
+ vma = get_vma_locked(obj, aspace, 0, U64_MAX);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else {
+ *iova = vma->iova;
+ }
msm_gem_unlock(obj);
return ret;
}
-/* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
- */
-uint64_t msm_gem_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+static int clear_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_vma *vma;
+ struct msm_gem_vma *vma = lookup_vma(obj, aspace);
- msm_gem_lock(obj);
- vma = lookup_vma(obj, aspace);
- msm_gem_unlock(obj);
- GEM_WARN_ON(!vma);
+ if (!vma)
+ return 0;
+
+ if (msm_gem_vma_inuse(vma))
+ return -EBUSY;
- return vma ? vma->iova : 0;
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
+ del_vma(vma);
+
+ return 0;
}
/*
- * Locked variant of msm_gem_unpin_iova()
+ * Get the requested iova but don't pin it. Fails if the requested iova is
+ * not available. Doesn't need a put because iovas are currently valid for
+ * the life of the object.
+ *
+ * Setting an iova of zero will clear the vma.
*/
-void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
+int msm_gem_set_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
-
- GEM_WARN_ON(!msm_gem_is_locked(obj));
-
- vma = lookup_vma(obj, aspace);
-
- if (!GEM_WARN_ON(!vma)) {
- msm_gem_unmap_vma(aspace, vma);
-
- msm_obj->pin_count--;
- GEM_WARN_ON(msm_obj->pin_count < 0);
+ int ret = 0;
- update_inactive(msm_obj);
+ msm_gem_lock(obj);
+ if (!iova) {
+ ret = clear_iova(obj, aspace);
+ } else {
+ struct msm_gem_vma *vma;
+ vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ } else if (GEM_WARN_ON(vma->iova != iova)) {
+ clear_iova(obj, aspace);
+ ret = -EBUSY;
+ }
}
+ msm_gem_unlock(obj);
+
+ return ret;
}
/*
@@ -557,8 +581,13 @@ void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
+ struct msm_gem_vma *vma;
+
msm_gem_lock(obj);
- msm_gem_unpin_iova_locked(obj, aspace);
+ vma = lookup_vma(obj, aspace);
+ if (!GEM_WARN_ON(!vma)) {
+ msm_gem_unpin_vma_locked(obj, vma);
+ }
msm_gem_unlock(obj);
}
@@ -939,7 +968,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
name, comm ? ":" : "", comm ? comm : "",
vma->aspace, vma->iova,
vma->mapped ? "mapped" : "unmapped",
- vma->inuse);
+ msm_gem_vma_inuse(vma));
kfree(comm);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index af612add5264..c75d3b879a53 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -38,8 +38,25 @@ struct msm_gem_address_space {
/* @faults: the number of GPU hangs associated with this address space */
int faults;
+
+ /** @va_start: lowest possible address to allocate */
+ uint64_t va_start;
+
+ /** @va_size: the size of the address space (in bytes) */
+ uint64_t va_size;
};
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size);
+
+struct msm_fence_context;
+
struct msm_gem_vma {
struct drm_mm_node node;
uint64_t iova;
@@ -47,8 +64,25 @@ struct msm_gem_vma {
struct list_head list; /* node in msm_gem_object::vmas */
bool mapped;
int inuse;
+ uint32_t fence_mask;
+ uint32_t fence[MSM_GPU_MAX_RINGS];
+ struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int size,
+ u64 range_start, u64 range_end);
+bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
+void msm_gem_unpin_vma(struct msm_gem_vma *vma);
+void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int prot,
+ struct sg_table *sgt, int size);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -110,19 +144,19 @@ struct msm_gem_object {
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_set_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t iova);
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
-uint64_t msm_gem_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
-void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
@@ -329,7 +363,6 @@ struct msm_gem_submit {
bool valid; /* true if no cmdstream patching needed */
bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
- struct msm_file_private *ctx;
unsigned int nr_cmds;
unsigned int nr_bos;
u32 ident; /* An "identifier" for the submit, for logging */
@@ -343,12 +376,18 @@ struct msm_gem_submit {
struct drm_msm_gem_submit_reloc *relocs;
} *cmd; /* array of size nr_cmds */
struct {
+/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
+#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
+#define BO_LOCKED 0x4000 /* obj lock is held */
+#define BO_ACTIVE 0x2000 /* active refcnt is held */
+#define BO_PINNED 0x1000 /* obj is pinned and on active list */
uint32_t flags;
union {
struct msm_gem_object *obj;
uint32_t handle;
};
uint64_t iova;
+ struct msm_gem_vma *vma;
} bos[];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index e8f1b7a2ca9c..94ab705e9b8a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -17,7 +17,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
- return NULL;
+ return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 8d1eef914ba8..80975229b4de 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -21,12 +21,6 @@
* Cmdstream submission:
*/
-/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
-#define BO_LOCKED 0x4000 /* obj lock is held */
-#define BO_ACTIVE 0x2000 /* active refcnt is held */
-#define BO_PINNED 0x1000 /* obj is pinned and on active list */
-
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
@@ -231,16 +225,21 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
struct drm_gem_object *obj = &submit->bos[i].obj->base;
unsigned flags = submit->bos[i].flags & cleanup_flags;
+ /*
+ * Clear flags bit before dropping lock, so that the msm_job_run()
+ * path isn't racing with submit_cleanup() (ie. the read/modify/
+ * write is protected by the obj lock in all paths)
+ */
+ submit->bos[i].flags &= ~cleanup_flags;
+
if (flags & BO_PINNED)
- msm_gem_unpin_iova_locked(obj, submit->aspace);
+ msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
if (flags & BO_ACTIVE)
msm_gem_active_put(obj);
if (flags & BO_LOCKED)
dma_resv_unlock(obj->resv);
-
- submit->bos[i].flags &= ~cleanup_flags;
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
@@ -363,21 +362,26 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
- uint64_t iova;
+ struct msm_gem_vma *vma;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_and_pin_iova_locked(obj,
- submit->aspace, &iova);
+ vma = msm_gem_get_vma_locked(obj, submit->aspace);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ break;
+ }
+ ret = msm_gem_pin_vma_locked(obj, vma);
if (ret)
break;
submit->bos[i].flags |= BO_PINNED;
+ submit->bos[i].vma = vma;
- if (iova == submit->bos[i].iova) {
+ if (vma->iova == submit->bos[i].iova) {
submit->bos[i].flags |= BO_VALID;
} else {
- submit->bos[i].iova = iova;
+ submit->bos[i].iova = vma->iova;
/* iova changed, so address in cmdstream is not valid: */
submit->bos[i].flags &= ~BO_VALID;
submit->valid = false;
@@ -730,6 +734,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
+ if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
+ DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
+ return -EPERM;
+ }
+
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index f914ddbaea89..3c1dc9241831 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -5,6 +5,7 @@
*/
#include "msm_drv.h"
+#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -37,14 +38,31 @@ msm_gem_address_space_get(struct msm_gem_address_space *aspace)
return aspace;
}
+bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
+{
+ if (vma->inuse > 0)
+ return true;
+
+ while (vma->fence_mask) {
+ unsigned idx = ffs(vma->fence_mask) - 1;
+
+ if (!msm_fence_completed(vma->fctx[idx], vma->fence[idx]))
+ return true;
+
+ vma->fence_mask &= ~BIT(idx);
+ }
+
+ return false;
+}
+
/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
{
- unsigned size = vma->node.size << PAGE_SHIFT;
+ unsigned size = vma->node.size;
/* Print a message if we try to purge a vma in use */
- if (WARN_ON(vma->inuse > 0))
+ if (GEM_WARN_ON(msm_gem_vma_inuse(vma)))
return;
/* Don't do anything if the memory isn't mapped */
@@ -58,22 +76,32 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
}
/* Remove reference counts for the mapping */
-void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma)
+void msm_gem_unpin_vma(struct msm_gem_vma *vma)
{
- if (!WARN_ON(!vma->iova))
+ if (GEM_WARN_ON(!vma->inuse))
+ return;
+ if (!GEM_WARN_ON(!vma->iova))
vma->inuse--;
}
+/* Replace pin reference with fence: */
+void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
+{
+ vma->fctx[fctx->index] = fctx;
+ vma->fence[fctx->index] = fctx->last_fence;
+ vma->fence_mask |= BIT(fctx->index);
+ msm_gem_unpin_vma(vma);
+}
+
+/* Map and pin vma: */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int prot,
- struct sg_table *sgt, int npages)
+ struct sg_table *sgt, int size)
{
- unsigned size = npages << PAGE_SHIFT;
int ret = 0;
- if (WARN_ON(!vma->iova))
+ if (GEM_WARN_ON(!vma->iova))
return -EINVAL;
/* Increase the usage counter */
@@ -100,7 +128,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma)
{
- if (WARN_ON(vma->inuse > 0 || vma->mapped))
+ if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped))
return;
spin_lock(&aspace->lock);
@@ -115,23 +143,24 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages,
+ struct msm_gem_vma *vma, int size,
u64 range_start, u64 range_end)
{
int ret;
- if (WARN_ON(vma->iova))
+ if (GEM_WARN_ON(vma->iova))
return -EBUSY;
spin_lock(&aspace->lock);
- ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
- 0, range_start, range_end, 0);
+ ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
+ size, PAGE_SIZE, 0,
+ range_start, range_end, 0);
spin_unlock(&aspace->lock);
if (ret)
return ret;
- vma->iova = vma->node.start << PAGE_SHIFT;
+ vma->iova = vma->node.start;
vma->mapped = false;
kref_get(&aspace->kref);
@@ -155,8 +184,10 @@ msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
spin_lock_init(&aspace->lock);
aspace->name = name;
aspace->mmu = mmu;
+ aspace->va_start = va_start;
+ aspace->va_size = size;
- drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
+ drm_mm_init(&aspace->mm, va_start, size);
kref_init(&aspace->kref);
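
The subtle part of this hunk is the unit change: the drm_mm allocator previously tracked the address space in page units (sizes pre-shifted, iova reconstructed with << PAGE_SHIFT) and now tracks it in bytes with an explicit PAGE_SIZE alignment. Side by side:

/* before: page-granular bookkeeping */
drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages,
			    0, 0, range_start, range_end, 0);
vma->iova = vma->node.start << PAGE_SHIFT;

/* after: byte-granular, page-aligned allocations */
drm_mm_insert_node_in_range(&aspace->mm, &vma->node, size,
			    PAGE_SIZE, 0, range_start, range_end, 0);
vma->iova = vma->node.start;
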
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index faf0c242874e..eb8a6663f309 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -351,6 +351,28 @@ find_submit(struct msm_ringbuffer *ring, uint32_t fence)
static void retire_submits(struct msm_gpu *gpu);
+static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
+{
+ struct msm_file_private *ctx = submit->queue->ctx;
+ struct task_struct *task;
+
+ /* Note that kstrdup will return NULL if argument is NULL: */
+ *comm = kstrdup(ctx->comm, GFP_KERNEL);
+ *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
+
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
+ if (!task)
+ return;
+
+ if (!*comm)
+ *comm = kstrdup(task->comm, GFP_KERNEL);
+
+ if (!*cmd)
+ *cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+
+ put_task_struct(task);
+}
+
static void recover_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
@@ -367,18 +389,12 @@ static void recover_worker(struct kthread_work *work)
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
- struct task_struct *task;
-
/* Increment the fault counts */
submit->queue->faults++;
- submit->aspace->faults++;
+ if (submit->aspace)
+ submit->aspace->faults++;
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
if (comm && cmd) {
DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
@@ -467,14 +483,7 @@ static void fault_worker(struct kthread_work *work)
goto resume_smmu;
if (submit) {
- struct task_struct *task;
-
- task = get_pid_task(submit->pid, PIDTYPE_PID);
- if (task) {
- comm = kstrdup(task->comm, GFP_KERNEL);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
- put_task_struct(task);
- }
+ get_comm_cmdline(submit, &comm, &cmd);
/*
* When we get GPU iova faults, we can get 1000s of them,
@@ -515,7 +524,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence_before(fence, ring->seqno)) {
+ } else if (fence_before(fence, ring->fctx->last_fence)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -523,13 +532,13 @@ static void hangcheck_handler(struct timer_list *t)
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, ring->seqno);
+ gpu->name, ring->fctx->last_fence);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (fence_after(ring->seqno, ring->hangcheck_fence))
+ if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -746,7 +755,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
msm_gpu_hw_init(gpu);
- submit->seqno = ++ring->seqno;
+ submit->seqno = submit->hw_fence->seqno;
msm_rd_dump_submit(priv->rd, submit, NULL);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 02419f2ca2bc..6def00883046 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -9,6 +9,7 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
+#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
@@ -21,6 +22,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
+struct msm_file_private;
struct msm_gpu_config {
const char *ioname;
@@ -43,9 +45,9 @@ struct msm_gpu_config {
*/
struct msm_gpu_funcs {
int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
- uint32_t param, uint64_t *value);
+ uint32_t param, uint64_t *value, uint32_t *len);
int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
- uint32_t param, uint64_t value);
+ uint32_t param, uint64_t value, uint32_t len);
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
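
The new `len` argument lets params carry variable-length payloads alongside plain u64 values: get_param reports a size through it, while set_param receives one, with the user pointer travelling in `value`. A hedged sketch of a string-valued set_param case, modeled on the MSM_PARAM_COMM/MSM_PARAM_CMDLINE overrides consumed by get_comm_cmdline() later in this patch (locking elided):

/* sketch (assumed shape): 'value' is a user pointer, 'len' its size */
case MSM_PARAM_COMM: {
	char *str = kmalloc(len + 1, GFP_KERNEL);

	if (!str)
		return -ENOMEM;
	if (copy_from_user(str, u64_to_user_ptr(value), len)) {
		kfree(str);
		return -EFAULT;
	}
	str[len] = '\0';

	kfree(ctx->comm);	/* real code must serialize against readers */
	ctx->comm = str;
	break;
}
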
@@ -62,7 +64,7 @@ struct msm_gpu_funcs {
/* for generation specific debugfs: */
void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
- unsigned long (*gpu_busy)(struct msm_gpu *gpu);
+ u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
@@ -106,11 +108,8 @@ struct msm_gpu_devfreq {
struct dev_pm_qos_request boost_freq;
/**
- * busy_cycles:
- *
- * Used by implementation of gpu->gpu_busy() to track the last
- * busy counter value, for calculating elapsed busy cycles since
- * last sampling period.
+ * busy_cycles: Last busy counter value, for calculating elapsed busy
+ * cycles since last sampling period.
*/
u64 busy_cycles;
@@ -120,6 +119,8 @@ struct msm_gpu_devfreq {
/** idle_time: Time of last transition to idle: */
ktime_t idle_time;
+ struct devfreq_dev_status average_status;
+
/**
* idle_work:
*
@@ -290,7 +291,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
- if (fence_after(ring->seqno, ring->memptrs->fence))
+ if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
return true;
}
@@ -354,6 +355,12 @@ struct msm_file_private {
*/
int sysprof;
+ /** comm: Overridden task comm, see MSM_PARAM_COMM */
+ char *comm;
+
+ /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
+ char *cmdline;
+
/**
* entities:
*
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 12641616acd3..d2539ca78c29 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -9,6 +9,7 @@
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
+#include <linux/math64.h>
#include <linux/units.h>
/*
@@ -49,18 +50,95 @@ static unsigned long get_freq(struct msm_gpu *gpu)
return clk_get_rate(gpu->core_clk);
}
-static int msm_devfreq_get_dev_status(struct device *dev,
+static void get_raw_dev_status(struct msm_gpu *gpu,
struct devfreq_dev_status *status)
{
- struct msm_gpu *gpu = dev_to_gpu(dev);
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ u64 busy_cycles, busy_time;
+ unsigned long sample_rate;
ktime_t time;
status->current_frequency = get_freq(gpu);
- status->busy_time = gpu->funcs->gpu_busy(gpu);
-
+ busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
time = ktime_get();
- status->total_time = ktime_us_delta(time, gpu->devfreq.time);
- gpu->devfreq.time = time;
+
+ busy_time = busy_cycles - df->busy_cycles;
+ status->total_time = ktime_us_delta(time, df->time);
+
+ df->busy_cycles = busy_cycles;
+ df->time = time;
+
+ busy_time *= USEC_PER_SEC;
+ do_div(busy_time, sample_rate);
+ if (WARN_ON(busy_time > ~0LU))
+ busy_time = ~0LU;
+
+ status->busy_time = busy_time;
+}
+
+static void update_average_dev_status(struct msm_gpu *gpu,
+ const struct devfreq_dev_status *raw)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ const u32 polling_ms = df->devfreq->profile->polling_ms;
+ const u32 max_history_ms = polling_ms * 11 / 10;
+ struct devfreq_dev_status *avg = &df->average_status;
+ u64 avg_freq;
+
+ /* simple_ondemand governor interacts poorly with gpu->clamp_to_idle.
+ * When we enforce the constraint on idle, it calls get_dev_status
+ * which would normally reset the stats. When we remove the
+ * constraint on active, it calls get_dev_status again where busy_time
+ * would be 0.
+ *
+ * To remedy this, we always return the average load over the past
+ * polling_ms.
+ */
+
+ /* raw is longer than polling_ms or avg has no history */
+ if (div_u64(raw->total_time, USEC_PER_MSEC) >= polling_ms ||
+ !avg->total_time) {
+ *avg = *raw;
+ return;
+ }
+
+ /* Truncate the oldest history first.
+ *
+ * Because we keep the history with a single devfreq_dev_status,
+ * rather than a list of devfreq_dev_status, we have to assume freq
+ * and load are the same over avg->total_time. We can scale down
+ * avg->busy_time and avg->total_time by the same factor to drop
+ * history.
+ */
+ if (div_u64(avg->total_time + raw->total_time, USEC_PER_MSEC) >=
+ max_history_ms) {
+ const u32 new_total_time = polling_ms * USEC_PER_MSEC -
+ raw->total_time;
+ avg->busy_time = div_u64(
+ mul_u32_u32(avg->busy_time, new_total_time),
+ avg->total_time);
+ avg->total_time = new_total_time;
+ }
+
+ /* compute the average freq over avg->total_time + raw->total_time */
+ avg_freq = mul_u32_u32(avg->current_frequency, avg->total_time);
+ avg_freq += mul_u32_u32(raw->current_frequency, raw->total_time);
+ do_div(avg_freq, avg->total_time + raw->total_time);
+
+ avg->current_frequency = avg_freq;
+ avg->busy_time += raw->busy_time;
+ avg->total_time += raw->total_time;
+}
+
+static int msm_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
+ struct devfreq_dev_status raw;
+
+ get_raw_dev_status(gpu, &raw);
+ update_average_dev_status(gpu, &raw);
+ *status = gpu->devfreq.average_status;
return 0;
}
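
The truncation arithmetic above keeps the history bounded at roughly one polling window. Restated as a standalone helper, under the same assumption the kernel code relies on (a raw sample longer than polling_ms has already replaced the history outright, so new_total < window_us here):

/* fold a new (busy, total) sample into a single-entry history, scaling
 * the old pair down so old_total + new_total fits the polling window */
static void fold_sample(u64 *busy, u64 *total,
			u64 new_busy, u64 new_total, u64 window_us)
{
	if (*total + new_total >= window_us * 11 / 10) {
		u64 keep = window_us - new_total;

		*busy = div64_u64(*busy * keep, *total);
		*total = keep;
	}

	*busy += new_busy;
	*total += new_total;
}
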
@@ -224,7 +302,6 @@ void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
void msm_devfreq_active(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
- struct devfreq_dev_status status;
unsigned int idle_time;
if (!has_devfreq(gpu))
@@ -248,12 +325,6 @@ void msm_devfreq_active(struct msm_gpu *gpu)
dev_pm_qos_update_request(&df->idle_freq,
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
-
- /*
- * Reset the polling interval so we aren't inconsistent
- * about freq vs busy/total cycles
- */
- msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 2a4f0526cb98..ab25fff271f9 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -194,27 +194,6 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
}
-struct msm_kms *mdp4_kms_init(struct drm_device *dev);
-struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-struct msm_kms *dpu_kms_init(struct drm_device *dev);
-
-extern const struct of_device_id dpu_dt_match[];
-extern const struct of_device_id mdp5_dt_match[];
-
-struct msm_mdss_funcs {
- int (*enable)(struct msm_mdss *mdss);
- int (*disable)(struct msm_mdss *mdss);
- void (*destroy)(struct msm_mdss *mdss);
-};
-
-struct msm_mdss {
- struct device *dev;
- const struct msm_mdss_funcs *funcs;
-};
-
-int mdp5_mdss_init(struct platform_device *dev);
-int dpu_mdss_init(struct platform_device *dev);
-
#define for_each_crtc_mask(dev, crtc, crtc_mask) \
drm_for_each_crtc(crtc, dev) \
for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
new file mode 100644
index 000000000000..0454a571adf7
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -0,0 +1,415 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdesc.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+
+/* for DPU_HW_* defines */
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+#define HW_REV 0x0
+#define HW_INTR_STATUS 0x0010
+
+#define UBWC_STATIC 0x144
+#define UBWC_CTRL_2 0x150
+#define UBWC_PREDICTION_MODE 0x154
+
+struct msm_mdss {
+ struct device *dev;
+
+ void __iomem *mmio;
+ struct clk_bulk_data *clocks;
+ size_t num_clocks;
+ bool is_mdp5;
+ struct {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irq_controller;
+};
+
+static void msm_mdss_irq(struct irq_desc *desc)
+{
+ struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 interrupts;
+
+ chained_irq_enter(chip, desc);
+
+ interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
+
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ int rc;
+
+ rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
+ hwirq);
+ if (rc < 0) {
+ dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
+ hwirq, rc);
+ break;
+ }
+
+ interrupts &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void msm_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* order the mask bit update against surrounding accesses */
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void msm_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* order the unmask bit update against surrounding accesses */
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip msm_mdss_irq_chip = {
+ .name = "msm_mdss",
+ .irq_mask = msm_mdss_irq_mask,
+ .irq_unmask = msm_mdss_irq_unmask,
+};
+
+static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;
+
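+/*
+ * Per-domain lockdep keys keep these nested (chained) interrupts from
+ * being confused with their parent interrupt's lock class.
+ */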
+static int msm_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct msm_mdss *msm_mdss = domain->host_data;
+
+ irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
+ irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);
+
+ return irq_set_chip_data(irq, msm_mdss);
+}
+
+static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
+ .map = msm_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = msm_mdss->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &msm_mdss_irqdomain_ops, msm_mdss);
+ if (!domain) {
+ dev_err(dev, "failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ msm_mdss->irq_controller.enabled_mask = 0;
+ msm_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int msm_mdss_enable(struct msm_mdss *msm_mdss)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
+ if (ret) {
+ dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ /*
+ * HW_REV requires MDSS_MDP_CLK, which is not enabled by the mdss on
+ * mdp5 hardware. Skip reading it for now.
+ */
+ if (msm_mdss->is_mdp5)
+ return 0;
+
+ /*
+ * UBWC config is part of the "mdss" region, which is not accessible
+ * from the rest of the driver, so hardcode the known configurations here
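+ * (known DPU revisions: 5.0 sm8150, 5.1 sc8180x, 6.0 sm8250, 6.2 sc7180,
+ * 7.2 sc7280, per the DPU_HW_VER_* defines in dpu_hw_catalog.h).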
+ */
+ switch (readl_relaxed(msm_mdss->mmio + HW_REV)) {
+ case DPU_HW_VER_500:
+ case DPU_HW_VER_501:
+ writel_relaxed(0x420, msm_mdss->mmio + UBWC_STATIC);
+ break;
+ case DPU_HW_VER_600:
+ /* TODO: 0x102e for LP_DDR4 */
+ writel_relaxed(0x103e, msm_mdss->mmio + UBWC_STATIC);
+ writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
+ writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
+ break;
+ case DPU_HW_VER_620:
+ writel_relaxed(0x1e, msm_mdss->mmio + UBWC_STATIC);
+ break;
+ case DPU_HW_VER_720:
+ writel_relaxed(0x101e, msm_mdss->mmio + UBWC_STATIC);
+ break;
+ }
+
+ return 0;
+}
+
+static int msm_mdss_disable(struct msm_mdss *msm_mdss)
+{
+ clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
+
+ return 0;
+}
+
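+/*
+ * Tear down in reverse of init: suspend and disable PM, remove the irq
+ * domain, then detach the chained handler from the parent interrupt.
+ */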
+static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
+{
+ struct platform_device *pdev = to_platform_device(msm_mdss->dev);
+ int irq;
+
+ pm_runtime_suspend(msm_mdss->dev);
+ pm_runtime_disable(msm_mdss->dev);
+ irq_domain_remove(msm_mdss->irq_controller.domain);
+ msm_mdss->irq_controller.domain = NULL;
+ irq = platform_get_irq(pdev, 0);
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+}
+
+static int msm_mdss_reset(struct device *dev)
+{
+ struct reset_control *reset;
+
+ reset = reset_control_get_optional_exclusive(dev, NULL);
+ if (!reset) {
+ /* Optional reset not specified */
+ return 0;
+ } else if (IS_ERR(reset)) {
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "failed to acquire mdss reset\n");
+ }
+
+ reset_control_assert(reset);
+ /*
+ * Tests indicate that the reset has to be held for some period of time;
+ * use roughly one frame in a typical system (20 ms).
+ */
+ msleep(20);
+ reset_control_deassert(reset);
+
+ reset_control_put(reset);
+
+ return 0;
+}
+
+/*
+ * MDP5 MDSS uses at most the three clocks named below, all of them optional.
+ */
+#define MDP5_MDSS_NUM_CLOCKS 3
+static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
+{
+ struct clk_bulk_data *bulk;
+ int num_clocks = 0;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
+ if (!bulk)
+ return -ENOMEM;
+
+ bulk[num_clocks++].id = "iface";
+ bulk[num_clocks++].id = "bus";
+ bulk[num_clocks++].id = "vsync";
+
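+ /* all three clocks are optional; absent ones are simply left NULL */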
+ ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
+ if (ret)
+ return ret;
+
+ *clocks = bulk;
+
+ return num_clocks;
+}
+
+static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
+{
+ struct msm_mdss *msm_mdss;
+ int ret;
+ int irq;
+
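+ /* fully reset the block first so any bootloader-programmed state is dropped */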
+ ret = msm_mdss_reset(&pdev->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
+ if (!msm_mdss)
+ return ERR_PTR(-ENOMEM);
+
+ msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
+ if (IS_ERR(msm_mdss->mmio))
+ return ERR_CAST(msm_mdss->mmio);
+
+ dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
+
+ if (is_mdp5)
+ ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
+ else
+ ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
+ return ERR_PTR(ret);
+ }
+ msm_mdss->num_clocks = ret;
+ msm_mdss->is_mdp5 = is_mdp5;
+
+ msm_mdss->dev = &pdev->dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return ERR_PTR(irq);
+
+ ret = _msm_mdss_irq_domain_add(msm_mdss);
+ if (ret)
+ return ERR_PTR(ret);
+
+ irq_set_chained_handler_and_data(irq, msm_mdss_irq,
+ msm_mdss);
+
+ pm_runtime_enable(&pdev->dev);
+
+ return msm_mdss;
+}
+
+static int __maybe_unused mdss_runtime_suspend(struct device *dev)
+{
+ struct msm_mdss *mdss = dev_get_drvdata(dev);
+
+ DBG("");
+
+ return msm_mdss_disable(mdss);
+}
+
+static int __maybe_unused mdss_runtime_resume(struct device *dev)
+{
+ struct msm_mdss *mdss = dev_get_drvdata(dev);
+
+ DBG("");
+
+ return msm_mdss_enable(mdss);
+}
+
+static int __maybe_unused mdss_pm_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mdss_runtime_suspend(dev);
+}
+
+static int __maybe_unused mdss_pm_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return mdss_runtime_resume(dev);
+}
+
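+/*
+ * System sleep reuses the runtime PM callbacks; if the device is already
+ * runtime-suspended there is nothing left to do.
+ */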
+static const struct dev_pm_ops mdss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
+ SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
+};
+
+static int mdss_probe(struct platform_device *pdev)
+{
+ struct msm_mdss *mdss;
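+ /* the bare "qcom,mdss" compatible identifies MDP5-generation MDSS */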
+ bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ mdss = msm_mdss_init(pdev, is_mdp5);
+ if (IS_ERR(mdss))
+ return PTR_ERR(mdss);
+
+ platform_set_drvdata(pdev, mdss);
+
+ /*
+ * MDP5/DPU based devices don't have a flat hierarchy: there is a top-level
+ * parent, MDSS, with children such as MDP5/DPU, DSI, HDMI and eDP.
+ * Populate the child devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
+ */
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to populate children devices\n");
+ msm_mdss_destroy(mdss);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mdss_remove(struct platform_device *pdev)
+{
+ struct msm_mdss *mdss = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+
+ msm_mdss_destroy(mdss);
+
+ return 0;
+}
+
+static const struct of_device_id mdss_dt_match[] = {
+ { .compatible = "qcom,mdss" },
+ { .compatible = "qcom,msm8998-mdss" },
+ { .compatible = "qcom,qcm2290-mdss" },
+ { .compatible = "qcom,sdm845-mdss" },
+ { .compatible = "qcom,sc7180-mdss" },
+ { .compatible = "qcom,sc7280-mdss" },
+ { .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdss_dt_match);
+
+static struct platform_driver mdss_platform_driver = {
+ .probe = mdss_probe,
+ .remove = mdss_remove,
+ .driver = {
+ .name = "msm-mdss",
+ .of_match_table = mdss_dt_match,
+ .pm = &mdss_pm_ops,
+ },
+};
+
+void __init msm_mdss_register(void)
+{
+ platform_driver_register(&mdss_platform_driver);
+}
+
+void __exit msm_mdss_unregister(void)
+{
+ platform_driver_unregister(&mdss_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9d835331f214..a92ffde53f0b 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -180,6 +180,7 @@ static int rd_open(struct inode *inode, struct file *file)
struct msm_gpu *gpu = priv->gpu;
uint64_t val;
uint32_t gpu_id;
+ uint32_t zero = 0;
int ret = 0;
if (!gpu)
@@ -200,12 +201,12 @@ static int rd_open(struct inode *inode, struct file *file)
*
* Note: These particular params do not require a context
*/
- gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val);
+ gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val, &zero);
gpu_id = val;
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
- gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val);
+ gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val, &zero);
rd_write_section(rd, RD_CHIP_ID, &val, sizeof(val));
out:
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 367a6aaa3a20..43066320ff8c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -14,9 +14,20 @@ module_param(num_hw_submissions, uint, 0600);
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
+ struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
+ int i;
- submit->hw_fence = msm_fence_alloc(submit->ring->fctx);
+ submit->hw_fence = msm_fence_alloc(fctx);
+
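+ /* fenced unpin: drop the pin now, defer unmap until the hw_fence signals */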
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
+
+ msm_gem_lock(obj);
+ msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
+ submit->bos[i].flags &= ~BO_PINNED;
+ msm_gem_unlock(obj);
+ }
pm_runtime_get_sync(&gpu->pdev->dev);
@@ -40,7 +51,7 @@ static void msm_job_free(struct drm_sched_job *job)
msm_gem_submit_put(submit);
}
-const struct drm_sched_backend_ops msm_sched_ops = {
+static const struct drm_sched_backend_ops msm_sched_ops = {
.run_job = msm_job_run,
.free_job = msm_job_free
};
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index d8c63df4e9ca..2a5045abe46e 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -59,7 +59,6 @@ struct msm_ringbuffer {
spinlock_t submit_lock;
uint64_t iova;
- uint32_t seqno;
uint32_t hangcheck_fence;
struct msm_rbmemptrs *memptrs;
uint64_t memptrs_iova;
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 79b6ccd6ce64..f486a3cd4e55 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -61,6 +61,8 @@ void __msm_file_private_destroy(struct kref *kref)
}
msm_gem_address_space_put(ctx->aspace);
+ kfree(ctx->comm);
+ kfree(ctx->cmdline);
kfree(ctx);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
index c79d1259e49b..505a905e3ad1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -200,7 +200,6 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu,
{
struct drm_writeback_connector *wb_conn = &rcrtc->writeback;
- wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc);
drm_connector_helper_add(&wb_conn->base,
&rcar_du_wb_conn_helper_funcs);
@@ -208,7 +207,8 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu,
&rcar_du_wb_conn_funcs,
&rcar_du_wb_enc_helper_funcs,
writeback_formats,
- ARRAY_SIZE(writeback_formats));
+ ARRAY_SIZE(writeback_formats),
+ 1 << drm_crtc_index(&rcrtc->crtc));
}
void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 82beb8c159f2..3579d487402e 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -503,7 +503,8 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
ret = drm_writeback_connector_init(drm, &txp->connector,
&vc4_txp_connector_funcs,
&vc4_txp_encoder_helper_funcs,
- drm_fmts, ARRAY_SIZE(drm_fmts));
+ drm_fmts, ARRAY_SIZE(drm_fmts),
+ 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index af1604dfbbaf..0a315221d1f5 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -140,12 +140,12 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev)
{
struct drm_writeback_connector *wb = &vkmsdev->output.wb_connector;
- vkmsdev->output.wb_connector.encoder.possible_crtcs = 1;
drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
return drm_writeback_connector_init(&vkmsdev->drm, wb,
&vkms_wb_connector_funcs,
&vkms_wb_encoder_helper_funcs,
vkms_wb_formats,
- ARRAY_SIZE(vkms_wb_formats));
+ ARRAY_SIZE(vkms_wb_formats),
+ 1);
}