From 17a794d768383527408e23f2a1a04ac64c3d2ba6 Mon Sep 17 00:00:00 2001
From: Chris Zhong
Date: Fri, 26 Aug 2016 20:39:38 -0700
Subject: drm/rockchip: vop: make vop register setting take effect

The setting of vop registers needs a reg_done write to take effect. In
vop_enable the vop returns to work by restoring its registers, but the
restored registers do not take effect immediately; a vop_cfg_done is
needed after them. The same is needed after the windows are disabled in
vop_crtc_disable.

Signed-off-by: Chris Zhong
---
 drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'drivers/gpu/drm/rockchip')

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fb5f001f51c3..4f8b8631956a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -531,6 +531,8 @@ static int vop_enable(struct drm_crtc *crtc)
 	}
 
 	memcpy(vop->regs, vop->regsbak, vop->len);
+	vop_cfg_done(vop);
+
 	/*
 	 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
 	 */
@@ -582,6 +584,8 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
 		spin_unlock(&vop->reg_lock);
 	}
 
+	vop_cfg_done(vop);
+
 	drm_crtc_vblank_off(crtc);
 
 	/*
--
cgit v1.2.3

From 5a2c920c99bcd3a05edff184958c334c6b13c262 Mon Sep 17 00:00:00 2001
From: Julia Lawall
Date: Thu, 10 Nov 2016 22:10:56 +0100
Subject: drm/rockchip: return ERR_PTR instead of NULL

rockchip_drm_framebuffer_init is only used in one case, in
rockchip_drm_fbdev.c, where its return value is tested using IS_ERR.
To enable propagating the reason for the error, change the definition
so that it returns an ERR_PTR value.

Problem found with the help of Coccinelle.

Signed-off-by: Julia Lawall
---
 drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/rockchip')

diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index d5e1f8627d38..c9ccdf8f44bb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -213,7 +213,7 @@ rockchip_drm_framebuffer_init(struct drm_device *dev,
 
 	rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
 	if (IS_ERR(rockchip_fb))
-		return NULL;
+		return ERR_CAST(rockchip_fb);
 
 	return &rockchip_fb->fb;
 }
--
cgit v1.2.3

From 1a0f7ed3abe29cff8e652c69a8ad89aec2c40f00 Mon Sep 17 00:00:00 2001
From: Chris Zhong
Date: Sun, 5 Feb 2017 15:54:56 +0800
Subject: drm/rockchip: cdn-dp: add cdn DP support for rk3399

Add support for the cdn DP controller which is embedded in the rk3399
SoCs. The DP is compliant with DisplayPort Specification, Version 1.3.
This IP is compatible with the Rockchip type-c PHY IP. There is a uCPU
in the DP controller; it needs a firmware to work, so please put the
firmware file at /lib/firmware/rockchip/dptx.bin. The uCPU is in charge
of aux communication and link training; the host uses a mailbox to
communicate with the uCPU. The dclk pin_pol of the vop must not be
inverted for DP.
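
For reference, every request to the uCPU is framed as a four-byte mailbox
header (opcode, module id, 16-bit big-endian payload length) followed by the
payload, pushed one byte at a time; this is what cdn_dp_mailbox_send() in
cdn-dp-reg.c below implements. A minimal, stand-alone sketch of that framing
follows; the push_byte() callback is hypothetical and stands in for the poll
of MAILBOX_FULL_ADDR plus the write to MAILBOX0_WR_DATA done by the driver:

/* Hypothetical sketch of the mailbox framing; not part of this patch. */
#include <linux/types.h>

static int cdn_dp_mailbox_frame(u8 module_id, u8 opcode,
				const u8 *payload, u16 len,
				int (*push_byte)(u8 byte))
{
	u8 header[4] = {
		opcode,			/* e.g. DPTX_WRITE_REGISTER */
		module_id,		/* e.g. MB_MODULE_ID_DP_TX */
		(len >> 8) & 0xff,	/* payload length, MSB first */
		len & 0xff,
	};
	int i, ret;

	/* header first, then the payload, one byte per mailbox write */
	for (i = 0; i < 4; i++) {
		ret = push_byte(header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < len; i++) {
		ret = push_byte(payload[i]);
		if (ret)
			return ret;
	}

	return 0;
}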
Signed-off-by: Chris Zhong [seanpaul fixed up some races between the worker and modeset] [seanpaul squashed ~15 commits from chromium.org gerrit] Signed-off-by: Sean Paul [groeck fixed compilation errors when building as module] Signed-off-by: Guenter Roeck --- drivers/gpu/drm/rockchip/Kconfig | 10 + drivers/gpu/drm/rockchip/Makefile | 2 + drivers/gpu/drm/rockchip/cdn-dp-core.c | 1237 +++++++++++++++++++++++++++ drivers/gpu/drm/rockchip/cdn-dp-core.h | 110 +++ drivers/gpu/drm/rockchip/cdn-dp-reg.c | 979 +++++++++++++++++++++ drivers/gpu/drm/rockchip/cdn-dp-reg.h | 483 +++++++++++ drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 13 +- drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 9 + drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 2 + 9 files changed, 2842 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/rockchip/cdn-dp-core.c create mode 100644 drivers/gpu/drm/rockchip/cdn-dp-core.h create mode 100644 drivers/gpu/drm/rockchip/cdn-dp-reg.c create mode 100644 drivers/gpu/drm/rockchip/cdn-dp-reg.h (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 6f7f9c59f05b..ad31b3eb408f 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -21,6 +21,16 @@ config ROCKCHIP_ANALOGIX_DP for the Analogix Core DP driver. If you want to enable DP on RK3288 based SoC, you should selet this option. +config ROCKCHIP_CDN_DP + tristate "Rockchip cdn DP" + depends on DRM_ROCKCHIP + select SND_SOC_HDMI_CODEC if SND_SOC + help + This selects support for Rockchip SoC specific extensions + for the cdn DP driver. If you want to enable Dp on + RK3399 based SoC, you should select this + option. + config ROCKCHIP_DW_HDMI tristate "Rockchip specific extensions for Synopsys DW HDMI" depends on DRM_ROCKCHIP diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 9746365694ba..c931e2a7d8de 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -7,6 +7,8 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o +obj-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp.o +cdn-dp-objs := cdn-dp-core.o cdn-dp-reg.o obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c new file mode 100644 index 000000000000..7db25081a8ef --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -0,0 +1,1237 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author: Chris Zhong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cdn-dp-core.h" +#include "cdn-dp-reg.h" +#include "rockchip_drm_vop.h" + +#define connector_to_dp(c) \ + container_of(c, struct cdn_dp_device, connector) + +#define encoder_to_dp(c) \ + container_of(c, struct cdn_dp_device, encoder) + +#define GRF_SOC_CON9 0x6224 +#define DP_SEL_VOP_LIT BIT(12) +#define GRF_SOC_CON26 0x6268 +#define UPHY_SEL_BIT 3 +#define UPHY_SEL_MASK BIT(19) +#define DPTX_HPD_SEL (3 << 12) +#define DPTX_HPD_DEL (2 << 12) +#define DPTX_HPD_SEL_MASK (3 << 28) + +#define CDN_FW_TIMEOUT_MS (64 * 1000) +#define CDN_DPCD_TIMEOUT_MS 5000 +#define CDN_DP_FIRMWARE "rockchip/dptx.bin" + +struct cdn_dp_data { + u8 max_phy; +}; + +struct cdn_dp_data rk3399_cdn_dp = { + .max_phy = 2, +}; + +static const struct of_device_id cdn_dp_dt_ids[] = { + { .compatible = "rockchip,rk3399-cdn-dp", + .data = (void *)&rk3399_cdn_dp }, + {} +}; + +MODULE_DEVICE_TABLE(of, cdn_dp_dt_ids); + +static int cdn_dp_grf_write(struct cdn_dp_device *dp, + unsigned int reg, unsigned int val) +{ + int ret; + + ret = clk_prepare_enable(dp->grf_clk); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to prepare_enable grf clock\n"); + return ret; + } + + ret = regmap_write(dp->grf, reg, val); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Could not write to GRF: %d\n", ret); + return ret; + } + + clk_disable_unprepare(dp->grf_clk); + + return 0; +} + +static int cdn_dp_clk_enable(struct cdn_dp_device *dp) +{ + int ret; + u32 rate; + + ret = clk_prepare_enable(dp->pclk); + if (ret < 0) { + DRM_DEV_ERROR(dp->dev, "cannot enable dp pclk %d\n", ret); + goto err_pclk; + } + + ret = clk_prepare_enable(dp->core_clk); + if (ret < 0) { + DRM_DEV_ERROR(dp->dev, "cannot enable core_clk %d\n", ret); + goto err_core_clk; + } + + ret = pm_runtime_get_sync(dp->dev); + if (ret < 0) { + DRM_DEV_ERROR(dp->dev, "cannot get pm runtime %d\n", ret); + goto err_pclk; + } + + reset_control_assert(dp->core_rst); + reset_control_assert(dp->dptx_rst); + reset_control_assert(dp->apb_rst); + reset_control_deassert(dp->core_rst); + reset_control_deassert(dp->dptx_rst); + reset_control_deassert(dp->apb_rst); + + rate = clk_get_rate(dp->core_clk); + if (!rate) { + DRM_DEV_ERROR(dp->dev, "get clk rate failed: %d\n", rate); + goto err_set_rate; + } + + cdn_dp_set_fw_clk(dp, rate); + cdn_dp_clock_reset(dp); + + return 0; + +err_set_rate: + clk_disable_unprepare(dp->core_clk); +err_core_clk: + clk_disable_unprepare(dp->pclk); +err_pclk: + return ret; +} + +static void cdn_dp_clk_disable(struct cdn_dp_device *dp) +{ + pm_runtime_put_sync(dp->dev); + clk_disable_unprepare(dp->pclk); + clk_disable_unprepare(dp->core_clk); +} + +static int cdn_dp_get_port_lanes(struct cdn_dp_port *port) +{ + struct extcon_dev *edev = port->extcon; + union extcon_property_value property; + int dptx; + u8 lanes; + + dptx = extcon_get_state(edev, EXTCON_DISP_DP); + if (dptx > 0) { + extcon_get_property(edev, EXTCON_DISP_DP, + EXTCON_PROP_USB_SS, &property); + if (property.intval) + lanes = 2; + else + lanes = 4; + } else { + lanes = 0; + } + + return lanes; +} + +static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count) +{ + int ret; + u8 value; + + *sink_count = 0; + ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1); + if (ret) + return ret; + + *sink_count = DP_GET_SINK_COUNT(value); + return 0; +} + +static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp) +{ + struct 
cdn_dp_port *port; + int i, lanes; + + for (i = 0; i < dp->ports; i++) { + port = dp->port[i]; + lanes = cdn_dp_get_port_lanes(port); + if (lanes) + return port; + } + return NULL; +} + +static enum drm_connector_status +cdn_dp_connector_detect(struct drm_connector *connector, bool force) +{ + struct cdn_dp_device *dp = connector_to_dp(connector); + enum drm_connector_status status = connector_status_disconnected; + + mutex_lock(&dp->lock); + if (dp->connected) + status = connector_status_connected; + mutex_unlock(&dp->lock); + + return status; +} + +static void cdn_dp_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = { + .dpms = drm_atomic_helper_connector_dpms, + .detect = cdn_dp_connector_detect, + .destroy = cdn_dp_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int cdn_dp_connector_get_modes(struct drm_connector *connector) +{ + struct cdn_dp_device *dp = connector_to_dp(connector); + struct edid *edid; + int ret = 0; + + mutex_lock(&dp->lock); + edid = dp->edid; + if (edid) { + DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n", + edid->width_cm, edid->height_cm); + + dp->sink_has_audio = drm_detect_monitor_audio(edid); + ret = drm_add_edid_modes(connector, edid); + if (ret) { + drm_mode_connector_update_edid_property(connector, + edid); + drm_edid_to_eld(connector, edid); + } + } + mutex_unlock(&dp->lock); + + return ret; +} + +static struct drm_encoder * +cdn_dp_connector_best_encoder(struct drm_connector *connector) +{ + struct cdn_dp_device *dp = connector_to_dp(connector); + + return &dp->encoder; +} + +static int cdn_dp_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct cdn_dp_device *dp = connector_to_dp(connector); + struct drm_display_info *display_info = &dp->connector.display_info; + u32 requested, actual, rate, sink_max, source_max = 0; + u8 lanes, bpc; + + /* If DP is disconnected, every mode is invalid */ + if (!dp->connected) + return MODE_BAD; + + switch (display_info->bpc) { + case 10: + bpc = 10; + break; + case 6: + bpc = 6; + break; + default: + bpc = 8; + break; + } + + requested = mode->clock * bpc * 3 / 1000; + + source_max = dp->lanes; + sink_max = drm_dp_max_lane_count(dp->dpcd); + lanes = min(source_max, sink_max); + + source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE); + sink_max = drm_dp_max_link_rate(dp->dpcd); + rate = min(source_max, sink_max); + + actual = rate * lanes / 100; + + /* efficiency is about 0.8 */ + actual = actual * 8 / 10; + + if (requested > actual) { + DRM_DEV_DEBUG_KMS(dp->dev, + "requested=%d, actual=%d, clock=%d\n", + requested, actual, mode->clock); + return MODE_CLOCK_HIGH; + } + + return MODE_OK; +} + +static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = { + .get_modes = cdn_dp_connector_get_modes, + .best_encoder = cdn_dp_connector_best_encoder, + .mode_valid = cdn_dp_connector_mode_valid, +}; + +static int cdn_dp_firmware_init(struct cdn_dp_device *dp) +{ + int ret; + const u32 *iram_data, *dram_data; + const struct firmware *fw = dp->fw; + const struct cdn_firmware_header *hdr; + + hdr = (struct cdn_firmware_header *)fw->data; + if (fw->size != 
le32_to_cpu(hdr->size_bytes)) { + DRM_DEV_ERROR(dp->dev, "firmware is invalid\n"); + return -EINVAL; + } + + iram_data = (const u32 *)(fw->data + hdr->header_size); + dram_data = (const u32 *)(fw->data + hdr->header_size + hdr->iram_size); + + ret = cdn_dp_load_firmware(dp, iram_data, hdr->iram_size, + dram_data, hdr->dram_size); + if (ret) + return ret; + + ret = cdn_dp_set_firmware_active(dp, true); + if (ret) { + DRM_DEV_ERROR(dp->dev, "active ucpu failed: %d\n", ret); + return ret; + } + + return cdn_dp_event_config(dp); +} + +static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp, + struct cdn_dp_port *port, + u8 *sink_count) +{ + int ret; + unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS); + + /* + * Attempt to read sink count & sink capability, retry in case the sink + * may not be ready. + * + * Sinks are *supposed* to come up within 1ms from an off state, but + * some docks need more time to power up. + */ + while (time_before(jiffies, timeout)) { + if (!extcon_get_state(port->extcon, EXTCON_DISP_DP)) + return -ENODEV; + + if (cdn_dp_get_sink_count(dp, sink_count)) { + usleep_range(5000, 10000); + continue; + } + + if (!*sink_count) + return -ENODEV; + + ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); + return ret; + } + + kfree(dp->edid); + dp->edid = drm_do_get_edid(&dp->connector, + cdn_dp_get_edid_block, dp); + return 0; + } + + DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n"); + return -ETIMEDOUT; +} + +static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) +{ + union extcon_property_value property; + int ret; + + ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, + (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK); + if (ret) + return ret; + + if (!port->phy_enabled) { + ret = phy_power_on(port->phy); + if (ret) { + DRM_DEV_ERROR(dp->dev, "phy power on failed: %d\n", + ret); + goto err_phy; + } + port->phy_enabled = true; + } + + ret = cdn_dp_grf_write(dp, GRF_SOC_CON26, + DPTX_HPD_SEL_MASK | DPTX_HPD_SEL); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to write HPD_SEL %d\n", ret); + goto err_power_on; + } + + ret = cdn_dp_get_hpd_status(dp); + if (ret <= 0) { + if (!ret) + DRM_DEV_ERROR(dp->dev, "hpd does not exist\n"); + goto err_power_on; + } + + ret = extcon_get_property(port->extcon, EXTCON_DISP_DP, + EXTCON_PROP_USB_TYPEC_POLARITY, &property); + if (ret) { + DRM_DEV_ERROR(dp->dev, "get property failed\n"); + goto err_power_on; + } + + port->lanes = cdn_dp_get_port_lanes(port); + ret = cdn_dp_set_host_cap(dp, port->lanes, property.intval); + if (ret) { + DRM_DEV_ERROR(dp->dev, "set host capabilities failed: %d\n", + ret); + goto err_power_on; + } + + return 0; + +err_power_on: + if (phy_power_off(port->phy)) + DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); + else + port->phy_enabled = false; + +err_phy: + cdn_dp_grf_write(dp, GRF_SOC_CON26, + DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); + return ret; +} + +static int cdn_dp_disable_phy(struct cdn_dp_device *dp, + struct cdn_dp_port *port) +{ + int ret; + + if (port->phy_enabled) { + ret = phy_power_off(port->phy); + if (ret) { + DRM_DEV_ERROR(dp->dev, "phy power off failed: %d", ret); + return ret; + } + } + + port->phy_enabled = false; + port->lanes = 0; + return 0; +} + +static int cdn_dp_disable(struct cdn_dp_device *dp) +{ + int ret, i; + + if (!dp->active) + return 0; + + for (i = 0; i < dp->ports; i++) + cdn_dp_disable_phy(dp, dp->port[i]); + + ret = 
cdn_dp_grf_write(dp, GRF_SOC_CON26, + DPTX_HPD_SEL_MASK | DPTX_HPD_DEL); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to clear hpd sel %d\n", + ret); + return ret; + } + + cdn_dp_set_firmware_active(dp, false); + cdn_dp_clk_disable(dp); + dp->active = false; + dp->link.rate = 0; + dp->link.num_lanes = 0; + if (!dp->connected) { + kfree(dp->edid); + dp->edid = NULL; + } + + return 0; +} + +static int cdn_dp_enable(struct cdn_dp_device *dp) +{ + int ret, i, lanes; + struct cdn_dp_port *port; + u8 sink_count; + + port = cdn_dp_connected_port(dp); + if (!port) { + DRM_DEV_ERROR(dp->dev, + "Can't enable without connection\n"); + return -ENODEV; + } + + if (dp->active) + return 0; + + ret = cdn_dp_clk_enable(dp); + if (ret) + return ret; + + ret = cdn_dp_firmware_init(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "firmware init failed: %d", ret); + goto err_clk_disable; + } + + /* only enable the port that connected with downstream device */ + for (i = port->id; i < dp->ports; i++) { + port = dp->port[i]; + lanes = cdn_dp_get_port_lanes(port); + if (lanes) { + ret = cdn_dp_enable_phy(dp, port); + if (ret) + continue; + + ret = cdn_dp_get_sink_capability(dp, port, &sink_count); + if (ret || (!ret && !sink_count)) { + cdn_dp_disable_phy(dp, port); + } else { + dp->active = true; + dp->lanes = port->lanes; + return 0; + } + } + } + +err_clk_disable: + cdn_dp_clk_disable(dp); + return ret; +} + +static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct cdn_dp_device *dp = encoder_to_dp(encoder); + struct drm_display_info *display_info = &dp->connector.display_info; + struct rockchip_crtc_state *state; + struct video_info *video = &dp->video_info; + int ret, val; + + switch (display_info->bpc) { + case 10: + video->color_depth = 10; + break; + case 6: + video->color_depth = 6; + break; + default: + video->color_depth = 8; + break; + } + + video->color_fmt = PXL_RGB; + + video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); + video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); + + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); + if (ret < 0) { + DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); + return; + } + + DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", + (ret) ? 
"LIT" : "BIG"); + state = to_rockchip_crtc_state(encoder->crtc->state); + if (ret) { + val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); + state->output_mode = ROCKCHIP_OUT_MODE_P888; + } else { + val = DP_SEL_VOP_LIT << 16; + state->output_mode = ROCKCHIP_OUT_MODE_AAAA; + } + + ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); + if (ret) + return; + + memcpy(&dp->mode, adjusted, sizeof(*mode)); +} + +static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) +{ + u8 link_status[DP_LINK_STATUS_SIZE]; + struct cdn_dp_port *port = cdn_dp_connected_port(dp); + u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd); + + if (!port || !dp->link.rate || !dp->link.num_lanes) + return false; + + if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status, + DP_LINK_STATUS_SIZE)) { + DRM_ERROR("Failed to get link status\n"); + return false; + } + + /* if link training is requested we should perform it always */ + return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes)); +} + +static void cdn_dp_encoder_enable(struct drm_encoder *encoder) +{ + struct cdn_dp_device *dp = encoder_to_dp(encoder); + int ret; + + mutex_lock(&dp->lock); + ret = cdn_dp_enable(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", + ret); + goto out; + } + if (!cdn_dp_check_link_status(dp)) { + ret = cdn_dp_train_link(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed link train %d\n", ret); + goto out; + } + } + + ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret); + goto out; + } + + ret = cdn_dp_config_video(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to config video %d\n", ret); + goto out; + } + + ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret); + goto out; + } +out: + mutex_unlock(&dp->lock); +} + +static void cdn_dp_encoder_disable(struct drm_encoder *encoder) +{ + struct cdn_dp_device *dp = encoder_to_dp(encoder); + int ret; + + mutex_lock(&dp->lock); + if (dp->active) { + ret = cdn_dp_disable(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", + ret); + } + } + mutex_unlock(&dp->lock); + + /* + * In the following 2 cases, we need to run the event_work to re-enable + * the DP: + * 1. If there is not just one port device is connected, and remove one + * device from a port, the DP will be disabled here, at this case, + * run the event_work to re-open DP for the other port. + * 2. If re-training or re-config failed, the DP will be disabled here. + * run the event_work to re-connect it. 
+ */ + if (!dp->connected && cdn_dp_connected_port(dp)) + schedule_work(&dp->event_work); +} + +static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + s->output_type = DRM_MODE_CONNECTOR_DisplayPort; + + return 0; +} + +static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { + .mode_set = cdn_dp_encoder_mode_set, + .enable = cdn_dp_encoder_enable, + .disable = cdn_dp_encoder_disable, + .atomic_check = cdn_dp_encoder_atomic_check, +}; + +static const struct drm_encoder_funcs cdn_dp_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int cdn_dp_parse_dt(struct cdn_dp_device *dp) +{ + struct device *dev = dp->dev; + struct device_node *np = dev->of_node; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); + if (IS_ERR(dp->grf)) { + DRM_DEV_ERROR(dev, "cdn-dp needs rockchip,grf property\n"); + return PTR_ERR(dp->grf); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dp->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(dp->regs)) { + DRM_DEV_ERROR(dev, "ioremap reg failed\n"); + return PTR_ERR(dp->regs); + } + + dp->core_clk = devm_clk_get(dev, "core-clk"); + if (IS_ERR(dp->core_clk)) { + DRM_DEV_ERROR(dev, "cannot get core_clk_dp\n"); + return PTR_ERR(dp->core_clk); + } + + dp->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(dp->pclk)) { + DRM_DEV_ERROR(dev, "cannot get pclk\n"); + return PTR_ERR(dp->pclk); + } + + dp->spdif_clk = devm_clk_get(dev, "spdif"); + if (IS_ERR(dp->spdif_clk)) { + DRM_DEV_ERROR(dev, "cannot get spdif_clk\n"); + return PTR_ERR(dp->spdif_clk); + } + + dp->grf_clk = devm_clk_get(dev, "grf"); + if (IS_ERR(dp->grf_clk)) { + DRM_DEV_ERROR(dev, "cannot get grf clk\n"); + return PTR_ERR(dp->grf_clk); + } + + dp->spdif_rst = devm_reset_control_get(dev, "spdif"); + if (IS_ERR(dp->spdif_rst)) { + DRM_DEV_ERROR(dev, "no spdif reset control found\n"); + return PTR_ERR(dp->spdif_rst); + } + + dp->dptx_rst = devm_reset_control_get(dev, "dptx"); + if (IS_ERR(dp->dptx_rst)) { + DRM_DEV_ERROR(dev, "no uphy reset control found\n"); + return PTR_ERR(dp->dptx_rst); + } + + dp->core_rst = devm_reset_control_get(dev, "core"); + if (IS_ERR(dp->core_rst)) { + DRM_DEV_ERROR(dev, "no core reset control found\n"); + return PTR_ERR(dp->core_rst); + } + + dp->apb_rst = devm_reset_control_get(dev, "apb"); + if (IS_ERR(dp->apb_rst)) { + DRM_DEV_ERROR(dev, "no apb reset control found\n"); + return PTR_ERR(dp->apb_rst); + } + + return 0; +} + +static int cdn_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + struct audio_info audio = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + int ret; + + mutex_lock(&dp->lock); + if (!dp->active) { + ret = -ENODEV; + goto out; + } + + switch (daifmt->fmt) { + case HDMI_I2S: + audio.format = AFMT_I2S; + break; + case HDMI_SPDIF: + audio.format = AFMT_SPDIF; + break; + default: + DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt); + ret = -EINVAL; + goto out; + } + + ret = cdn_dp_audio_config(dp, &audio); + if (!ret) + dp->audio_info = audio; + +out: + mutex_unlock(&dp->lock); + return ret; +} + +static void 
cdn_dp_audio_shutdown(struct device *dev, void *data) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + int ret; + + mutex_lock(&dp->lock); + if (!dp->active) + goto out; + + ret = cdn_dp_audio_stop(dp, &dp->audio_info); + if (!ret) + dp->audio_info.format = AFMT_UNUSED; +out: + mutex_unlock(&dp->lock); +} + +static int cdn_dp_audio_digital_mute(struct device *dev, void *data, + bool enable) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + int ret; + + mutex_lock(&dp->lock); + if (!dp->active) { + ret = -ENODEV; + goto out; + } + + ret = cdn_dp_audio_mute(dp, enable); + +out: + mutex_unlock(&dp->lock); + return ret; +} + +static int cdn_dp_audio_get_eld(struct device *dev, void *data, + u8 *buf, size_t len) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + + memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops audio_codec_ops = { + .hw_params = cdn_dp_audio_hw_params, + .audio_shutdown = cdn_dp_audio_shutdown, + .digital_mute = cdn_dp_audio_digital_mute, + .get_eld = cdn_dp_audio_get_eld, +}; + +static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, + struct device *dev) +{ + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 1, + .ops = &audio_codec_ops, + .max_i2s_channels = 8, + }; + + dp->audio_pdev = platform_device_register_data( + dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + return PTR_ERR_OR_ZERO(dp->audio_pdev); +} + +static int cdn_dp_request_firmware(struct cdn_dp_device *dp) +{ + int ret; + unsigned long timeout = jiffies + msecs_to_jiffies(CDN_FW_TIMEOUT_MS); + unsigned long sleep = 1000; + + WARN_ON(!mutex_is_locked(&dp->lock)); + + if (dp->fw_loaded) + return 0; + + /* Drop the lock before getting the firmware to avoid blocking boot */ + mutex_unlock(&dp->lock); + + while (time_before(jiffies, timeout)) { + ret = request_firmware(&dp->fw, CDN_DP_FIRMWARE, dp->dev); + if (ret == -ENOENT) { + msleep(sleep); + sleep *= 2; + continue; + } else if (ret) { + DRM_DEV_ERROR(dp->dev, + "failed to request firmware: %d\n", ret); + goto out; + } + + dp->fw_loaded = true; + ret = 0; + goto out; + } + + DRM_DEV_ERROR(dp->dev, "Timed out trying to load firmware\n"); + ret = -ETIMEDOUT; +out: + mutex_lock(&dp->lock); + return ret; +} + +static void cdn_dp_pd_event_work(struct work_struct *work) +{ + struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device, + event_work); + int ret; + u8 sink_count; + + mutex_lock(&dp->lock); + ret = cdn_dp_request_firmware(dp); + if (ret) + goto out; + + dp->connected = true; + + /* Not connected, notify userspace to disable the block */ + if (!cdn_dp_connected_port(dp)) { + DRM_DEV_INFO(dp->dev, "Not connected. Disabling cdn\n"); + dp->connected = false; + + /* Connected but not enabled, enable the block */ + } else if (!dp->active) { + DRM_DEV_INFO(dp->dev, "Connected, not enabled. Enabling cdn\n"); + ret = cdn_dp_enable(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Enable dp failed %d\n", ret); + dp->connected = false; + } + + /* Enabled and connected to a dongle without a sink, notify userspace */ + } else if (cdn_dp_get_sink_count(dp, &sink_count) || !sink_count) { + DRM_DEV_INFO(dp->dev, "Connected without sink. 
Assert hpd\n"); + dp->connected = false; + + /* Enabled and connected with a sink, re-train if requested */ + } else if (!cdn_dp_check_link_status(dp)) { + unsigned int rate = dp->link.rate; + unsigned int lanes = dp->link.num_lanes; + struct drm_display_mode *mode = &dp->mode; + + DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n"); + ret = cdn_dp_train_link(dp); + if (ret) { + dp->connected = false; + DRM_DEV_ERROR(dp->dev, "Train link failed %d\n", ret); + goto out; + } + + /* If training result is changed, update the video config */ + if (mode->clock && + (rate != dp->link.rate || lanes != dp->link.num_lanes)) { + ret = cdn_dp_config_video(dp); + if (ret) { + dp->connected = false; + DRM_DEV_ERROR(dp->dev, + "Failed to config video %d\n", + ret); + } + } + } + +out: + mutex_unlock(&dp->lock); + drm_helper_hpd_irq_event(dp->drm_dev); +} + +static int cdn_dp_pd_event(struct notifier_block *nb, + unsigned long event, void *priv) +{ + struct cdn_dp_port *port = container_of(nb, struct cdn_dp_port, + event_nb); + struct cdn_dp_device *dp = port->dp; + + /* + * It would be nice to be able to just do the work inline right here. + * However, we need to make a bunch of calls that might sleep in order + * to turn on the block/phy, so use a worker instead. + */ + schedule_work(&dp->event_work); + + return NOTIFY_DONE; +} + +static int cdn_dp_bind(struct device *dev, struct device *master, void *data) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + struct drm_encoder *encoder; + struct drm_connector *connector; + struct cdn_dp_port *port; + struct drm_device *drm_dev = data; + int ret, i; + bool schedule_event = false; + + ret = cdn_dp_parse_dt(dp); + if (ret < 0) + return ret; + + dp->drm_dev = drm_dev; + dp->connected = false; + dp->active = false; + + mutex_init(&dp->lock); + INIT_WORK(&dp->event_work, cdn_dp_pd_event_work); + + encoder = &dp->encoder; + + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, + dev->of_node); + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + return ret; + } + + drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs); + + connector = &dp->connector; + connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->dpms = DRM_MODE_DPMS_OFF; + + ret = drm_connector_init(drm_dev, connector, + &cdn_dp_atomic_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto err_free_encoder; + } + + drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); + + ret = drm_mode_connector_attach_encoder(connector, encoder); + if (ret) { + DRM_ERROR("failed to attach connector and encoder\n"); + goto err_free_connector; + } + + cdn_dp_audio_codec_init(dp, dev); + + for (i = 0; i < dp->ports; i++) { + port = dp->port[i]; + + port->event_nb.notifier_call = cdn_dp_pd_event; + ret = devm_extcon_register_notifier(dp->dev, port->extcon, + EXTCON_DISP_DP, + &port->event_nb); + if (ret) { + DRM_DEV_ERROR(dev, + "register EXTCON_DISP_DP notifier err\n"); + goto err_free_connector; + } + + if (extcon_get_state(port->extcon, EXTCON_DISP_DP)) + schedule_event = true; + } + + pm_runtime_enable(dev); + + if (schedule_event) + schedule_work(&dp->event_work); + + return 0; + +err_free_connector: + drm_connector_cleanup(connector); +err_free_encoder: + drm_encoder_cleanup(encoder); + return ret; +} + 
+static void cdn_dp_unbind(struct device *dev, struct device *master, void *data) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + struct drm_encoder *encoder = &dp->encoder; + struct drm_connector *connector = &dp->connector; + + cancel_work_sync(&dp->event_work); + platform_device_unregister(dp->audio_pdev); + cdn_dp_encoder_disable(encoder); + encoder->funcs->destroy(encoder); + connector->funcs->destroy(connector); + + pm_runtime_disable(dev); + release_firmware(dp->fw); + kfree(dp->edid); + dp->edid = NULL; +} + +static const struct component_ops cdn_dp_component_ops = { + .bind = cdn_dp_bind, + .unbind = cdn_dp_unbind, +}; + +int cdn_dp_suspend(struct device *dev) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + + if (dp->active) + return cdn_dp_disable(dp); + + return 0; +} + +int cdn_dp_resume(struct device *dev) +{ + struct cdn_dp_device *dp = dev_get_drvdata(dev); + + if (dp->fw_loaded) + schedule_work(&dp->event_work); + + return 0; +} + +static int cdn_dp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *match; + struct cdn_dp_data *dp_data; + struct cdn_dp_port *port; + struct cdn_dp_device *dp; + struct extcon_dev *extcon; + struct phy *phy; + int i; + + dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return -ENOMEM; + dp->dev = dev; + + match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node); + dp_data = (struct cdn_dp_data *)match->data; + + for (i = 0; i < dp_data->max_phy; i++) { + extcon = extcon_get_edev_by_phandle(dev, i); + phy = devm_of_phy_get_by_index(dev, dev->of_node, i); + + if (PTR_ERR(extcon) == -EPROBE_DEFER || + PTR_ERR(phy) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + if (IS_ERR(extcon) || IS_ERR(phy)) + continue; + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!dp) + return -ENOMEM; + + port->extcon = extcon; + port->phy = phy; + port->dp = dp; + port->id = i; + dp->port[dp->ports++] = port; + } + + if (!dp->ports) { + DRM_DEV_ERROR(dev, "missing extcon or phy\n"); + return -EINVAL; + } + + dev_set_drvdata(dev, dp); + + return component_add(dev, &cdn_dp_component_ops); +} + +static int cdn_dp_remove(struct platform_device *pdev) +{ + struct cdn_dp_device *dp = platform_get_drvdata(pdev); + + cdn_dp_suspend(dp->dev); + component_del(&pdev->dev, &cdn_dp_component_ops); + + return 0; +} + +static void cdn_dp_shutdown(struct platform_device *pdev) +{ + struct cdn_dp_device *dp = platform_get_drvdata(pdev); + + cdn_dp_suspend(dp->dev); +} + +static const struct dev_pm_ops cdn_dp_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cdn_dp_suspend, + cdn_dp_resume) +}; + +static struct platform_driver cdn_dp_driver = { + .probe = cdn_dp_probe, + .remove = cdn_dp_remove, + .shutdown = cdn_dp_shutdown, + .driver = { + .name = "cdn-dp", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(cdn_dp_dt_ids), + .pm = &cdn_dp_pm_ops, + }, +}; + +module_platform_driver(cdn_dp_driver); + +MODULE_AUTHOR("Chris Zhong "); +MODULE_DESCRIPTION("cdn DP Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h new file mode 100644 index 000000000000..3bea4b8d265f --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2016 Chris Zhong + * Copyright (C) 2016 ROCKCHIP, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CDN_DP_CORE_H +#define _CDN_DP_CORE_H + +#include +#include +#include +#include +#include "rockchip_drm_drv.h" + +#define MAX_PHY 2 + +enum audio_format { + AFMT_I2S = 0, + AFMT_SPDIF = 1, + AFMT_UNUSED, +}; + +struct audio_info { + enum audio_format format; + int sample_rate; + int channels; + int sample_width; +}; + +enum vic_pxl_encoding_format { + PXL_RGB = 0x1, + YCBCR_4_4_4 = 0x2, + YCBCR_4_2_2 = 0x4, + YCBCR_4_2_0 = 0x8, + Y_ONLY = 0x10, +}; + +struct video_info { + bool h_sync_polarity; + bool v_sync_polarity; + bool interlaced; + int color_depth; + enum vic_pxl_encoding_format color_fmt; +}; + +struct cdn_firmware_header { + u32 size_bytes; /* size of the entire header+image(s) in bytes */ + u32 header_size; /* size of just the header in bytes */ + u32 iram_size; /* size of iram */ + u32 dram_size; /* size of dram */ +}; + +struct cdn_dp_port { + struct cdn_dp_device *dp; + struct notifier_block event_nb; + struct extcon_dev *extcon; + struct phy *phy; + u8 lanes; + bool phy_enabled; + u8 id; +}; + +struct cdn_dp_device { + struct device *dev; + struct drm_device *drm_dev; + struct drm_connector connector; + struct drm_encoder encoder; + struct drm_display_mode mode; + struct platform_device *audio_pdev; + struct work_struct event_work; + struct edid *edid; + + struct mutex lock; + bool connected; + bool active; + + const struct firmware *fw; /* cdn dp firmware */ + unsigned int fw_version; /* cdn fw version */ + bool fw_loaded; + + void __iomem *regs; + struct regmap *grf; + struct clk *core_clk; + struct clk *pclk; + struct clk *spdif_clk; + struct clk *grf_clk; + struct reset_control *spdif_rst; + struct reset_control *dptx_rst; + struct reset_control *apb_rst; + struct reset_control *core_rst; + struct audio_info audio_info; + struct video_info video_info; + struct drm_dp_link link; + struct cdn_dp_port *port[MAX_PHY]; + u8 ports; + u8 lanes; + + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + bool sink_has_audio; +}; +#endif /* _CDN_DP_CORE_H */ diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c new file mode 100644 index 000000000000..3a5b8a4aa1e7 --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c @@ -0,0 +1,979 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author: Chris Zhong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "cdn-dp-core.h" +#include "cdn-dp-reg.h" + +#define CDN_DP_SPDIF_CLK 200000000 +#define FW_ALIVE_TIMEOUT_US 1000000 +#define MAILBOX_RETRY_US 1000 +#define MAILBOX_TIMEOUT_US 5000000 +#define LINK_TRAINING_RETRY_MS 20 +#define LINK_TRAINING_TIMEOUT_MS 500 + +void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk) +{ + writel(clk / 1000000, dp->regs + SW_CLK_H); +} + +void cdn_dp_clock_reset(struct cdn_dp_device *dp) +{ + u32 val; + + val = DPTX_FRMR_DATA_CLK_RSTN_EN | + DPTX_FRMR_DATA_CLK_EN | + DPTX_PHY_DATA_RSTN_EN | + DPTX_PHY_DATA_CLK_EN | + DPTX_PHY_CHAR_RSTN_EN | + DPTX_PHY_CHAR_CLK_EN | + SOURCE_AUX_SYS_CLK_RSTN_EN | + SOURCE_AUX_SYS_CLK_EN | + DPTX_SYS_CLK_RSTN_EN | + DPTX_SYS_CLK_EN | + CFG_DPTX_VIF_CLK_RSTN_EN | + CFG_DPTX_VIF_CLK_EN; + writel(val, dp->regs + SOURCE_DPTX_CAR); + + val = SOURCE_PHY_RSTN_EN | SOURCE_PHY_CLK_EN; + writel(val, dp->regs + SOURCE_PHY_CAR); + + val = SOURCE_PKT_SYS_RSTN_EN | + SOURCE_PKT_SYS_CLK_EN | + SOURCE_PKT_DATA_RSTN_EN | + SOURCE_PKT_DATA_CLK_EN; + writel(val, dp->regs + SOURCE_PKT_CAR); + + val = SPDIF_CDR_CLK_RSTN_EN | + SPDIF_CDR_CLK_EN | + SOURCE_AIF_SYS_RSTN_EN | + SOURCE_AIF_SYS_CLK_EN | + SOURCE_AIF_CLK_RSTN_EN | + SOURCE_AIF_CLK_EN; + writel(val, dp->regs + SOURCE_AIF_CAR); + + val = SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN | + SOURCE_CIPHER_SYS_CLK_EN | + SOURCE_CIPHER_CHAR_CLK_RSTN_EN | + SOURCE_CIPHER_CHAR_CLK_EN; + writel(val, dp->regs + SOURCE_CIPHER_CAR); + + val = SOURCE_CRYPTO_SYS_CLK_RSTN_EN | + SOURCE_CRYPTO_SYS_CLK_EN; + writel(val, dp->regs + SOURCE_CRYPTO_CAR); + + /* enable Mailbox and PIF interrupt */ + writel(0, dp->regs + APB_INT_MASK); +} + +static int cdn_dp_mailbox_read(struct cdn_dp_device *dp) +{ + int val, ret; + + ret = readx_poll_timeout(readl, dp->regs + MAILBOX_EMPTY_ADDR, + val, !val, MAILBOX_RETRY_US, + MAILBOX_TIMEOUT_US); + if (ret < 0) + return ret; + + return readl(dp->regs + MAILBOX0_RD_DATA) & 0xff; +} + +static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val) +{ + int ret, full; + + ret = readx_poll_timeout(readl, dp->regs + MAILBOX_FULL_ADDR, + full, !full, MAILBOX_RETRY_US, + MAILBOX_TIMEOUT_US); + if (ret < 0) + return ret; + + writel(val, dp->regs + MAILBOX0_WR_DATA); + + return 0; +} + +static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp, + u8 module_id, u8 opcode, + u8 req_size) +{ + u32 mbox_size, i; + u8 header[4]; + int ret; + + /* read the header of the message */ + for (i = 0; i < 4; i++) { + ret = cdn_dp_mailbox_read(dp); + if (ret < 0) + return ret; + + header[i] = ret; + } + + mbox_size = (header[2] << 8) | header[3]; + + if (opcode != header[0] || module_id != header[1] || + req_size != mbox_size) { + /* + * If the message in mailbox is not what we want, we need to + * clear the mailbox by reading its contents. 
+ */ + for (i = 0; i < mbox_size; i++) + if (cdn_dp_mailbox_read(dp) < 0) + break; + + return -EINVAL; + } + + return 0; +} + +static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp, + u8 *buff, u8 buff_size) +{ + u32 i; + int ret; + + for (i = 0; i < buff_size; i++) { + ret = cdn_dp_mailbox_read(dp); + if (ret < 0) + return ret; + + buff[i] = ret; + } + + return 0; +} + +static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id, + u8 opcode, u16 size, u8 *message) +{ + u8 header[4]; + int ret, i; + + header[0] = opcode; + header[1] = module_id; + header[2] = (size >> 8) & 0xff; + header[3] = size & 0xff; + + for (i = 0; i < 4; i++) { + ret = cdp_dp_mailbox_write(dp, header[i]); + if (ret) + return ret; + } + + for (i = 0; i < size; i++) { + ret = cdp_dp_mailbox_write(dp, message[i]); + if (ret) + return ret; + } + + return 0; +} + +static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val) +{ + u8 msg[6]; + + msg[0] = (addr >> 8) & 0xff; + msg[1] = addr & 0xff; + msg[2] = (val >> 24) & 0xff; + msg[3] = (val >> 16) & 0xff; + msg[4] = (val >> 8) & 0xff; + msg[5] = val & 0xff; + return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_REGISTER, + sizeof(msg), msg); +} + +static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr, + u8 start_bit, u8 bits_no, u32 val) +{ + u8 field[8]; + + field[0] = (addr >> 8) & 0xff; + field[1] = addr & 0xff; + field[2] = start_bit; + field[3] = bits_no; + field[4] = (val >> 24) & 0xff; + field[5] = (val >> 16) & 0xff; + field[6] = (val >> 8) & 0xff; + field[7] = val & 0xff; + + return cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD, + sizeof(field), field); +} + +int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len) +{ + u8 msg[5], reg[5]; + int ret; + + msg[0] = (len >> 8) & 0xff; + msg[1] = len & 0xff; + msg[2] = (addr >> 16) & 0xff; + msg[3] = (addr >> 8) & 0xff; + msg[4] = addr & 0xff; + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD, + sizeof(msg), msg); + if (ret) + goto err_dpcd_read; + + ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, + DPTX_READ_DPCD, + sizeof(reg) + len); + if (ret) + goto err_dpcd_read; + + ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg)); + if (ret) + goto err_dpcd_read; + + ret = cdn_dp_mailbox_read_receive(dp, data, len); + +err_dpcd_read: + return ret; +} + +int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value) +{ + u8 msg[6], reg[5]; + int ret; + + msg[0] = 0; + msg[1] = 1; + msg[2] = (addr >> 16) & 0xff; + msg[3] = (addr >> 8) & 0xff; + msg[4] = addr & 0xff; + msg[5] = value; + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD, + sizeof(msg), msg); + if (ret) + goto err_dpcd_write; + + ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, + DPTX_WRITE_DPCD, sizeof(reg)); + if (ret) + goto err_dpcd_write; + + ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg)); + if (ret) + goto err_dpcd_write; + + if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4])) + ret = -EINVAL; + +err_dpcd_write: + if (ret) + DRM_DEV_ERROR(dp->dev, "dpcd write failed: %d\n", ret); + return ret; +} + +int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem, + u32 i_size, const u32 *d_mem, u32 d_size) +{ + u32 reg; + int i, ret; + + /* reset ucpu before load firmware*/ + writel(APB_IRAM_PATH | APB_DRAM_PATH | APB_XT_RESET, + dp->regs + APB_CTRL); + + for (i = 0; i < i_size; i += 4) + writel(*i_mem++, dp->regs + ADDR_IMEM + i); + + for (i = 0; i < d_size; i += 4) + writel(*d_mem++, 
dp->regs + ADDR_DMEM + i); + + /* un-reset ucpu */ + writel(0, dp->regs + APB_CTRL); + + /* check the keep alive register to make sure fw working */ + ret = readx_poll_timeout(readl, dp->regs + KEEP_ALIVE, + reg, reg, 2000, FW_ALIVE_TIMEOUT_US); + if (ret < 0) { + DRM_DEV_ERROR(dp->dev, "failed to loaded the FW reg = %x\n", + reg); + return -EINVAL; + } + + reg = readl(dp->regs + VER_L) & 0xff; + dp->fw_version = reg; + reg = readl(dp->regs + VER_H) & 0xff; + dp->fw_version |= reg << 8; + reg = readl(dp->regs + VER_LIB_L_ADDR) & 0xff; + dp->fw_version |= reg << 16; + reg = readl(dp->regs + VER_LIB_H_ADDR) & 0xff; + dp->fw_version |= reg << 24; + + dev_dbg(dp->dev, "firmware version: %x\n", dp->fw_version); + + return 0; +} + +int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable) +{ + u8 msg[5]; + int ret, i; + + msg[0] = GENERAL_MAIN_CONTROL; + msg[1] = MB_MODULE_ID_GENERAL; + msg[2] = 0; + msg[3] = 1; + msg[4] = enable ? FW_ACTIVE : FW_STANDBY; + + for (i = 0; i < sizeof(msg); i++) { + ret = cdp_dp_mailbox_write(dp, msg[i]); + if (ret) + goto err_set_firmware_active; + } + + /* read the firmware state */ + for (i = 0; i < sizeof(msg); i++) { + ret = cdn_dp_mailbox_read(dp); + if (ret < 0) + goto err_set_firmware_active; + + msg[i] = ret; + } + + ret = 0; + +err_set_firmware_active: + if (ret < 0) + DRM_DEV_ERROR(dp->dev, "set firmware active failed\n"); + return ret; +} + +int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip) +{ + u8 msg[8]; + int ret; + + msg[0] = CDN_DP_MAX_LINK_RATE; + msg[1] = lanes | SCRAMBLER_EN; + msg[2] = VOLTAGE_LEVEL_2; + msg[3] = PRE_EMPHASIS_LEVEL_3; + msg[4] = PTS1 | PTS2 | PTS3 | PTS4; + msg[5] = FAST_LT_NOT_SUPPORT; + msg[6] = flip ? LANE_MAPPING_FLIPPED : LANE_MAPPING_NORMAL; + msg[7] = ENHANCED; + + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, + DPTX_SET_HOST_CAPABILITIES, + sizeof(msg), msg); + if (ret) + goto err_set_host_cap; + + ret = cdn_dp_reg_write(dp, DP_AUX_SWAP_INVERSION_CONTROL, + AUX_HOST_INVERT); + +err_set_host_cap: + if (ret) + DRM_DEV_ERROR(dp->dev, "set host cap failed: %d\n", ret); + return ret; +} + +int cdn_dp_event_config(struct cdn_dp_device *dp) +{ + u8 msg[5]; + int ret; + + memset(msg, 0, sizeof(msg)); + + msg[0] = DPTX_EVENT_ENABLE_HPD | DPTX_EVENT_ENABLE_TRAINING; + + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_ENABLE_EVENT, + sizeof(msg), msg); + if (ret) + DRM_DEV_ERROR(dp->dev, "set event config failed: %d\n", ret); + + return ret; +} + +u32 cdn_dp_get_event(struct cdn_dp_device *dp) +{ + return readl(dp->regs + SW_EVENTS0); +} + +int cdn_dp_get_hpd_status(struct cdn_dp_device *dp) +{ + u8 status; + int ret; + + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE, + 0, NULL); + if (ret) + goto err_get_hpd; + + ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, + DPTX_HPD_STATE, sizeof(status)); + if (ret) + goto err_get_hpd; + + ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status)); + if (ret) + goto err_get_hpd; + + return status; + +err_get_hpd: + DRM_DEV_ERROR(dp->dev, "get hpd status failed: %d\n", ret); + return ret; +} + +int cdn_dp_get_edid_block(void *data, u8 *edid, + unsigned int block, size_t length) +{ + struct cdn_dp_device *dp = data; + u8 msg[2], reg[2], i; + int ret; + + for (i = 0; i < 4; i++) { + msg[0] = block / 2; + msg[1] = block % 2; + + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID, + sizeof(msg), msg); + if (ret) + continue; + + ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, + 
DPTX_GET_EDID, + sizeof(reg) + length); + if (ret) + continue; + + ret = cdn_dp_mailbox_read_receive(dp, reg, sizeof(reg)); + if (ret) + continue; + + ret = cdn_dp_mailbox_read_receive(dp, edid, length); + if (ret) + continue; + + if (reg[0] == length && reg[1] == block / 2) + break; + } + + if (ret) + DRM_DEV_ERROR(dp->dev, "get block[%d] edid failed: %d\n", block, + ret); + + return ret; +} + +static int cdn_dp_training_start(struct cdn_dp_device *dp) +{ + unsigned long timeout; + u8 msg, event[2]; + int ret; + + msg = LINK_TRAINING_RUN; + + /* start training */ + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_TRAINING_CONTROL, + sizeof(msg), &msg); + if (ret) + goto err_training_start; + + timeout = jiffies + msecs_to_jiffies(LINK_TRAINING_TIMEOUT_MS); + while (time_before(jiffies, timeout)) { + msleep(LINK_TRAINING_RETRY_MS); + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, + DPTX_READ_EVENT, 0, NULL); + if (ret) + goto err_training_start; + + ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, + DPTX_READ_EVENT, + sizeof(event)); + if (ret) + goto err_training_start; + + ret = cdn_dp_mailbox_read_receive(dp, event, sizeof(event)); + if (ret) + goto err_training_start; + + if (event[1] & EQ_PHASE_FINISHED) + return 0; + } + + ret = -ETIMEDOUT; + +err_training_start: + DRM_DEV_ERROR(dp->dev, "training failed: %d\n", ret); + return ret; +} + +static int cdn_dp_get_training_status(struct cdn_dp_device *dp) +{ + u8 status[10]; + int ret; + + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_READ_LINK_STAT, + 0, NULL); + if (ret) + goto err_get_training_status; + + ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX, + DPTX_READ_LINK_STAT, + sizeof(status)); + if (ret) + goto err_get_training_status; + + ret = cdn_dp_mailbox_read_receive(dp, status, sizeof(status)); + if (ret) + goto err_get_training_status; + + dp->link.rate = status[0]; + dp->link.num_lanes = status[1]; + +err_get_training_status: + if (ret) + DRM_DEV_ERROR(dp->dev, "get training status failed: %d\n", ret); + return ret; +} + +int cdn_dp_train_link(struct cdn_dp_device *dp) +{ + int ret; + + ret = cdn_dp_training_start(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret); + return ret; + } + + ret = cdn_dp_get_training_status(dp); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to get training stat %d\n", ret); + return ret; + } + + DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate, + dp->link.num_lanes); + return ret; +} + +int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active) +{ + u8 msg; + int ret; + + msg = !!active; + + ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_SET_VIDEO, + sizeof(msg), &msg); + if (ret) + DRM_DEV_ERROR(dp->dev, "set video status failed: %d\n", ret); + + return ret; +} + +static int cdn_dp_get_msa_misc(struct video_info *video, + struct drm_display_mode *mode) +{ + u32 msa_misc; + u8 val[2]; + + switch (video->color_fmt) { + case PXL_RGB: + case Y_ONLY: + val[0] = 0; + break; + /* set YUV default color space conversion to BT601 */ + case YCBCR_4_4_4: + val[0] = 6 + BT_601 * 8; + break; + case YCBCR_4_2_2: + val[0] = 5 + BT_601 * 8; + break; + case YCBCR_4_2_0: + val[0] = 5; + break; + }; + + switch (video->color_depth) { + case 6: + val[1] = 0; + break; + case 8: + val[1] = 1; + break; + case 10: + val[1] = 2; + break; + case 12: + val[1] = 3; + break; + case 16: + val[1] = 4; + break; + }; + + msa_misc = 2 * val[0] + 32 * val[1] + + ((video->color_fmt == Y_ONLY) ? 
(1 << 14) : 0); + + return msa_misc; +} + +int cdn_dp_config_video(struct cdn_dp_device *dp) +{ + struct video_info *video = &dp->video_info; + struct drm_display_mode *mode = &dp->mode; + u64 symbol; + u32 val, link_rate, rem; + u8 bit_per_pix, tu_size_reg = TU_SIZE; + int ret; + + bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ? + (video->color_depth * 2) : (video->color_depth * 3); + + link_rate = drm_dp_bw_code_to_link_rate(dp->link.rate) / 1000; + + ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE); + if (ret) + goto err_config_video; + + ret = cdn_dp_reg_write(dp, HSYNC2VSYNC_POL_CTRL, 0); + if (ret) + goto err_config_video; + + /* + * get a best tu_size and valid symbol: + * 1. chose Lclk freq(162Mhz, 270Mhz, 540Mhz), set TU to 32 + * 2. calculate VS(valid symbol) = TU * Pclk * Bpp / (Lclk * Lanes) + * 3. if VS > *.85 or VS < *.1 or VS < 2 or TU < VS + 4, then set + * TU += 2 and repeat 2nd step. + */ + do { + tu_size_reg += 2; + symbol = tu_size_reg * mode->clock * bit_per_pix; + do_div(symbol, dp->link.num_lanes * link_rate * 8); + rem = do_div(symbol, 1000); + if (tu_size_reg > 64) { + ret = -EINVAL; + goto err_config_video; + } + } while ((symbol <= 1) || (tu_size_reg - symbol < 4) || + (rem > 850) || (rem < 100)); + + val = symbol + (tu_size_reg << 8); + val |= TU_CNT_RST_EN; + ret = cdn_dp_reg_write(dp, DP_FRAMER_TU, val); + if (ret) + goto err_config_video; + + /* set the FIFO Buffer size */ + val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate; + val /= (dp->link.num_lanes * link_rate); + val = div_u64(8 * (symbol + 1), bit_per_pix) - val; + val += 2; + ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val); + + switch (video->color_depth) { + case 6: + val = BCS_6; + break; + case 8: + val = BCS_8; + break; + case 10: + val = BCS_10; + break; + case 12: + val = BCS_12; + break; + case 16: + val = BCS_16; + break; + }; + + val += video->color_fmt << 8; + ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val); + if (ret) + goto err_config_video; + + val = video->h_sync_polarity ? DP_FRAMER_SP_HSP : 0; + val |= video->v_sync_polarity ? 
DP_FRAMER_SP_VSP : 0; + ret = cdn_dp_reg_write(dp, DP_FRAMER_SP, val); + if (ret) + goto err_config_video; + + val = (mode->hsync_start - mode->hdisplay) << 16; + val |= mode->htotal - mode->hsync_end; + ret = cdn_dp_reg_write(dp, DP_FRONT_BACK_PORCH, val); + if (ret) + goto err_config_video; + + val = mode->hdisplay * bit_per_pix / 8; + ret = cdn_dp_reg_write(dp, DP_BYTE_COUNT, val); + if (ret) + goto err_config_video; + + val = mode->htotal | ((mode->htotal - mode->hsync_start) << 16); + ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_0, val); + if (ret) + goto err_config_video; + + val = mode->hsync_end - mode->hsync_start; + val |= (mode->hdisplay << 16) | (video->h_sync_polarity << 15); + ret = cdn_dp_reg_write(dp, MSA_HORIZONTAL_1, val); + if (ret) + goto err_config_video; + + val = mode->vtotal; + val |= (mode->vtotal - mode->vsync_start) << 16; + ret = cdn_dp_reg_write(dp, MSA_VERTICAL_0, val); + if (ret) + goto err_config_video; + + val = mode->vsync_end - mode->vsync_start; + val |= (mode->vdisplay << 16) | (video->v_sync_polarity << 15); + ret = cdn_dp_reg_write(dp, MSA_VERTICAL_1, val); + if (ret) + goto err_config_video; + + val = cdn_dp_get_msa_misc(video, mode); + ret = cdn_dp_reg_write(dp, MSA_MISC, val); + if (ret) + goto err_config_video; + + ret = cdn_dp_reg_write(dp, STREAM_CONFIG, 1); + if (ret) + goto err_config_video; + + val = mode->hsync_end - mode->hsync_start; + val |= mode->hdisplay << 16; + ret = cdn_dp_reg_write(dp, DP_HORIZONTAL, val); + if (ret) + goto err_config_video; + + val = mode->vdisplay; + val |= (mode->vtotal - mode->vsync_start) << 16; + ret = cdn_dp_reg_write(dp, DP_VERTICAL_0, val); + if (ret) + goto err_config_video; + + val = mode->vtotal; + ret = cdn_dp_reg_write(dp, DP_VERTICAL_1, val); + if (ret) + goto err_config_video; + + ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 2, 1, 0); + +err_config_video: + if (ret) + DRM_DEV_ERROR(dp->dev, "config video failed: %d\n", ret); + return ret; +} + +int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio) +{ + u32 val; + int ret; + + ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, 0); + if (ret) { + DRM_DEV_ERROR(dp->dev, "audio stop failed: %d\n", ret); + return ret; + } + + val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS; + val |= SPDIF_FIFO_MID_RANGE(0xe0); + val |= SPDIF_JITTER_THRSH(0xe0); + val |= SPDIF_JITTER_AVG_WIN(7); + writel(val, dp->regs + SPDIF_CTRL_ADDR); + + /* clearn the audio config and reset */ + writel(0, dp->regs + AUDIO_SRC_CNTL); + writel(0, dp->regs + AUDIO_SRC_CNFG); + writel(AUDIO_SW_RST, dp->regs + AUDIO_SRC_CNTL); + writel(0, dp->regs + AUDIO_SRC_CNTL); + + /* reset smpl2pckt component */ + writel(0, dp->regs + SMPL2PKT_CNTL); + writel(AUDIO_SW_RST, dp->regs + SMPL2PKT_CNTL); + writel(0, dp->regs + SMPL2PKT_CNTL); + + /* reset FIFO */ + writel(AUDIO_SW_RST, dp->regs + FIFO_CNTL); + writel(0, dp->regs + FIFO_CNTL); + + if (audio->format == AFMT_SPDIF) + clk_disable_unprepare(dp->spdif_clk); + + return 0; +} + +int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable) +{ + int ret; + + ret = cdn_dp_reg_write_bit(dp, DP_VB_ID, 4, 1, enable); + if (ret) + DRM_DEV_ERROR(dp->dev, "audio mute failed: %d\n", ret); + + return ret; +} + +static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp, + struct audio_info *audio) +{ + int sub_pckt_num = 1, i2s_port_en_val = 0xf, i; + u32 val; + + if (audio->channels == 2) { + if (dp->link.num_lanes == 1) + sub_pckt_num = 2; + else + sub_pckt_num = 4; + + i2s_port_en_val = 1; + } else if (audio->channels == 4) { + i2s_port_en_val = 3; + 
} + + writel(0x0, dp->regs + SPDIF_CTRL_ADDR); + + writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL); + + val = MAX_NUM_CH(audio->channels); + val |= NUM_OF_I2S_PORTS(audio->channels); + val |= AUDIO_TYPE_LPCM; + val |= CFG_SUB_PCKT_NUM(sub_pckt_num); + writel(val, dp->regs + SMPL2PKT_CNFG); + + if (audio->sample_width == 16) + val = 0; + else if (audio->sample_width == 24) + val = 1 << 9; + else + val = 2 << 9; + + val |= AUDIO_CH_NUM(audio->channels); + val |= I2S_DEC_PORT_EN(i2s_port_en_val); + val |= TRANS_SMPL_WIDTH_32; + writel(val, dp->regs + AUDIO_SRC_CNFG); + + for (i = 0; i < (audio->channels + 1) / 2; i++) { + if (audio->sample_width == 16) + val = (0x02 << 8) | (0x02 << 20); + else if (audio->sample_width == 24) + val = (0x0b << 8) | (0x0b << 20); + + val |= ((2 * i) << 4) | ((2 * i + 1) << 16); + writel(val, dp->regs + STTS_BIT_CH(i)); + } + + switch (audio->sample_rate) { + case 32000: + val = SAMPLING_FREQ(3) | + ORIGINAL_SAMP_FREQ(0xc); + break; + case 44100: + val = SAMPLING_FREQ(0) | + ORIGINAL_SAMP_FREQ(0xf); + break; + case 48000: + val = SAMPLING_FREQ(2) | + ORIGINAL_SAMP_FREQ(0xd); + break; + case 88200: + val = SAMPLING_FREQ(8) | + ORIGINAL_SAMP_FREQ(0x7); + break; + case 96000: + val = SAMPLING_FREQ(0xa) | + ORIGINAL_SAMP_FREQ(5); + break; + case 176400: + val = SAMPLING_FREQ(0xc) | + ORIGINAL_SAMP_FREQ(3); + break; + case 192000: + val = SAMPLING_FREQ(0xe) | + ORIGINAL_SAMP_FREQ(1); + break; + } + val |= 4; + writel(val, dp->regs + COM_CH_STTS_BITS); + + writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL); + writel(I2S_DEC_START, dp->regs + AUDIO_SRC_CNTL); +} + +static void cdn_dp_audio_config_spdif(struct cdn_dp_device *dp) +{ + u32 val; + + val = SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS; + val |= SPDIF_FIFO_MID_RANGE(0xe0); + val |= SPDIF_JITTER_THRSH(0xe0); + val |= SPDIF_JITTER_AVG_WIN(7); + writel(val, dp->regs + SPDIF_CTRL_ADDR); + + writel(SYNC_WR_TO_CH_ZERO, dp->regs + FIFO_CNTL); + + val = MAX_NUM_CH(2) | AUDIO_TYPE_LPCM | CFG_SUB_PCKT_NUM(4); + writel(val, dp->regs + SMPL2PKT_CNFG); + writel(SMPL2PKT_EN, dp->regs + SMPL2PKT_CNTL); + + val = SPDIF_ENABLE | SPDIF_AVG_SEL | SPDIF_JITTER_BYPASS; + val |= SPDIF_FIFO_MID_RANGE(0xe0); + val |= SPDIF_JITTER_THRSH(0xe0); + val |= SPDIF_JITTER_AVG_WIN(7); + writel(val, dp->regs + SPDIF_CTRL_ADDR); + + clk_prepare_enable(dp->spdif_clk); + clk_set_rate(dp->spdif_clk, CDN_DP_SPDIF_CLK); +} + +int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio) +{ + int ret; + + /* reset the spdif clk before config */ + if (audio->format == AFMT_SPDIF) { + reset_control_assert(dp->spdif_rst); + reset_control_deassert(dp->spdif_rst); + } + + ret = cdn_dp_reg_write(dp, CM_LANE_CTRL, LANE_REF_CYC); + if (ret) + goto err_audio_config; + + ret = cdn_dp_reg_write(dp, CM_CTRL, 0); + if (ret) + goto err_audio_config; + + if (audio->format == AFMT_I2S) + cdn_dp_audio_config_i2s(dp, audio); + else if (audio->format == AFMT_SPDIF) + cdn_dp_audio_config_spdif(dp); + + ret = cdn_dp_reg_write(dp, AUDIO_PACK_CONTROL, AUDIO_PACK_EN); + +err_audio_config: + if (ret) + DRM_DEV_ERROR(dp->dev, "audio config failed: %d\n", ret); + return ret; +} diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h new file mode 100644 index 000000000000..b5f215324694 --- /dev/null +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h @@ -0,0 +1,483 @@ +/* + * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd + * Author: Chris Zhong + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as 
published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CDN_DP_REG_H +#define _CDN_DP_REG_H + +#include + +#define ADDR_IMEM 0x10000 +#define ADDR_DMEM 0x20000 + +/* APB CFG addr */ +#define APB_CTRL 0 +#define XT_INT_CTRL 0x04 +#define MAILBOX_FULL_ADDR 0x08 +#define MAILBOX_EMPTY_ADDR 0x0c +#define MAILBOX0_WR_DATA 0x10 +#define MAILBOX0_RD_DATA 0x14 +#define KEEP_ALIVE 0x18 +#define VER_L 0x1c +#define VER_H 0x20 +#define VER_LIB_L_ADDR 0x24 +#define VER_LIB_H_ADDR 0x28 +#define SW_DEBUG_L 0x2c +#define SW_DEBUG_H 0x30 +#define MAILBOX_INT_MASK 0x34 +#define MAILBOX_INT_STATUS 0x38 +#define SW_CLK_L 0x3c +#define SW_CLK_H 0x40 +#define SW_EVENTS0 0x44 +#define SW_EVENTS1 0x48 +#define SW_EVENTS2 0x4c +#define SW_EVENTS3 0x50 +#define XT_OCD_CTRL 0x60 +#define APB_INT_MASK 0x6c +#define APB_STATUS_MASK 0x70 + +/* audio decoder addr */ +#define AUDIO_SRC_CNTL 0x30000 +#define AUDIO_SRC_CNFG 0x30004 +#define COM_CH_STTS_BITS 0x30008 +#define STTS_BIT_CH(x) (0x3000c + ((x) << 2)) +#define SPDIF_CTRL_ADDR 0x3004c +#define SPDIF_CH1_CS_3100_ADDR 0x30050 +#define SPDIF_CH1_CS_6332_ADDR 0x30054 +#define SPDIF_CH1_CS_9564_ADDR 0x30058 +#define SPDIF_CH1_CS_12796_ADDR 0x3005c +#define SPDIF_CH1_CS_159128_ADDR 0x30060 +#define SPDIF_CH1_CS_191160_ADDR 0x30064 +#define SPDIF_CH2_CS_3100_ADDR 0x30068 +#define SPDIF_CH2_CS_6332_ADDR 0x3006c +#define SPDIF_CH2_CS_9564_ADDR 0x30070 +#define SPDIF_CH2_CS_12796_ADDR 0x30074 +#define SPDIF_CH2_CS_159128_ADDR 0x30078 +#define SPDIF_CH2_CS_191160_ADDR 0x3007c +#define SMPL2PKT_CNTL 0x30080 +#define SMPL2PKT_CNFG 0x30084 +#define FIFO_CNTL 0x30088 +#define FIFO_STTS 0x3008c + +/* source pif addr */ +#define SOURCE_PIF_WR_ADDR 0x30800 +#define SOURCE_PIF_WR_REQ 0x30804 +#define SOURCE_PIF_RD_ADDR 0x30808 +#define SOURCE_PIF_RD_REQ 0x3080c +#define SOURCE_PIF_DATA_WR 0x30810 +#define SOURCE_PIF_DATA_RD 0x30814 +#define SOURCE_PIF_FIFO1_FLUSH 0x30818 +#define SOURCE_PIF_FIFO2_FLUSH 0x3081c +#define SOURCE_PIF_STATUS 0x30820 +#define SOURCE_PIF_INTERRUPT_SOURCE 0x30824 +#define SOURCE_PIF_INTERRUPT_MASK 0x30828 +#define SOURCE_PIF_PKT_ALLOC_REG 0x3082c +#define SOURCE_PIF_PKT_ALLOC_WR_EN 0x30830 +#define SOURCE_PIF_SW_RESET 0x30834 + +/* bellow registers need access by mailbox */ +/* source car addr */ +#define SOURCE_HDTX_CAR 0x0900 +#define SOURCE_DPTX_CAR 0x0904 +#define SOURCE_PHY_CAR 0x0908 +#define SOURCE_CEC_CAR 0x090c +#define SOURCE_CBUS_CAR 0x0910 +#define SOURCE_PKT_CAR 0x0918 +#define SOURCE_AIF_CAR 0x091c +#define SOURCE_CIPHER_CAR 0x0920 +#define SOURCE_CRYPTO_CAR 0x0924 + +/* clock meters addr */ +#define CM_CTRL 0x0a00 +#define CM_I2S_CTRL 0x0a04 +#define CM_SPDIF_CTRL 0x0a08 +#define CM_VID_CTRL 0x0a0c +#define CM_LANE_CTRL 0x0a10 +#define I2S_NM_STABLE 0x0a14 +#define I2S_NCTS_STABLE 0x0a18 +#define SPDIF_NM_STABLE 0x0a1c +#define SPDIF_NCTS_STABLE 0x0a20 +#define NMVID_MEAS_STABLE 0x0a24 +#define I2S_MEAS 0x0a40 +#define SPDIF_MEAS 0x0a80 +#define NMVID_MEAS 0x0ac0 + +/* source vif addr */ +#define BND_HSYNC2VSYNC 0x0b00 +#define HSYNC2VSYNC_F1_L1 0x0b04 +#define HSYNC2VSYNC_F2_L1 0x0b08 +#define HSYNC2VSYNC_STATUS 0x0b0c +#define HSYNC2VSYNC_POL_CTRL 0x0b10 + +/* dptx phy addr */ +#define DP_TX_PHY_CONFIG_REG 0x2000 
+#define DP_TX_PHY_STATUS_REG 0x2004 +#define DP_TX_PHY_SW_RESET 0x2008 +#define DP_TX_PHY_SCRAMBLER_SEED 0x200c +#define DP_TX_PHY_TRAINING_01_04 0x2010 +#define DP_TX_PHY_TRAINING_05_08 0x2014 +#define DP_TX_PHY_TRAINING_09_10 0x2018 +#define TEST_COR 0x23fc + +/* dptx hpd addr */ +#define HPD_IRQ_DET_MIN_TIMER 0x2100 +#define HPD_IRQ_DET_MAX_TIMER 0x2104 +#define HPD_UNPLGED_DET_MIN_TIMER 0x2108 +#define HPD_STABLE_TIMER 0x210c +#define HPD_FILTER_TIMER 0x2110 +#define HPD_EVENT_MASK 0x211c +#define HPD_EVENT_DET 0x2120 + +/* dpyx framer addr */ +#define DP_FRAMER_GLOBAL_CONFIG 0x2200 +#define DP_SW_RESET 0x2204 +#define DP_FRAMER_TU 0x2208 +#define DP_FRAMER_PXL_REPR 0x220c +#define DP_FRAMER_SP 0x2210 +#define AUDIO_PACK_CONTROL 0x2214 +#define DP_VC_TABLE(x) (0x2218 + ((x) << 2)) +#define DP_VB_ID 0x2258 +#define DP_MTPH_LVP_CONTROL 0x225c +#define DP_MTPH_SYMBOL_VALUES 0x2260 +#define DP_MTPH_ECF_CONTROL 0x2264 +#define DP_MTPH_ACT_CONTROL 0x2268 +#define DP_MTPH_STATUS 0x226c +#define DP_INTERRUPT_SOURCE 0x2270 +#define DP_INTERRUPT_MASK 0x2274 +#define DP_FRONT_BACK_PORCH 0x2278 +#define DP_BYTE_COUNT 0x227c + +/* dptx stream addr */ +#define MSA_HORIZONTAL_0 0x2280 +#define MSA_HORIZONTAL_1 0x2284 +#define MSA_VERTICAL_0 0x2288 +#define MSA_VERTICAL_1 0x228c +#define MSA_MISC 0x2290 +#define STREAM_CONFIG 0x2294 +#define AUDIO_PACK_STATUS 0x2298 +#define VIF_STATUS 0x229c +#define PCK_STUFF_STATUS_0 0x22a0 +#define PCK_STUFF_STATUS_1 0x22a4 +#define INFO_PACK_STATUS 0x22a8 +#define RATE_GOVERNOR_STATUS 0x22ac +#define DP_HORIZONTAL 0x22b0 +#define DP_VERTICAL_0 0x22b4 +#define DP_VERTICAL_1 0x22b8 +#define DP_BLOCK_SDP 0x22bc + +/* dptx glbl addr */ +#define DPTX_LANE_EN 0x2300 +#define DPTX_ENHNCD 0x2304 +#define DPTX_INT_MASK 0x2308 +#define DPTX_INT_STATUS 0x230c + +/* dp aux addr */ +#define DP_AUX_HOST_CONTROL 0x2800 +#define DP_AUX_INTERRUPT_SOURCE 0x2804 +#define DP_AUX_INTERRUPT_MASK 0x2808 +#define DP_AUX_SWAP_INVERSION_CONTROL 0x280c +#define DP_AUX_SEND_NACK_TRANSACTION 0x2810 +#define DP_AUX_CLEAR_RX 0x2814 +#define DP_AUX_CLEAR_TX 0x2818 +#define DP_AUX_TIMER_STOP 0x281c +#define DP_AUX_TIMER_CLEAR 0x2820 +#define DP_AUX_RESET_SW 0x2824 +#define DP_AUX_DIVIDE_2M 0x2828 +#define DP_AUX_TX_PREACHARGE_LENGTH 0x282c +#define DP_AUX_FREQUENCY_1M_MAX 0x2830 +#define DP_AUX_FREQUENCY_1M_MIN 0x2834 +#define DP_AUX_RX_PRE_MIN 0x2838 +#define DP_AUX_RX_PRE_MAX 0x283c +#define DP_AUX_TIMER_PRESET 0x2840 +#define DP_AUX_NACK_FORMAT 0x2844 +#define DP_AUX_TX_DATA 0x2848 +#define DP_AUX_RX_DATA 0x284c +#define DP_AUX_TX_STATUS 0x2850 +#define DP_AUX_RX_STATUS 0x2854 +#define DP_AUX_RX_CYCLE_COUNTER 0x2858 +#define DP_AUX_MAIN_STATES 0x285c +#define DP_AUX_MAIN_TIMER 0x2860 +#define DP_AUX_AFE_OUT 0x2864 + +/* crypto addr */ +#define CRYPTO_HDCP_REVISION 0x5800 +#define HDCP_CRYPTO_CONFIG 0x5804 +#define CRYPTO_INTERRUPT_SOURCE 0x5808 +#define CRYPTO_INTERRUPT_MASK 0x580c +#define CRYPTO22_CONFIG 0x5818 +#define CRYPTO22_STATUS 0x581c +#define SHA_256_DATA_IN 0x583c +#define SHA_256_DATA_OUT_(x) (0x5850 + ((x) << 2)) +#define AES_32_KEY_(x) (0x5870 + ((x) << 2)) +#define AES_32_DATA_IN 0x5880 +#define AES_32_DATA_OUT_(x) (0x5884 + ((x) << 2)) +#define CRYPTO14_CONFIG 0x58a0 +#define CRYPTO14_STATUS 0x58a4 +#define CRYPTO14_PRNM_OUT 0x58a8 +#define CRYPTO14_KM_0 0x58ac +#define CRYPTO14_KM_1 0x58b0 +#define CRYPTO14_AN_0 0x58b4 +#define CRYPTO14_AN_1 0x58b8 +#define CRYPTO14_YOUR_KSV_0 0x58bc +#define CRYPTO14_YOUR_KSV_1 0x58c0 +#define CRYPTO14_MI_0 0x58c4 +#define CRYPTO14_MI_1 
0x58c8 +#define CRYPTO14_TI_0 0x58cc +#define CRYPTO14_KI_0 0x58d0 +#define CRYPTO14_KI_1 0x58d4 +#define CRYPTO14_BLOCKS_NUM 0x58d8 +#define CRYPTO14_KEY_MEM_DATA_0 0x58dc +#define CRYPTO14_KEY_MEM_DATA_1 0x58e0 +#define CRYPTO14_SHA1_MSG_DATA 0x58e4 +#define CRYPTO14_SHA1_V_VALUE_(x) (0x58e8 + ((x) << 2)) +#define TRNG_CTRL 0x58fc +#define TRNG_DATA_RDY 0x5900 +#define TRNG_DATA 0x5904 + +/* cipher addr */ +#define HDCP_REVISION 0x60000 +#define INTERRUPT_SOURCE 0x60004 +#define INTERRUPT_MASK 0x60008 +#define HDCP_CIPHER_CONFIG 0x6000c +#define AES_128_KEY_0 0x60010 +#define AES_128_KEY_1 0x60014 +#define AES_128_KEY_2 0x60018 +#define AES_128_KEY_3 0x6001c +#define AES_128_RANDOM_0 0x60020 +#define AES_128_RANDOM_1 0x60024 +#define CIPHER14_KM_0 0x60028 +#define CIPHER14_KM_1 0x6002c +#define CIPHER14_STATUS 0x60030 +#define CIPHER14_RI_PJ_STATUS 0x60034 +#define CIPHER_MODE 0x60038 +#define CIPHER14_AN_0 0x6003c +#define CIPHER14_AN_1 0x60040 +#define CIPHER22_AUTH 0x60044 +#define CIPHER14_R0_DP_STATUS 0x60048 +#define CIPHER14_BOOTSTRAP 0x6004c + +#define DPTX_FRMR_DATA_CLK_RSTN_EN BIT(11) +#define DPTX_FRMR_DATA_CLK_EN BIT(10) +#define DPTX_PHY_DATA_RSTN_EN BIT(9) +#define DPTX_PHY_DATA_CLK_EN BIT(8) +#define DPTX_PHY_CHAR_RSTN_EN BIT(7) +#define DPTX_PHY_CHAR_CLK_EN BIT(6) +#define SOURCE_AUX_SYS_CLK_RSTN_EN BIT(5) +#define SOURCE_AUX_SYS_CLK_EN BIT(4) +#define DPTX_SYS_CLK_RSTN_EN BIT(3) +#define DPTX_SYS_CLK_EN BIT(2) +#define CFG_DPTX_VIF_CLK_RSTN_EN BIT(1) +#define CFG_DPTX_VIF_CLK_EN BIT(0) + +#define SOURCE_PHY_RSTN_EN BIT(1) +#define SOURCE_PHY_CLK_EN BIT(0) + +#define SOURCE_PKT_SYS_RSTN_EN BIT(3) +#define SOURCE_PKT_SYS_CLK_EN BIT(2) +#define SOURCE_PKT_DATA_RSTN_EN BIT(1) +#define SOURCE_PKT_DATA_CLK_EN BIT(0) + +#define SPDIF_CDR_CLK_RSTN_EN BIT(5) +#define SPDIF_CDR_CLK_EN BIT(4) +#define SOURCE_AIF_SYS_RSTN_EN BIT(3) +#define SOURCE_AIF_SYS_CLK_EN BIT(2) +#define SOURCE_AIF_CLK_RSTN_EN BIT(1) +#define SOURCE_AIF_CLK_EN BIT(0) + +#define SOURCE_CIPHER_SYSTEM_CLK_RSTN_EN BIT(3) +#define SOURCE_CIPHER_SYS_CLK_EN BIT(2) +#define SOURCE_CIPHER_CHAR_CLK_RSTN_EN BIT(1) +#define SOURCE_CIPHER_CHAR_CLK_EN BIT(0) + +#define SOURCE_CRYPTO_SYS_CLK_RSTN_EN BIT(1) +#define SOURCE_CRYPTO_SYS_CLK_EN BIT(0) + +#define APB_IRAM_PATH BIT(2) +#define APB_DRAM_PATH BIT(1) +#define APB_XT_RESET BIT(0) + +#define MAILBOX_INT_MASK_BIT BIT(1) +#define PIF_INT_MASK_BIT BIT(0) +#define ALL_INT_MASK 3 + +/* mailbox */ +#define MB_OPCODE_ID 0 +#define MB_MODULE_ID 1 +#define MB_SIZE_MSB_ID 2 +#define MB_SIZE_LSB_ID 3 +#define MB_DATA_ID 4 + +#define MB_MODULE_ID_DP_TX 0x01 +#define MB_MODULE_ID_HDCP_TX 0x07 +#define MB_MODULE_ID_HDCP_RX 0x08 +#define MB_MODULE_ID_HDCP_GENERAL 0x09 +#define MB_MODULE_ID_GENERAL 0x0a + +/* general opcode */ +#define GENERAL_MAIN_CONTROL 0x01 +#define GENERAL_TEST_ECHO 0x02 +#define GENERAL_BUS_SETTINGS 0x03 +#define GENERAL_TEST_ACCESS 0x04 + +#define DPTX_SET_POWER_MNG 0x00 +#define DPTX_SET_HOST_CAPABILITIES 0x01 +#define DPTX_GET_EDID 0x02 +#define DPTX_READ_DPCD 0x03 +#define DPTX_WRITE_DPCD 0x04 +#define DPTX_ENABLE_EVENT 0x05 +#define DPTX_WRITE_REGISTER 0x06 +#define DPTX_READ_REGISTER 0x07 +#define DPTX_WRITE_FIELD 0x08 +#define DPTX_TRAINING_CONTROL 0x09 +#define DPTX_READ_EVENT 0x0a +#define DPTX_READ_LINK_STAT 0x0b +#define DPTX_SET_VIDEO 0x0c +#define DPTX_SET_AUDIO 0x0d +#define DPTX_GET_LAST_AUX_STAUS 0x0e +#define DPTX_SET_LINK_BREAK_POINT 0x0f +#define DPTX_FORCE_LANES 0x10 +#define DPTX_HPD_STATE 0x11 + +#define FW_STANDBY 0 +#define FW_ACTIVE 1 + 
+#define DPTX_EVENT_ENABLE_HPD BIT(0) +#define DPTX_EVENT_ENABLE_TRAINING BIT(1) + +#define LINK_TRAINING_NOT_ACTIVE 0 +#define LINK_TRAINING_RUN 1 +#define LINK_TRAINING_RESTART 2 + +#define CONTROL_VIDEO_IDLE 0 +#define CONTROL_VIDEO_VALID 1 + +#define TU_CNT_RST_EN BIT(15) +#define VIF_BYPASS_INTERLACE BIT(13) +#define INTERLACE_FMT_DET BIT(12) +#define INTERLACE_DTCT_WIN 0x20 + +#define DP_FRAMER_SP_INTERLACE_EN BIT(2) +#define DP_FRAMER_SP_HSP BIT(1) +#define DP_FRAMER_SP_VSP BIT(0) + +/* capability */ +#define AUX_HOST_INVERT 3 +#define FAST_LT_SUPPORT 1 +#define FAST_LT_NOT_SUPPORT 0 +#define LANE_MAPPING_NORMAL 0x1b +#define LANE_MAPPING_FLIPPED 0xe4 +#define ENHANCED 1 +#define SCRAMBLER_EN BIT(4) + +#define FULL_LT_STARTED BIT(0) +#define FASE_LT_STARTED BIT(1) +#define CLK_RECOVERY_FINISHED BIT(2) +#define EQ_PHASE_FINISHED BIT(3) +#define FASE_LT_START_FINISHED BIT(4) +#define CLK_RECOVERY_FAILED BIT(5) +#define EQ_PHASE_FAILED BIT(6) +#define FASE_LT_FAILED BIT(7) + +#define DPTX_HPD_EVENT BIT(0) +#define DPTX_TRAINING_EVENT BIT(1) +#define HDCP_TX_STATUS_EVENT BIT(4) +#define HDCP2_TX_IS_KM_STORED_EVENT BIT(5) +#define HDCP2_TX_STORE_KM_EVENT BIT(6) +#define HDCP_TX_IS_RECEIVER_ID_VALID_EVENT BIT(7) + +#define TU_SIZE 30 +#define CDN_DP_MAX_LINK_RATE DP_LINK_BW_5_4 + +/* audio */ +#define AUDIO_PACK_EN BIT(8) +#define SAMPLING_FREQ(x) (((x) & 0xf) << 16) +#define ORIGINAL_SAMP_FREQ(x) (((x) & 0xf) << 24) +#define SYNC_WR_TO_CH_ZERO BIT(1) +#define I2S_DEC_START BIT(1) +#define AUDIO_SW_RST BIT(0) +#define SMPL2PKT_EN BIT(1) +#define MAX_NUM_CH(x) (((x) & 0x1f) - 1) +#define NUM_OF_I2S_PORTS(x) ((((x) / 2 - 1) & 0x3) << 5) +#define AUDIO_TYPE_LPCM (2 << 7) +#define CFG_SUB_PCKT_NUM(x) ((((x) - 1) & 0x7) << 11) +#define AUDIO_CH_NUM(x) ((((x) - 1) & 0x1f) << 2) +#define TRANS_SMPL_WIDTH_16 0 +#define TRANS_SMPL_WIDTH_24 BIT(11) +#define TRANS_SMPL_WIDTH_32 (2 << 11) +#define I2S_DEC_PORT_EN(x) (((x) & 0xf) << 17) +#define SPDIF_ENABLE BIT(21) +#define SPDIF_AVG_SEL BIT(20) +#define SPDIF_JITTER_BYPASS BIT(19) +#define SPDIF_FIFO_MID_RANGE(x) (((x) & 0xff) << 11) +#define SPDIF_JITTER_THRSH(x) (((x) & 0xff) << 3) +#define SPDIF_JITTER_AVG_WIN(x) ((x) & 0x7) + +/* Reference cycles when using lane clock as reference */ +#define LANE_REF_CYC 0x8000 + +enum voltage_swing_level { + VOLTAGE_LEVEL_0, + VOLTAGE_LEVEL_1, + VOLTAGE_LEVEL_2, + VOLTAGE_LEVEL_3, +}; + +enum pre_emphasis_level { + PRE_EMPHASIS_LEVEL_0, + PRE_EMPHASIS_LEVEL_1, + PRE_EMPHASIS_LEVEL_2, + PRE_EMPHASIS_LEVEL_3, +}; + +enum pattern_set { + PTS1 = BIT(0), + PTS2 = BIT(1), + PTS3 = BIT(2), + PTS4 = BIT(3), + DP_NONE = BIT(4) +}; + +enum vic_color_depth { + BCS_6 = 0x1, + BCS_8 = 0x2, + BCS_10 = 0x4, + BCS_12 = 0x8, + BCS_16 = 0x10, +}; + +enum vic_bt_type { + BT_601 = 0x0, + BT_709 = 0x1, +}; + +void cdn_dp_clock_reset(struct cdn_dp_device *dp); + +void cdn_dp_set_fw_clk(struct cdn_dp_device *dp, u32 clk); +int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem, + u32 i_size, const u32 *d_mem, u32 d_size); +int cdn_dp_set_firmware_active(struct cdn_dp_device *dp, bool enable); +int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip); +int cdn_dp_event_config(struct cdn_dp_device *dp); +u32 cdn_dp_get_event(struct cdn_dp_device *dp); +int cdn_dp_get_hpd_status(struct cdn_dp_device *dp); +int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value); +int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len); +int cdn_dp_get_edid_block(void *dp, u8 *edid, + unsigned 
int block, size_t length); +int cdn_dp_train_link(struct cdn_dp_device *dp); +int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active); +int cdn_dp_config_video(struct cdn_dp_device *dp); +int cdn_dp_audio_stop(struct cdn_dp_device *dp, struct audio_info *audio); +int cdn_dp_audio_mute(struct cdn_dp_device *dp, bool enable); +int cdn_dp_audio_config(struct cdn_dp_device *dp, struct audio_info *audio); +#endif /* _CDN_DP_REG_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 4f8b8631956a..76c79ac57df0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -936,9 +936,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc) vop_dsp_hold_valid_irq_disable(vop); } - pin_pol = 0x8; - pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1; - pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1); + pin_pol = BIT(DCLK_INVERT); + pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? + 0 : BIT(HSYNC_POSITIVE); + pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? + 0 : BIT(VSYNC_POSITIVE); VOP_CTRL_SET(vop, pin_pol, pin_pol); switch (s->output_type) { @@ -958,6 +960,11 @@ static void vop_crtc_enable(struct drm_crtc *crtc) VOP_CTRL_SET(vop, mipi_pin_pol, pin_pol); VOP_CTRL_SET(vop, mipi_en, 1); break; + case DRM_MODE_CONNECTOR_DisplayPort: + pin_pol &= ~BIT(DCLK_INVERT); + VOP_CTRL_SET(vop, dp_pin_pol, pin_pol); + VOP_CTRL_SET(vop, dp_en, 1); + break; default: DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", s->output_type); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 1dbc52615257..5a4faa85dbd2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -45,6 +45,7 @@ struct vop_ctrl { struct vop_reg edp_en; struct vop_reg hdmi_en; struct vop_reg mipi_en; + struct vop_reg dp_en; struct vop_reg out_mode; struct vop_reg dither_down; struct vop_reg dither_up; @@ -53,6 +54,7 @@ struct vop_ctrl { struct vop_reg hdmi_pin_pol; struct vop_reg edp_pin_pol; struct vop_reg mipi_pin_pol; + struct vop_reg dp_pin_pol; struct vop_reg htotal_pw; struct vop_reg hact_st_end; @@ -244,6 +246,13 @@ enum scale_down_mode { SCALE_DOWN_AVG = 0x1 }; +enum vop_pol { + HSYNC_POSITIVE = 0, + VSYNC_POSITIVE = 1, + DEN_NEGATIVE = 2, + DCLK_INVERT = 3 +}; + #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) #define SCL_FT_DEFAULT_FIXPOINT_SHIFT 12 #define SCL_MAX_VSKIPLINES 4 diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index 35c51f3402f2..91fbc7b52147 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -284,6 +284,7 @@ static const struct vop_data rk3288_vop = { static const struct vop_ctrl rk3399_ctrl_data = { .standby = VOP_REG(RK3399_SYS_CTRL, 0x1, 22), .gate_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 23), + .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11), .rgb_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 13), .edp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 14), @@ -293,6 +294,7 @@ static const struct vop_ctrl rk3399_ctrl_data = { .data_blank = VOP_REG(RK3399_DSP_CTRL0, 0x1, 19), .out_mode = VOP_REG(RK3399_DSP_CTRL0, 0xf, 0), .rgb_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), + .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), .hdmi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 20), .edp_pin_pol = 
VOP_REG(RK3399_DSP_CTRL1, 0xf, 24), .mipi_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 28), -- cgit v1.2.3 From 83c132ed46a7e9c8ac17be436ef3892850df8695 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 5 Feb 2017 15:54:57 +0800 Subject: drm/rockchip: cdn-dp: Load firmware if no monitor connected If no monitor is connected, suspend/resume cycles result in firmware load errors because the driver attempts to load the firmware while the system is in suspend state. This results in a kernel warning and traceback. Loading the firmware during boot fixes the problem. Note that we can not just call schedule_work conditionally in cdn_dp_pd_event() if the insertion status changed. The problem would still be seen if a monitor is connected for the first time during suspend. Signed-off-by: Guenter Roeck Signed-off-by: Sean Paul Signed-off-by: Chris Zhong --- drivers/gpu/drm/rockchip/cdn-dp-core.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 7db25081a8ef..b8d0dd7abc61 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1021,7 +1021,6 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) struct cdn_dp_port *port; struct drm_device *drm_dev = data; int ret, i; - bool schedule_event = false; ret = cdn_dp_parse_dt(dp); if (ret < 0) @@ -1083,15 +1082,11 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) "register EXTCON_DISP_DP notifier err\n"); goto err_free_connector; } - - if (extcon_get_state(port->extcon, EXTCON_DISP_DP)) - schedule_event = true; } pm_runtime_enable(dev); - if (schedule_event) - schedule_work(&dp->event_work); + schedule_work(&dp->event_work); return 0; -- cgit v1.2.3 From 5eb2e6ee97b5e9a8587143db339bc71b28a3e1ca Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 5 Feb 2017 15:54:58 +0800 Subject: drm/rockchip: cdn-dp: Do not run worker while suspended If the driver is in suspended mode, the dp block may be disabled, and chip registers may not be accessible. Yet, the worker may be triggered in this situation by an extcon event. If that happens, the following crash will be seen. cdn-dp fec00000.dp: [drm:cdn_dp_pd_event_work] *ERROR* Enable dp failed -19 cdn-dp fec00000.dp: [drm:cdn_dp_pd_event_work] Connected, not enabled. Enabling cdn Bad mode in Error handler detected, code 0xbf000002 -- SError CPU: 1 PID: 10357 Comm: kworker/1:2 Not tainted 4.4.21-05903-ge0514ea #1 Hardware name: Google Kevin (DT) Workqueue: events cdn_dp_pd_event_work task: ffffffc0cda67080 ti: ffffffc0b9b80000 task.ti: ffffffc0b9b80000 PC is at cdn_dp_clock_reset+0x30/0xa8 LR is at cdn_dp_enable+0x1e0/0x69c ... Call trace: [] cdn_dp_pd_event_work+0x58/0x3f4 [] process_one_work+0x240/0x424 [] worker_thread+0x2fc/0x424 [] kthread+0x10c/0x114 [] ret_from_fork+0x10/0x40 Problem is two-fold: The worker should not run while suspended, and the suspend function should not call cdn_dp_disable() while the worker is running. 
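The fix below takes dp->lock in both paths and adds a "suspended" flag. A minimal sketch of that interlock, using simplified stand-in names rather than the driver's exact code:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct dp_ctx {
	struct mutex lock;
	struct work_struct event_work;
	bool active;
	bool suspended;
};

/* Stub for illustration; a real driver would power down the hardware here. */
static int dp_hw_disable(struct dp_ctx *dp)
{
	dp->active = false;
	return 0;
}

static void dp_event_work(struct work_struct *work)
{
	struct dp_ctx *dp = container_of(work, struct dp_ctx, event_work);

	mutex_lock(&dp->lock);
	if (dp->suspended)	/* registers may be unclocked, do nothing */
		goto out;

	/* ... hotplug handling and register access are safe here ... */
out:
	mutex_unlock(&dp->lock);
}

static int dp_suspend(struct dp_ctx *dp)
{
	int ret = 0;

	/* Taking the lock also waits for a running worker to finish. */
	mutex_lock(&dp->lock);
	if (dp->active)
		ret = dp_hw_disable(dp);
	dp->suspended = true;
	mutex_unlock(&dp->lock);

	return ret;
}

Resume then clears the flag under the same lock before kicking the worker again, which is the ordering the diff below implements.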
Signed-off-by: Guenter Roeck Signed-off-by: Sean Paul Signed-off-by: Chris Zhong --- drivers/gpu/drm/rockchip/cdn-dp-core.c | 15 +++++++++++++-- drivers/gpu/drm/rockchip/cdn-dp-core.h | 1 + 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index b8d0dd7abc61..a70eedc88e1a 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -939,6 +939,10 @@ static void cdn_dp_pd_event_work(struct work_struct *work) u8 sink_count; mutex_lock(&dp->lock); + + if (dp->suspended) + goto out; + ret = cdn_dp_request_firmware(dp); if (ret) goto out; @@ -1123,19 +1127,26 @@ static const struct component_ops cdn_dp_component_ops = { int cdn_dp_suspend(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); + int ret = 0; + + mutex_lock(&dp->lock); if (dp->active) - return cdn_dp_disable(dp); + ret = cdn_dp_disable(dp); + dp->suspended = true; + mutex_unlock(&dp->lock); - return 0; + return ret; } int cdn_dp_resume(struct device *dev) { struct cdn_dp_device *dp = dev_get_drvdata(dev); + mutex_lock(&dp->lock); + dp->suspended = false; if (dp->fw_loaded) schedule_work(&dp->event_work); + mutex_unlock(&dp->lock); return 0; } diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index 3bea4b8d265f..7d48661362c8 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -82,6 +82,7 @@ struct cdn_dp_device { struct mutex lock; bool connected; bool active; + bool suspended; const struct firmware *fw; /* cdn dp firmware */ unsigned int fw_version; /* cdn fw version */ -- cgit v1.2.3 From 81632df69772efaacc6218281f58d5ed19d5c97e Mon Sep 17 00:00:00 2001 From: Chris Zhong Date: Sun, 5 Feb 2017 15:54:59 +0800 Subject: drm/rockchip: cdn-dp: do not use drm_helper_hpd_irq_event cdn_dp_pd_event_work uses drm_helper_hpd_irq_event to update the connector status, but that helper re-detects every connector of the drm_device. As a result, the detect callback of every other connector is called each time cdn_dp_pd_event_work is triggered. This is unnecessary, and it may cause a system crash. Replace drm_helper_hpd_irq_event with drm_kms_helper_hotplug_event and update only the cdn-dp connector status.
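A short sketch of this per-connector update; apart from the DRM helpers, the names are illustrative:

#include <drm/drm_crtc_helper.h>	/* declares drm_kms_helper_hotplug_event() in this kernel era */

static void dp_update_own_connector(struct drm_connector *connector)
{
	enum drm_connector_status old_status = connector->status;

	/* Re-detect only this connector instead of every connector ... */
	connector->status = connector->funcs->detect(connector, false);

	/* ... and notify userspace only if its status actually changed. */
	if (old_status != connector->status)
		drm_kms_helper_hotplug_event(connector->dev);
}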
Signed-off-by: Chris Zhong Tested-by: Guenter Roeck Reviewed-by: Guenter Roeck --- drivers/gpu/drm/rockchip/cdn-dp-core.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index a70eedc88e1a..62e02a48ebea 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -935,6 +935,9 @@ static void cdn_dp_pd_event_work(struct work_struct *work) { struct cdn_dp_device *dp = container_of(work, struct cdn_dp_device, event_work); + struct drm_connector *connector = &dp->connector; + enum drm_connector_status old_status; + int ret; u8 sink_count; @@ -997,7 +1000,11 @@ static void cdn_dp_pd_event_work(struct work_struct *work) out: mutex_unlock(&dp->lock); - drm_helper_hpd_irq_event(dp->drm_dev); + + old_status = connector->status; + connector->status = connector->funcs->detect(connector, false); + if (old_status != connector->status) + drm_kms_helper_hotplug_event(dp->drm_dev); } static int cdn_dp_pd_event(struct notifier_block *nb, -- cgit v1.2.3 From be0270e4d14656d36e76c098ebe822ca2fc34044 Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Sun, 5 Feb 2017 15:55:00 +0800 Subject: drm/rockchip: cdn-dp: Move mutex_init to probe We try to lock the mutex when cdn-dp shuts down, so we need to make sure the mutex is initialized in cdn-dp's probe. Signed-off-by: Jeffy Chen Reviewed-by: Guenter Roeck Reviewed-by: Chris Zhong Signed-off-by: Chris Zhong --- drivers/gpu/drm/rockchip/cdn-dp-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 62e02a48ebea..799e8260e7c4 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1041,7 +1041,6 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) dp->connected = false; dp->active = false; - mutex_init(&dp->lock); INIT_WORK(&dp->event_work, cdn_dp_pd_event_work); encoder = &dp->encoder; @@ -1204,6 +1203,7 @@ static int cdn_dp_probe(struct platform_device *pdev) return -EINVAL; } + mutex_init(&dp->lock); dev_set_drvdata(dev, dp); return component_add(dev, &cdn_dp_component_ops); -- cgit v1.2.3 From 13e0e2069401f2692cf25eb3c7f61137b5f9902d Mon Sep 17 00:00:00 2001 From: Chris Zhong Date: Sun, 5 Feb 2017 15:55:01 +0800 Subject: drm/rockchip: cdn-dp: retry to check sink count Sometimes the Dock is disconnected but cdn_dp_encoder_disable is not triggered by DRM. For example, if the Dock is unplugged in console mode and then re-plugged, cdn_dp_event_work will still try to read the sink count from the Dock, since the DP is still active. But the Dock has been powered down; it needs to be re-powered and takes a while before it is ready for DPCD communication.
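The diff below bounds that wait with a retry loop. A schematic of the pattern, where struct dp_ctx, dp_read_sink_count() and DP_DPCD_TIMEOUT_MS are placeholders rather than the driver's own names:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define DP_DPCD_TIMEOUT_MS	5000	/* illustrative value, not the driver's constant */

struct dp_ctx;	/* opaque stand-in for the driver's device struct */

/* Stub for illustration; a real driver would issue a DPCD read here. */
static int dp_read_sink_count(struct dp_ctx *dp, u8 *count)
{
	*count = 1;
	return 0;
}

static bool dp_wait_for_sink(struct dp_ctx *dp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(DP_DPCD_TIMEOUT_MS);
	u8 sink_count;

	while (time_before(jiffies, timeout)) {
		/* The dock may still be powering up; keep retrying. */
		if (!dp_read_sink_count(dp, &sink_count))
			return sink_count != 0;

		usleep_range(5000, 10000);
	}

	return false;	/* timed out: treat it as no sink connected */
}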
Signed-off-by: Chris Zhong --- drivers/gpu/drm/rockchip/cdn-dp-core.c | 91 +++++++++++++++++++--------------- drivers/gpu/drm/rockchip/cdn-dp-core.h | 1 + 2 files changed, 52 insertions(+), 40 deletions(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 799e8260e7c4..a630b0d27b9f 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -197,6 +197,39 @@ static struct cdn_dp_port *cdn_dp_connected_port(struct cdn_dp_device *dp) return NULL; } +static bool cdn_dp_check_sink_connection(struct cdn_dp_device *dp) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS); + struct cdn_dp_port *port; + u8 sink_count = 0; + + if (dp->active_port < 0 || dp->active_port >= dp->ports) { + DRM_DEV_ERROR(dp->dev, "active_port is wrong!\n"); + return false; + } + + port = dp->port[dp->active_port]; + + /* + * Attempt to read sink count, retry in case the sink may not be ready. + * + * Sinks are *supposed* to come up within 1ms from an off state, but + * some docks need more time to power up. + */ + while (time_before(jiffies, timeout)) { + if (!extcon_get_state(port->extcon, EXTCON_DISP_DP)) + return false; + + if (!cdn_dp_get_sink_count(dp, &sink_count)) + return sink_count ? true : false; + + usleep_range(5000, 10000); + } + + DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n"); + return false; +} + static enum drm_connector_status cdn_dp_connector_detect(struct drm_connector *connector, bool force) { @@ -345,47 +378,24 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp) return cdn_dp_event_config(dp); } -static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp, - struct cdn_dp_port *port, - u8 *sink_count) +static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) { int ret; - unsigned long timeout = jiffies + msecs_to_jiffies(CDN_DPCD_TIMEOUT_MS); - /* - * Attempt to read sink count & sink capability, retry in case the sink - * may not be ready. - * - * Sinks are *supposed* to come up within 1ms from an off state, but - * some docks need more time to power up. 
- */ - while (time_before(jiffies, timeout)) { - if (!extcon_get_state(port->extcon, EXTCON_DISP_DP)) - return -ENODEV; - - if (cdn_dp_get_sink_count(dp, sink_count)) { - usleep_range(5000, 10000); - continue; - } - - if (!*sink_count) - return -ENODEV; - - ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd, - DP_RECEIVER_CAP_SIZE); - if (ret) { - DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); - return ret; - } + if (!cdn_dp_check_sink_connection(dp)) + return -ENODEV; - kfree(dp->edid); - dp->edid = drm_do_get_edid(&dp->connector, - cdn_dp_get_edid_block, dp); - return 0; + ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret) { + DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); + return ret; } - DRM_DEV_ERROR(dp->dev, "Get sink capability timed out\n"); - return -ETIMEDOUT; + kfree(dp->edid); + dp->edid = drm_do_get_edid(&dp->connector, + cdn_dp_get_edid_block, dp); + return 0; } static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) @@ -437,6 +447,7 @@ static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port) goto err_power_on; } + dp->active_port = port->id; return 0; err_power_on: @@ -466,6 +477,7 @@ static int cdn_dp_disable_phy(struct cdn_dp_device *dp, port->phy_enabled = false; port->lanes = 0; + dp->active_port = -1; return 0; } @@ -504,7 +516,6 @@ static int cdn_dp_enable(struct cdn_dp_device *dp) { int ret, i, lanes; struct cdn_dp_port *port; - u8 sink_count; port = cdn_dp_connected_port(dp); if (!port) { @@ -535,8 +546,8 @@ static int cdn_dp_enable(struct cdn_dp_device *dp) if (ret) continue; - ret = cdn_dp_get_sink_capability(dp, port, &sink_count); - if (ret || (!ret && !sink_count)) { + ret = cdn_dp_get_sink_capability(dp); + if (ret) { cdn_dp_disable_phy(dp, port); } else { dp->active = true; @@ -939,7 +950,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work) enum drm_connector_status old_status; int ret; - u8 sink_count; mutex_lock(&dp->lock); @@ -967,7 +977,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work) } /* Enabled and connected to a dongle without a sink, notify userspace */ - } else if (cdn_dp_get_sink_count(dp, &sink_count) || !sink_count) { + } else if (!cdn_dp_check_sink_connection(dp)) { DRM_DEV_INFO(dp->dev, "Connected without sink. Assert hpd\n"); dp->connected = false; @@ -1040,6 +1050,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) dp->drm_dev = drm_dev; dp->connected = false; dp->active = false; + dp->active_port = -1; INIT_WORK(&dp->event_work, cdn_dp_pd_event_work); diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index 7d48661362c8..f57e296401b8 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -104,6 +104,7 @@ struct cdn_dp_device { struct cdn_dp_port *port[MAX_PHY]; u8 ports; u8 lanes; + int active_port; u8 dpcd[DP_RECEIVER_CAP_SIZE]; bool sink_has_audio; -- cgit v1.2.3 From ef1844b7ed847430955a95d20242f0d1b9f5fa64 Mon Sep 17 00:00:00 2001 From: Chris Zhong Date: Sun, 5 Feb 2017 15:55:02 +0800 Subject: drm/rockchip: cdn-dp: don't configure hardware in mode_set With atomic modesetting the hardware will be powered off when the mode_set function is called. We should configure the hardware in the enable function. 
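A minimal sketch of the resulting split, assuming a hypothetical encoder driver (my_encoder and its callbacks are illustrative names, not cdn-dp's code): .mode_set only caches the mode, and all register programming happens in .enable, once the power domain is guaranteed to be on.

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

struct my_encoder {
	struct drm_encoder base;
	struct drm_display_mode mode;	/* cached here, programmed in .enable */
};

static void my_encoder_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted)
{
	struct my_encoder *enc = container_of(encoder, struct my_encoder, base);

	/* The hardware may be powered off at this point: only cache the mode. */
	drm_mode_copy(&enc->mode, adjusted);
}

static void my_encoder_enable(struct drm_encoder *encoder)
{
	struct my_encoder *enc = container_of(encoder, struct my_encoder, base);

	/* Power and clocks are up by now: program the hardware here. */
	DRM_DEBUG_KMS("programming %dx%d\n", enc->mode.hdisplay, enc->mode.vdisplay);
	/* ... register writes derived from enc->mode would go here ... */
}

static const struct drm_encoder_helper_funcs my_encoder_helper_funcs = {
	.mode_set	= my_encoder_mode_set,
	.enable		= my_encoder_enable,
};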
Signed-off-by: Chris Zhong --- drivers/gpu/drm/rockchip/cdn-dp-core.c | 49 +++++++++++++++++----------------- 1 file changed, 24 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index a630b0d27b9f..9ab67a670885 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -568,9 +568,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, { struct cdn_dp_device *dp = encoder_to_dp(encoder); struct drm_display_info *display_info = &dp->connector.display_info; - struct rockchip_crtc_state *state; struct video_info *video = &dp->video_info; - int ret, val; switch (display_info->bpc) { case 10: @@ -585,31 +583,9 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, } video->color_fmt = PXL_RGB; - video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); - if (ret < 0) { - DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); - return; - } - - DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", - (ret) ? "LIT" : "BIG"); - state = to_rockchip_crtc_state(encoder->crtc->state); - if (ret) { - val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); - state->output_mode = ROCKCHIP_OUT_MODE_P888; - } else { - val = DP_SEL_VOP_LIT << 16; - state->output_mode = ROCKCHIP_OUT_MODE_AAAA; - } - - ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); - if (ret) - return; - memcpy(&dp->mode, adjusted, sizeof(*mode)); } @@ -635,9 +611,32 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) static void cdn_dp_encoder_enable(struct drm_encoder *encoder) { struct cdn_dp_device *dp = encoder_to_dp(encoder); - int ret; + int ret, val; + struct rockchip_crtc_state *state; + + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); + if (ret < 0) { + DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); + return; + } + + DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", + (ret) ? 
"LIT" : "BIG"); + state = to_rockchip_crtc_state(encoder->crtc->state); + if (ret) { + val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); + state->output_mode = ROCKCHIP_OUT_MODE_P888; + } else { + val = DP_SEL_VOP_LIT << 16; + state->output_mode = ROCKCHIP_OUT_MODE_AAAA; + } + + ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); + if (ret) + return; mutex_lock(&dp->lock); + ret = cdn_dp_enable(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", -- cgit v1.2.3 From 213c4b96639818a2dc8750d1b7a37672a9c9eaeb Mon Sep 17 00:00:00 2001 From: Mark Yao Date: Sun, 5 Feb 2017 17:10:57 +0800 Subject: drm/rockchip: cdn-dp: fix cdn-dp complie warning fix warning: drivers/gpu/drm/rockchip/cdn-dp-reg.c:632:24: warning: 'val[1]' may be used uninitialized in this function [-Wmaybe-uninitialized] msa_misc = 2 * val[0] + 32 * val[1] + Signed-off-by: Mark Yao --- drivers/gpu/drm/rockchip/cdn-dp-reg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c index 3a5b8a4aa1e7..319dbbaa3609 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c @@ -592,7 +592,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video, struct drm_display_mode *mode) { u32 msa_misc; - u8 val[2]; + u8 val[2] = {0}; switch (video->color_fmt) { case PXL_RGB: -- cgit v1.2.3 From 38f993b7c59e261b8ff7deb66c96c7dff4017f7b Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Fri, 24 Jun 2016 10:13:31 +0800 Subject: drm/rockchip: Do not use DMA mapping API if attached to IOMMU domain The API is not suitable for subsystems consisting of multiple devices and requires severe hacks to use it. To mitigate this, this patch implements allocation and address space management locally by using helpers provided by DRM framework, like other DRM drivers do, e.g. Tegra. This patch should not introduce any functional changes until the driver is made to attach subdevices into an IOMMU domain with the generic IOMMU API, which will happen in following patch. Based heavily on GEM implementation of Tegra DRM driver. Signed-off-by: Tomasz Figa Signed-off-by: Shunqian Zheng Signed-off-by: Mark Yao Signed-off-by: rjan Eide --- drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 6 +- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 244 ++++++++++++++++++++++++++-- drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 8 + 3 files changed, 244 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index fb6226cf84b7..adc39302bec5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -30,6 +30,7 @@ struct drm_device; struct drm_connector; +struct iommu_domain; /* * Rockchip drm private crtc funcs. 
@@ -60,7 +61,10 @@ struct rockchip_drm_private { struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; struct drm_atomic_state *state; - + struct iommu_domain *domain; + /* protect drm_mm on multi-threads */ + struct mutex mm_lock; + struct drm_mm mm; struct list_head psr_list; spinlock_t psr_list_lock; }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index b70f9423379c..df9e57064f19 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -16,11 +16,146 @@ #include #include #include +#include #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" -static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, +static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) +{ + struct drm_device *drm = rk_obj->base.dev; + struct rockchip_drm_private *private = drm->dev_private; + int prot = IOMMU_READ | IOMMU_WRITE; + ssize_t ret; + + mutex_lock(&private->mm_lock); + + ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, + rk_obj->base.size, PAGE_SIZE, + 0, 0); + + mutex_unlock(&private->mm_lock); + if (ret < 0) { + DRM_ERROR("out of I/O virtual memory: %zd\n", ret); + return ret; + } + + rk_obj->dma_addr = rk_obj->mm.start; + + ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl, + rk_obj->sgt->nents, prot); + if (ret < rk_obj->base.size) { + DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", + ret, rk_obj->base.size); + ret = -ENOMEM; + goto err_remove_node; + } + + rk_obj->size = ret; + + return 0; + +err_remove_node: + drm_mm_remove_node(&rk_obj->mm); + + return ret; +} + +static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj) +{ + struct drm_device *drm = rk_obj->base.dev; + struct rockchip_drm_private *private = drm->dev_private; + + iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size); + + mutex_lock(&private->mm_lock); + + drm_mm_remove_node(&rk_obj->mm); + + mutex_unlock(&private->mm_lock); + + return 0; +} + +static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) +{ + struct drm_device *drm = rk_obj->base.dev; + int ret, i; + struct scatterlist *s; + + rk_obj->pages = drm_gem_get_pages(&rk_obj->base); + if (IS_ERR(rk_obj->pages)) + return PTR_ERR(rk_obj->pages); + + rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; + + rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + if (IS_ERR(rk_obj->sgt)) { + ret = PTR_ERR(rk_obj->sgt); + goto err_put_pages; + } + + /* + * Fake up the SG table so that dma_sync_sg_for_device() can be used + * to flush the pages associated with it. + * + * TODO: Replace this by drm_clflush_sg() once it can be implemented + * without relying on symbols that are not exported. 
+ */ + for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i) + sg_dma_address(s) = sg_phys(s); + + dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents, + DMA_TO_DEVICE); + + return 0; + +err_put_pages: + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false); + return ret; +} + +static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj) +{ + sg_free_table(rk_obj->sgt); + kfree(rk_obj->sgt); + drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true); +} + +static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj, + bool alloc_kmap) +{ + int ret; + + ret = rockchip_gem_get_pages(rk_obj); + if (ret < 0) + return ret; + + ret = rockchip_gem_iommu_map(rk_obj); + if (ret < 0) + goto err_free; + + if (alloc_kmap) { + rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (!rk_obj->kvaddr) { + DRM_ERROR("failed to vmap() buffer\n"); + ret = -ENOMEM; + goto err_unmap; + } + } + + return 0; + +err_unmap: + rockchip_gem_iommu_unmap(rk_obj); +err_free: + rockchip_gem_put_pages(rk_obj); + + return ret; +} + +static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj, bool alloc_kmap) { struct drm_gem_object *obj = &rk_obj->base; @@ -42,7 +177,27 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, return 0; } -static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) +static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, + bool alloc_kmap) +{ + struct drm_gem_object *obj = &rk_obj->base; + struct drm_device *drm = obj->dev; + struct rockchip_drm_private *private = drm->dev_private; + + if (private->domain) + return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap); + else + return rockchip_gem_alloc_dma(rk_obj, alloc_kmap); +} + +static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj) +{ + vunmap(rk_obj->kvaddr); + rockchip_gem_iommu_unmap(rk_obj); + rockchip_gem_put_pages(rk_obj); +} + +static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj) { struct drm_gem_object *obj = &rk_obj->base; struct drm_device *drm = obj->dev; @@ -51,23 +206,68 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) rk_obj->dma_attrs); } -static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) +static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) +{ + if (rk_obj->pages) + rockchip_gem_free_iommu(rk_obj); + else + rockchip_gem_free_dma(rk_obj); +} +static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj, + struct vm_area_struct *vma) { + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + unsigned int i, count = obj->size >> PAGE_SHIFT; + unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long uaddr = vma->vm_start; + unsigned long offset = vma->vm_pgoff; + unsigned long end = user_count + offset; int ret; + + if (user_count == 0) + return -ENXIO; + if (end > count) + return -ENXIO; + + for (i = offset; i < end; i++) { + ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]); + if (ret) + return ret; + uaddr += PAGE_SIZE; + } + + return 0; +} + +static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); struct drm_device *drm = obj->dev; + return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, + obj->size, rk_obj->dma_attrs); +} + +static int rockchip_drm_gem_object_mmap(struct drm_gem_object 
*obj, + struct vm_area_struct *vma) +{ + int ret; + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + /* - * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear + * We allocated a struct page table for rk_obj, so clear * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). */ vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; - ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, - obj->size, rk_obj->dma_attrs); + if (rk_obj->pages) + ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); + else + ret = rockchip_drm_gem_object_mmap_dma(obj, vma); + if (ret) drm_gem_vm_close(vma); @@ -101,6 +301,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) return rockchip_drm_gem_object_mmap(obj, vma); } +static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) +{ + drm_gem_object_release(&rk_obj->base); + kfree(rk_obj); +} + struct rockchip_gem_object * rockchip_gem_create_object(struct drm_device *drm, unsigned int size, bool alloc_kmap) @@ -117,7 +323,7 @@ struct rockchip_gem_object * obj = &rk_obj->base; - drm_gem_private_object_init(drm, obj, size); + drm_gem_object_init(drm, obj, size); ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); if (ret) @@ -126,7 +332,7 @@ struct rockchip_gem_object * return rk_obj; err_free_rk_obj: - kfree(rk_obj); + rockchip_gem_release_object(rk_obj); return ERR_PTR(ret); } @@ -138,13 +344,11 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj; - drm_gem_free_mmap_offset(obj); - rk_obj = to_rockchip_obj(obj); rockchip_gem_free_buf(rk_obj); - kfree(rk_obj); + rockchip_gem_release_object(rk_obj); } /* @@ -253,6 +457,9 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) struct sg_table *sgt; int ret; + if (rk_obj->pages) + return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); if (!sgt) return ERR_PTR(-ENOMEM); @@ -273,6 +480,10 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + if (rk_obj->pages) + return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, + pgprot_writecombine(PAGE_KERNEL)); + if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return NULL; @@ -281,5 +492,12 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) { - /* Nothing to do */ + struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); + + if (rk_obj->pages) { + vunmap(vaddr); + return; + } + + /* Nothing to do if allocated by DMA mapping API. 
*/ } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 18b3488db4ec..3f6ea4d18a5c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -23,7 +23,15 @@ struct rockchip_gem_object { void *kvaddr; dma_addr_t dma_addr; + /* Used when IOMMU is disabled */ unsigned long dma_attrs; + + /* Used when IOMMU is enabled */ + struct drm_mm_node mm; + unsigned long num_pages; + struct page **pages; + struct sg_table *sgt; + size_t size; }; struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); -- cgit v1.2.3 From 1aa5ca6e3ec63aa5815d78646748e88a7ceb1c8e Mon Sep 17 00:00:00 2001 From: Shunqian Zheng Date: Fri, 24 Jun 2016 10:13:32 +0800 Subject: drm/rockchip: Use common IOMMU API to attach devices Rockchip DRM used the arm special API, arm_iommu_*(), to attach iommu for ARM32 SoCs. This patch convert to common iommu API so it would support ARM64 like RK3399. Since previous patch added support for direct IOMMU address space management, there is no need to use DMA API anymore and this patch wires things to use the new method. Signed-off-by: Shunqian Zheng Signed-off-by: Tomasz Figa Signed-off-by: Mark Yao --- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 101 +++++++++++++++------------- 1 file changed, 54 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/rockchip') diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index c30d649cb147..b360e6251836 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -14,19 +14,19 @@ * GNU General Public License for more details. */ -#include - #include #include #include #include #include #include +#include #include #include #include #include #include +#include #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" @@ -50,28 +50,31 @@ static struct drm_driver rockchip_drm_driver; int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, struct device *dev) { - struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; + struct rockchip_drm_private *private = drm_dev->dev_private; int ret; if (!is_support_iommu) return 0; - ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); - if (ret) + ret = iommu_attach_device(private->domain, dev); + if (ret) { + dev_err(dev, "Failed to attach iommu device\n"); return ret; + } - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - - return arm_iommu_attach_device(dev, mapping); + return 0; } void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, struct device *dev) { + struct rockchip_drm_private *private = drm_dev->dev_private; + struct iommu_domain *domain = private->domain; + if (!is_support_iommu) return; - arm_iommu_detach_device(dev); + iommu_detach_device(domain, dev); } int rockchip_register_crtc_funcs(struct drm_crtc *crtc, @@ -123,11 +126,46 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, priv->crtc_funcs[pipe]->disable_vblank(crtc); } +static int rockchip_drm_init_iommu(struct drm_device *drm_dev) +{ + struct rockchip_drm_private *private = drm_dev->dev_private; + struct iommu_domain_geometry *geometry; + u64 start, end; + + if (!is_support_iommu) + return 0; + + private->domain = iommu_domain_alloc(&platform_bus_type); + if (!private->domain) + return -ENOMEM; + + geometry = &private->domain->geometry; + start = geometry->aperture_start; + end = geometry->aperture_end; + + DRM_DEBUG("IOMMU context initialized (aperture: 
%#llx-%#llx)\n", + start, end); + drm_mm_init(&private->mm, start, end - start + 1); + mutex_init(&private->mm_lock); + + return 0; +} + +static void rockchip_iommu_cleanup(struct drm_device *drm_dev) +{ + struct rockchip_drm_private *private = drm_dev->dev_private; + + if (!is_support_iommu) + return; + + drm_mm_takedown(&private->mm); + iommu_domain_free(private->domain); +} + static int rockchip_drm_bind(struct device *dev) { struct drm_device *drm_dev; struct rockchip_drm_private *private; - struct dma_iommu_mapping *mapping = NULL; int ret; drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); @@ -151,38 +189,14 @@ static int rockchip_drm_bind(struct device *dev) rockchip_drm_mode_config_init(drm_dev); - dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), - GFP_KERNEL); - if (!dev->dma_parms) { - ret = -ENOMEM; + ret = rockchip_drm_init_iommu(drm_dev); + if (ret) goto err_config_cleanup; - } - - if (is_support_iommu) { - /* TODO(djkurtz): fetch the mapping start/size from somewhere */ - mapping = arm_iommu_create_mapping(&platform_bus_type, - 0x00000000, - SZ_2G); - if (IS_ERR(mapping)) { - ret = PTR_ERR(mapping); - goto err_config_cleanup; - } - - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) - goto err_release_mapping; - - dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); - - ret = arm_iommu_attach_device(dev, mapping); - if (ret) - goto err_release_mapping; - } /* Try to bind all sub drivers. */ ret = component_bind_all(dev, drm_dev); if (ret) - goto err_detach_device; + goto err_iommu_cleanup; /* init kms poll for handling hpd */ drm_kms_helper_poll_init(drm_dev); @@ -207,8 +221,6 @@ static int rockchip_drm_bind(struct device *dev) if (ret) goto err_fbdev_fini; - if (is_support_iommu) - arm_iommu_release_mapping(mapping); return 0; err_fbdev_fini: rockchip_drm_fbdev_fini(drm_dev); @@ -217,12 +229,8 @@ err_vblank_cleanup: err_kms_helper_poll_fini: drm_kms_helper_poll_fini(drm_dev); component_unbind_all(dev, drm_dev); -err_detach_device: - if (is_support_iommu) - arm_iommu_detach_device(dev); -err_release_mapping: - if (is_support_iommu) - arm_iommu_release_mapping(mapping); +err_iommu_cleanup: + rockchip_iommu_cleanup(drm_dev); err_config_cleanup: drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; @@ -239,8 +247,7 @@ static void rockchip_drm_unbind(struct device *dev) drm_vblank_cleanup(drm_dev); drm_kms_helper_poll_fini(drm_dev); component_unbind_all(dev, drm_dev); - if (is_support_iommu) - arm_iommu_detach_device(dev); + rockchip_iommu_cleanup(drm_dev); drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; drm_dev_unregister(drm_dev); -- cgit v1.2.3
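Taken together, the two patches above replace the arm_iommu_*() calls with one generic IOMMU domain whose I/O address space is handed out by drm_mm. A minimal sketch of that pattern, with illustrative names and error handling trimmed:

#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <drm/drm_mm.h>

struct my_iommu_ctx {
	struct iommu_domain *domain;
	struct drm_mm mm;	/* allocates I/O virtual address ranges */
	struct mutex mm_lock;	/* protects mm against concurrent allocations */
};

static int my_iommu_init(struct my_iommu_ctx *ctx)
{
	struct iommu_domain_geometry *geo;

	/* One domain for the whole DRM device, shared by all sub-devices. */
	ctx->domain = iommu_domain_alloc(&platform_bus_type);
	if (!ctx->domain)
		return -ENOMEM;

	geo = &ctx->domain->geometry;
	drm_mm_init(&ctx->mm, geo->aperture_start,
		    geo->aperture_end - geo->aperture_start + 1);
	mutex_init(&ctx->mm_lock);

	return 0;
}

static int my_iommu_attach(struct my_iommu_ctx *ctx, struct device *dev)
{
	/* Generic API: works on ARM32 and ARM64 alike, unlike arm_iommu_attach_device(). */
	return iommu_attach_device(ctx->domain, dev);
}

static void my_iommu_cleanup(struct my_iommu_ctx *ctx)
{
	drm_mm_takedown(&ctx->mm);
	iommu_domain_free(ctx->domain);
}

Per-buffer mappings then reserve a range from the drm_mm under mm_lock and map it with iommu_map_sg(), as the GEM patch above does in rockchip_gem_iommu_map().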