author		Hai Li <hali@codeaurora.org>		2015-05-15 20:04:04 +0300
committer	Rob Clark <robdclark@gmail.com>		2015-06-11 20:11:04 +0300
commit		825637b9c06cede2a742421b0ea6f24428099af3 (patch)
tree		b0195dbd6f733b19d749e31d1a0b8aba1707a1fa /drivers/gpu/drm/msm
parent		bdc80de2a6dbaa0d08c393b4ba8912a0ad4b047f (diff)
download	linux-825637b9c06cede2a742421b0ea6f24428099af3.tar.xz
drm/msm/dsi: Add DSI PLL clock driver support
DSI byte clock and pixel clocks are sourced from DSI PLL. This change adds
the DSI PLL source clock driver under common clock framework. This change
handles DSI 28nm PLL only.

Signed-off-by: Hai Li <hali@codeaurora.org>
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Stephane Viau <sviau@codeaurora.org>
Signed-off-by: Wentao Xu <wentaox@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Kconfig                  |   7
-rw-r--r--  drivers/gpu/drm/msm/Makefile                 |   5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h                |   3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c           |  10
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_phy.c            |   3
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c        | 164
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h        |  89
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c   | 652
8 files changed, 926 insertions, 7 deletions
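
For context, the PLL exposes the DSI byte and pixel clocks as common clock
framework providers. Below is a minimal sketch of how display code might pick
them up through the msm_dsi_pll_get_clk_provider() API added by this patch.
The function name example_use_dsi_pll_clks() is hypothetical and only
illustrates the CCF plumbing; in the real driver the two providers are
typically used as parents for the MDSS byte/pixel clocks rather than enabled
directly like this.

#include <linux/clk.h>
#include <linux/err.h>

#include "dsi_pll.h"

/* Hypothetical consumer: fetch the byte/pixel clock providers from a
 * registered DSI PLL and program a byte clock rate through the common
 * clock framework. Error handling is trimmed for brevity.
 */
static int example_use_dsi_pll_clks(struct msm_dsi_pll *pll,
				    unsigned long byte_rate)
{
	struct clk *byte_clk, *pixel_clk;
	int ret;

	ret = msm_dsi_pll_get_clk_provider(pll, &byte_clk, &pixel_clk);
	if (ret)
		return ret;

	/* With CLK_SET_RATE_PARENT on the intermediate clocks, setting the
	 * byte clock rate propagates up to the VCO registered by the PLL
	 * driver below.
	 */
	ret = clk_set_rate(byte_clk, byte_rate);
	if (ret)
		return ret;

	return clk_prepare_enable(byte_clk);
}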
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 0a6f6764a37c..08ba8d0d93f5 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -46,3 +46,10 @@ config DRM_MSM_DSI
Choose this option if you have a need for MIPI DSI connector
support.
+config DRM_MSM_DSI_PLL
+ bool "Enable DSI PLL driver in MSM DRM"
+ depends on DRM_MSM_DSI && COMMON_CLK
+ default y
+ help
+ Choose this option to enable DSI PLL driver which provides DSI
+ source clocks under common clock framework.
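
For a kernel configuration that wants these PLL clocks, the resulting fragment
would typically look like the illustrative example below; DRM_MSM, DRM_MSM_DSI
and COMMON_CLK are the existing symbols the new option depends on, and the new
option defaults to y once both are set.

CONFIG_COMMON_CLK=y
CONFIG_DRM_MSM=y
CONFIG_DRM_MSM_DSI=y
# defaults to y when the two dependencies above are satisfied
CONFIG_DRM_MSM_DSI_PLL=y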
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ab2086783fee..16a81b94d6f0 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
@@ -50,10 +51,14 @@ msm-y := \
msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
dsi/dsi_phy.o \
mdp/mdp5/mdp5_cmd_encoder.o
+msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
+ dsi/pll/dsi_pll_28nm.o
+
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 10f54d4e379a..321964a6b27e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -103,7 +103,8 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
struct msm_dsi_phy;
enum msm_dsi_phy_type {
MSM_DSI_PHY_UNKNOWN,
- MSM_DSI_PHY_28NM,
+ MSM_DSI_PHY_28NM_HPM,
+ MSM_DSI_PHY_28NM_LP,
MSM_DSI_PHY_MAX
};
struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 517e0709b77d..8a246cfa5d19 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -64,7 +64,7 @@ static const struct dsi_config dsi_cfgs[] = {
.major = MSM_DSI_VER_MAJOR_6G,
.minor = MSM_DSI_6G_VER_MINOR_V1_0,
.io_offset = DSI_6G_REG_SHIFT,
- .phy_type = MSM_DSI_PHY_28NM,
+ .phy_type = MSM_DSI_PHY_28NM_HPM,
.reg_cfg = {
.num = 4,
.regs = {
@@ -79,7 +79,7 @@ static const struct dsi_config dsi_cfgs[] = {
.major = MSM_DSI_VER_MAJOR_6G,
.minor = MSM_DSI_6G_VER_MINOR_V1_1,
.io_offset = DSI_6G_REG_SHIFT,
- .phy_type = MSM_DSI_PHY_28NM,
+ .phy_type = MSM_DSI_PHY_28NM_HPM,
.reg_cfg = {
.num = 4,
.regs = {
@@ -94,7 +94,7 @@ static const struct dsi_config dsi_cfgs[] = {
.major = MSM_DSI_VER_MAJOR_6G,
.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
.io_offset = DSI_6G_REG_SHIFT,
- .phy_type = MSM_DSI_PHY_28NM,
+ .phy_type = MSM_DSI_PHY_28NM_HPM,
.reg_cfg = {
.num = 4,
.regs = {
@@ -109,7 +109,7 @@ static const struct dsi_config dsi_cfgs[] = {
.major = MSM_DSI_VER_MAJOR_6G,
.minor = MSM_DSI_6G_VER_MINOR_V1_2,
.io_offset = DSI_6G_REG_SHIFT,
- .phy_type = MSM_DSI_PHY_28NM,
+ .phy_type = MSM_DSI_PHY_28NM_HPM,
.reg_cfg = {
.num = 4,
.regs = {
@@ -124,7 +124,7 @@ static const struct dsi_config dsi_cfgs[] = {
.major = MSM_DSI_VER_MAJOR_6G,
.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
.io_offset = DSI_6G_REG_SHIFT,
- .phy_type = MSM_DSI_PHY_28NM,
+ .phy_type = MSM_DSI_PHY_28NM_LP,
.reg_cfg = {
.num = 4,
.regs = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
index 61af19312711..4403f38bf220 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -311,7 +311,8 @@ struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
}
switch (type) {
- case MSM_DSI_PHY_28NM:
+ case MSM_DSI_PHY_28NM_HPM:
+ case MSM_DSI_PHY_28NM_LP:
dsi_phy_func_init(28nm);
break;
default:
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
new file mode 100644
index 000000000000..509376fdd112
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_pll.h"
+
+static int dsi_pll_enable(struct msm_dsi_pll *pll)
+{
+ int i, ret = 0;
+
+ /*
+ * Certain PLLs do not allow VCO rate update when it is on.
+ * Keep track of their status to turn on/off after set rate success.
+ */
+ if (unlikely(pll->pll_on))
+ return 0;
+
+ /* Try all enable sequences until one succeeds */
+ for (i = 0; i < pll->en_seq_cnt; i++) {
+ ret = pll->enable_seqs[i](pll);
+ DBG("DSI PLL %s after sequence #%d",
+ ret ? "unlocked" : "locked", i + 1);
+ if (!ret)
+ break;
+ }
+
+ if (ret) {
+ DRM_ERROR("DSI PLL failed to lock\n");
+ return ret;
+ }
+
+ pll->pll_on = true;
+
+ return 0;
+}
+
+static void dsi_pll_disable(struct msm_dsi_pll *pll)
+{
+ if (unlikely(!pll->pll_on))
+ return;
+
+ pll->disable_seq(pll);
+
+ pll->pll_on = false;
+}
+
+/*
+ * DSI PLL Helper functions
+ */
+long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+ if (rate < pll->min_rate)
+ return pll->min_rate;
+ else if (rate > pll->max_rate)
+ return pll->max_rate;
+ else
+ return rate;
+}
+
+int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ int ret;
+
+ /*
+ * Certain PLLs need to update the same VCO rate and registers
+ * after resume in suspend/resume scenario.
+ */
+ if (pll->restore_state) {
+ ret = pll->restore_state(pll);
+ if (ret)
+ goto error;
+ }
+
+ ret = dsi_pll_enable(pll);
+
+error:
+ return ret;
+}
+
+void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+ if (pll->save_state)
+ pll->save_state(pll);
+
+ dsi_pll_disable(pll);
+}
+
+void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
+ struct clk **clks, u32 num_clks)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ if (!num_clks || !clks)
+ return;
+
+ do {
+ clk_unregister(clks[--num_clks]);
+ clks[num_clks] = NULL;
+ } while (num_clks);
+}
+
+/*
+ * DSI PLL API
+ */
+int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
+{
+ if (pll->get_provider)
+ return pll->get_provider(pll,
+ byte_clk_provider,
+ pixel_clk_provider);
+
+ return -EINVAL;
+}
+
+void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
+{
+ if (pll->destroy)
+ pll->destroy(pll);
+}
+
+struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
+{
+ struct device *dev = &pdev->dev;
+ struct msm_dsi_pll *pll;
+
+ switch (type) {
+ case MSM_DSI_PHY_28NM_HPM:
+ case MSM_DSI_PHY_28NM_LP:
+ pll = msm_dsi_pll_28nm_init(pdev, type, id);
+ break;
+ default:
+ pll = ERR_PTR(-ENXIO);
+ break;
+ }
+
+ if (IS_ERR(pll)) {
+ dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+ return NULL;
+ }
+
+ pll->type = type;
+
+ DBG("DSI:%d PLL registered", id);
+
+ return pll;
+}
+
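
The generic enable/disable and clk_prepare/unprepare helpers above only do
something useful once a PLL type fills in the callbacks of struct msm_dsi_pll
(declared in dsi_pll.h below). The following is a condensed, hypothetical
skeleton of that contract for orientation only; the real implementation is
dsi_pll_28nm.c later in this patch, and the example_pll_* names are invented.

#include "dsi_pll.h"

/* Hypothetical PLL-type skeleton showing which fields dsi_pll_enable(),
 * dsi_pll_disable() and the msm_dsi_pll_helper_clk_* callbacks expect
 * to be populated.
 */
static int example_pll_enable_seq(struct msm_dsi_pll *pll)
{
	/* program registers, poll for lock; return 0 on lock */
	return 0;
}

static void example_pll_disable_seq(struct msm_dsi_pll *pll)
{
	/* power the PLL down */
}

static void example_pll_setup(struct msm_dsi_pll *pll)
{
	/* clamp range used by msm_dsi_pll_helper_clk_round_rate() */
	pll->min_rate = 350000000UL;
	pll->max_rate = 750000000UL;

	/* dsi_pll_enable() walks enable_seqs[] until one of them locks */
	pll->en_seq_cnt = 1;
	pll->enable_seqs[0] = example_pll_enable_seq;
	pll->disable_seq = example_pll_disable_seq;

	/* optional: save_state/restore_state for suspend/resume, plus
	 * get_provider/destroy for the msm_dsi_pll_* API above.
	 */
}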
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
new file mode 100644
index 000000000000..5a3bb241c039
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PLL_H__
+#define __DSI_PLL_H__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi.h"
+
+#define NUM_DSI_CLOCKS_MAX 6
+#define MAX_DSI_PLL_EN_SEQS 10
+
+struct msm_dsi_pll {
+ enum msm_dsi_phy_type type;
+
+ struct clk_hw clk_hw;
+ bool pll_on;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+ u32 en_seq_cnt;
+
+ int (*enable_seqs[MAX_DSI_PLL_EN_SEQS])(struct msm_dsi_pll *pll);
+ void (*disable_seq)(struct msm_dsi_pll *pll);
+ int (*get_provider)(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider);
+ void (*destroy)(struct msm_dsi_pll *pll);
+ void (*save_state)(struct msm_dsi_pll *pll);
+ int (*restore_state)(struct msm_dsi_pll *pll);
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
+
+static inline void pll_write(void __iomem *reg, u32 data)
+{
+ msm_writel(data, reg);
+}
+
+static inline u32 pll_read(const void __iomem *reg)
+{
+ return msm_readl(reg);
+}
+
+static inline void pll_write_udelay(void __iomem *reg, u32 data, u32 delay_us)
+{
+ pll_write(reg, data);
+ udelay(delay_us);
+}
+
+static inline void pll_write_ndelay(void __iomem *reg, u32 data, u32 delay_ns)
+{
+ pll_write((reg), data);
+ ndelay(delay_ns);
+}
+
+/*
+ * DSI PLL Helper functions
+ */
+
+/* clock callbacks */
+long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *parent_rate);
+int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw);
+void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw);
+/* misc */
+void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
+ struct clk **clks, u32 num_clks);
+
+/*
+ * Initialization for Each PLL Type
+ */
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id);
+
+#endif /* __DSI_PLL_H__ */
+
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
new file mode 100644
index 000000000000..eb8ac3097ff5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 28nm - clock diagram (eg: DSI0):
+ *
+ *   dsi0analog_postdiv_clk
+ *                              |         dsi0indirect_path_div2_clk
+ *                              |          |
+ *                    +------+  |  +----+  |  |\   dsi0byte_mux
+ *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
+ *                |  +------+     +----+     | m|  |  +----+
+ *                |                          | u|--o--| /4 |-- dsi0pllbyte
+ *                |                          | x|     +----+
+ *                o--------------------------| /
+ *                |                          |/
+ *                |          +------+
+ *                o----------| DIV3 |------------------------- dsi0pll
+ *                           +------+
+ */
+
+#define POLL_MAX_READS 10
+#define POLL_TIMEOUT_US 50
+
+#define NUM_PROVIDED_CLKS 2
+
+#define VCO_REF_CLK_RATE 19200000
+#define VCO_MIN_RATE 350000000
+#define VCO_MAX_RATE 750000000
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#define LPFR_LUT_SIZE 10
+struct lpfr_cfg {
+ unsigned long vco_rate;
+ u32 resistance;
+};
+
+/* Loop filter resistance: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+ { 479500000, 8 },
+ { 480000000, 11 },
+ { 575500000, 8 },
+ { 576000000, 12 },
+ { 610500000, 8 },
+ { 659500000, 9 },
+ { 671500000, 10 },
+ { 672000000, 14 },
+ { 708500000, 10 },
+ { 750000000, 11 },
+};
+
+struct pll_28nm_cached_state {
+ unsigned long vco_rate;
+ u8 postdiv3;
+ u8 postdiv1;
+ u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+ void __iomem *mmio;
+
+ int vco_delay;
+
+ /* private clocks: */
+ struct clk *clks[NUM_DSI_CLOCKS_MAX];
+ u32 num_clks;
+
+ /* clock-provider: */
+ struct clk *provided_clks[NUM_PROVIDED_CLKS];
+ struct clk_onecell_data clk_data;
+
+ struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+ u32 nb_tries, u32 timeout_us)
+{
+ bool pll_locked = false;
+ u32 val;
+
+ while (nb_tries--) {
+ val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
+ pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout_us);
+ }
+ DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+ return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+ void __iomem *base = pll_28nm->mmio;
+
+ /*
+ * Add HW recommended delays after toggling the software
+ * reset bit off and back on.
+ */
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+ DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct device *dev = &pll_28nm->pdev->dev;
+ void __iomem *base = pll_28nm->mmio;
+ unsigned long div_fbx1000, gen_vco_clk;
+ u32 refclk_cfg, frac_n_mode, frac_n_value;
+ u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+ u32 cal_cfg10, cal_cfg11;
+ u32 rem;
+ int i;
+
+ VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+ /* Force postdiv2 to be div-4 */
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+ /* Configure the Loop filter resistance */
+ for (i = 0; i < LPFR_LUT_SIZE; i++)
+ if (rate <= lpfr_lut[i].vco_rate)
+ break;
+ if (i == LPFR_LUT_SIZE) {
+ dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+ rate);
+ return -EINVAL;
+ }
+ pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+ /* Loop filter capacitance values : c1 and c2 */
+ pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
+ rem = rate % VCO_REF_CLK_RATE;
+ if (rem) {
+ refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+ frac_n_mode = 1;
+ div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+ gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+ } else {
+ refclk_cfg = 0x0;
+ frac_n_mode = 0;
+ div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+ gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+ }
+
+ DBG("refclk_cfg = %d", refclk_cfg);
+
+ rem = div_fbx1000 % 1000;
+ frac_n_value = (rem << 16) / 1000;
+
+ DBG("div_fb = %lu", div_fbx1000);
+ DBG("frac_n_value = %d", frac_n_value);
+
+ DBG("Generated VCO Clock: %lu", gen_vco_clk);
+ rem = 0;
+ sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+ sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+ if (frac_n_mode) {
+ sdm_cfg0 = 0x0;
+ sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+ sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+ (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+ sdm_cfg3 = frac_n_value >> 8;
+ sdm_cfg2 = frac_n_value & 0xff;
+ } else {
+ sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+ sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+ (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+ sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+ sdm_cfg2 = 0;
+ sdm_cfg3 = 0;
+ }
+
+ DBG("sdm_cfg0=%d", sdm_cfg0);
+ DBG("sdm_cfg1=%d", sdm_cfg1);
+ DBG("sdm_cfg2=%d", sdm_cfg2);
+ DBG("sdm_cfg3=%d", sdm_cfg3);
+
+ cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+ cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+ DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+ pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+ pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+ pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+ /* Add hardware recommended delay for correct PLL configuration */
+ if (pll_28nm->vco_delay)
+ udelay(pll_28nm->vco_delay);
+
+ pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);
+
+ return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+ POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ void __iomem *base = pll_28nm->mmio;
+ u32 sdm0, doubler, sdm_byp_div;
+ u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+ u32 ref_clk = VCO_REF_CLK_RATE;
+ unsigned long vco_rate;
+
+ VERB("parent_rate=%lu", parent_rate);
+
+ /* Check to see if the ref clk doubler is enabled */
+ doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+ DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+ ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+ /* see if it is integer mode or sdm mode */
+ sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+ if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+ /* integer mode */
+ sdm_byp_div = FIELD(
+ pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+ DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+ vco_rate = ref_clk * sdm_byp_div;
+ } else {
+ /* sdm mode */
+ sdm_dc_off = FIELD(
+ pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+ DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+ DBG("sdm_dc_off = %d", sdm_dc_off);
+ sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+ DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+ sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+ DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+ sdm_freq_seed = (sdm3 << 8) | sdm2;
+ DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
+ vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+ mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+ DBG("vco rate = %lu", vco_rate);
+ }
+
+ DBG("returning vco rate = %lu", vco_rate);
+
+ return vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_28nm_clk_set_rate,
+ .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+ .prepare = msm_dsi_pll_helper_clk_prepare,
+ .unprepare = msm_dsi_pll_helper_clk_unprepare,
+ .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct device *dev = &pll_28nm->pdev->dev;
+ void __iomem *base = pll_28nm->mmio;
+ u32 max_reads = 5, timeout_us = 100;
+ bool locked;
+ u32 val;
+ int i;
+
+ DBG("id=%d", pll_28nm->id);
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
+ for (i = 0; i < 2; i++) {
+ /* DSI Uniphy lock detect setting */
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+ 0x0c, 100);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+ /* poll for PLL ready status */
+ locked = pll_28nm_poll_for_ready(pll_28nm,
+ max_reads, timeout_us);
+ if (locked)
+ break;
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+ val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+ }
+
+ if (unlikely(!locked))
+ dev_err(dev, "DSI PLL lock failed\n");
+ else
+ DBG("DSI PLL Lock success");
+
+ return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct device *dev = &pll_28nm->pdev->dev;
+ void __iomem *base = pll_28nm->mmio;
+ bool locked;
+ u32 max_reads = 10, timeout_us = 50;
+ u32 val;
+
+ DBG("id=%d", pll_28nm->id);
+
+ pll_28nm_software_reset(pll_28nm);
+
+ /*
+ * PLL power up sequence.
+ * Add necessary delays recommended by hardware.
+ */
+ pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+ val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+ pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+ pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+ DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+ pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+ /* DSI PLL toggle lock detect setting */
+ pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+ pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+ locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+ if (unlikely(!locked))
+ dev_err(dev, "DSI PLL lock failed\n");
+ else
+ DBG("DSI PLL lock success");
+
+ return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ DBG("id=%d", pll_28nm->id);
+ pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->mmio;
+
+ cached_state->postdiv3 =
+ pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+ cached_state->postdiv1 =
+ pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+ cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
+ cached_state->vco_rate = __clk_get_rate(pll->clk_hw.clk);
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+ void __iomem *base = pll_28nm->mmio;
+ int ret;
+
+ if ((cached_state->vco_rate != 0) &&
+ (cached_state->vco_rate == __clk_get_rate(pll->clk_hw.clk))) {
+ ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+ cached_state->vco_rate, 0);
+ if (ret) {
+ dev_err(&pll_28nm->pdev->dev,
+ "restore vco rate failed. ret=%d\n", ret);
+ return ret;
+ }
+
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ cached_state->postdiv3);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ cached_state->postdiv1);
+ pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+ cached_state->byte_mux);
+
+ cached_state->vco_rate = 0;
+ }
+
+ return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+ if (byte_clk_provider)
+ *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+ if (pixel_clk_provider)
+ *pixel_clk_provider =
+ pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+ return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+ int i;
+
+ msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+ pll_28nm->clks, pll_28nm->num_clks);
+
+ for (i = 0; i < NUM_PROVIDED_CLKS; i++)
+ pll_28nm->provided_clks[i] = NULL;
+
+ pll_28nm->num_clks = 0;
+ pll_28nm->clk_data.clks = NULL;
+ pll_28nm->clk_data.clk_num = 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+ char clk_name[32], parent1[32], parent2[32], vco_name[32];
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .name = vco_name,
+ .ops = &clk_ops_dsi_pll_28nm_vco,
+ };
+ struct device *dev = &pll_28nm->pdev->dev;
+ struct clk **clks = pll_28nm->clks;
+ struct clk **provided_clks = pll_28nm->provided_clks;
+ int num = 0;
+ int ret;
+
+ DBG("%d", pll_28nm->id);
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+ pll_28nm->base.clk_hw.init = &vco_init;
+ clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+ snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+ snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+ clks[num++] = clk_register_divider(dev, clk_name,
+ parent1, CLK_SET_RATE_PARENT,
+ pll_28nm->mmio +
+ REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+ 0, 4, 0, NULL);
+
+ snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+ snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+ clks[num++] = clk_register_fixed_factor(dev, clk_name,
+ parent1, CLK_SET_RATE_PARENT,
+ 1, 2);
+
+ snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+ snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+ clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+ clk_register_divider(dev, clk_name,
+ parent1, 0, pll_28nm->mmio +
+ REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+ 0, 8, 0, NULL);
+
+ snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
+ snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+ snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+ clks[num++] = clk_register_mux(dev, clk_name,
+ (const char *[]){
+ parent1, parent2
+ }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+ REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+ snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
+ clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+ clk_register_fixed_factor(dev, clk_name,
+ parent1, CLK_SET_RATE_PARENT, 1, 4);
+
+ pll_28nm->num_clks = num;
+
+ pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+ pll_28nm->clk_data.clks = provided_clks;
+
+ ret = of_clk_add_provider(dev->of_node,
+ of_clk_src_onecell_get, &pll_28nm->clk_data);
+ if (ret) {
+ dev_err(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+ enum msm_dsi_phy_type type, int id)
+{
+ struct dsi_pll_28nm *pll_28nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+ if (!pll_28nm)
+ return ERR_PTR(-ENOMEM);
+
+ pll_28nm->pdev = pdev;
+ pll_28nm->id = id;
+
+ pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+ dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = &pll_28nm->base;
+ pll->min_rate = VCO_MIN_RATE;
+ pll->max_rate = VCO_MAX_RATE;
+ pll->get_provider = dsi_pll_28nm_get_provider;
+ pll->destroy = dsi_pll_28nm_destroy;
+ pll->disable_seq = dsi_pll_28nm_disable_seq;
+ pll->save_state = dsi_pll_28nm_save_state;
+ pll->restore_state = dsi_pll_28nm_restore_state;
+
+ if (type == MSM_DSI_PHY_28NM_HPM) {
+ pll_28nm->vco_delay = 1;
+
+ pll->en_seq_cnt = 3;
+ pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
+ pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
+ pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
+ } else if (type == MSM_DSI_PHY_28NM_LP) {
+ pll_28nm->vco_delay = 1000;
+
+ pll->en_seq_cnt = 1;
+ pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
+ } else {
+ dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = pll_28nm_register(pll_28nm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return pll;
+}
+
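
As a worked illustration of the fractional-N arithmetic in
dsi_pll_28nm_clk_set_rate(), the standalone userspace sketch below repeats the
same integer math for one example VCO target (750 MHz against the 19.2 MHz
reference, which lands in fractional mode). It only mirrors the computation
shown in the patch; no additional register semantics are implied.

#include <stdio.h>
#include <stdint.h>

#define VCO_REF_CLK_RATE 19200000UL

/* Reproduce the divider/SDM arithmetic of dsi_pll_28nm_clk_set_rate()
 * for one example rate, purely for illustration.
 */
int main(void)
{
	unsigned long rate = 750000000UL;	/* example VCO target */
	unsigned long div_fbx1000, gen_vco_clk;
	uint32_t frac_n_mode, frac_n_value, rem;

	rem = rate % VCO_REF_CLK_RATE;
	if (rem) {
		/* fractional mode: reference doubler enabled */
		frac_n_mode = 1;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
	} else {
		/* integer mode */
		frac_n_mode = 0;
		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
	}

	rem = div_fbx1000 % 1000;
	frac_n_value = (rem << 16) / 1000;

	printf("frac_n_mode=%u div_fbx1000=%lu gen_vco_clk=%lu frac_n=%u\n",
	       frac_n_mode, div_fbx1000, gen_vco_clk, frac_n_value);
	/* fractional mode for this example, so the DC offset applies */
	printf("dc_offset=%lu sdm_cfg2=0x%02x sdm_cfg3=0x%02x\n",
	       ((div_fbx1000 / 1000) & 0x3f) - 1,
	       frac_n_value & 0xff, frac_n_value >> 8);
	return 0;
}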