Diffstat (limited to 'drivers/clk/qcom/clk-cpu-8996.c')
-rw-r--r-- | drivers/clk/qcom/clk-cpu-8996.c | 146
1 files changed, 106 insertions, 40 deletions
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index ee76ef958d31..592c7c3cdeb7 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -12,6 +12,8 @@
  *                     +-------+
  *    XO               |       |
  *  +------------------>0      |
+ *    SYS_APCS_AUX     |       |
+ *  +------------------>3      |
  *                     |       |
  *    PLL/2            | SMUX  +----+
  *  +------->1         |       |    |
@@ -58,6 +60,8 @@
 #include <linux/regmap.h>
 #include <soc/qcom/kryo-l2-accessors.h>
 
+#include <asm/cputype.h>
+
 #include "clk-alpha-pll.h"
 #include "clk-regmap.h"
 #include "clk-regmap-mux.h"
@@ -74,10 +78,16 @@ enum _pmux_input {
 #define PWRCL_REG_OFFSET 0x0
 #define PERFCL_REG_OFFSET 0x80000
 #define MUX_OFFSET 0x40
+#define CLK_CTL_OFFSET 0x44
+#define CLK_CTL_AUTO_CLK_SEL BIT(8)
 #define ALT_PLL_OFFSET 0x100
 #define SSSCTL_OFFSET 0x160
+#define PSCTL_OFFSET 0x164
 
 #define PMUX_MASK 0x3
+#define MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK GENMASK(5, 4)
+#define MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL \
+        FIELD_PREP(MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK, 0x03)
 
 static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
         [PLL_OFF_L_VAL] = 0x04,
@@ -93,21 +103,20 @@ static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = {
 static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = {
         [PLL_OFF_L_VAL] = 0x04,
         [PLL_OFF_ALPHA_VAL] = 0x08,
-        [PLL_OFF_ALPHA_VAL_U] = 0x0c,
         [PLL_OFF_USER_CTL] = 0x10,
-        [PLL_OFF_USER_CTL_U] = 0x14,
         [PLL_OFF_CONFIG_CTL] = 0x18,
         [PLL_OFF_TEST_CTL] = 0x20,
-        [PLL_OFF_TEST_CTL_U] = 0x24,
         [PLL_OFF_STATUS] = 0x28,
 };
 
 /* PLLs */
 
 static const struct alpha_pll_config hfpll_config = {
-        .l = 60,
-        .config_ctl_val = 0x200d4aa8,
+        .l = 54,
+        .config_ctl_val = 0x200d4828,
         .config_ctl_hi_val = 0x006,
+        .test_ctl_val = 0x1c000000,
+        .test_ctl_hi_val = 0x00004000,
         .pre_div_mask = BIT(12),
         .post_div_mask = 0x3 << 8,
         .post_div_val = 0x1 << 8,
@@ -127,7 +136,7 @@ static struct clk_alpha_pll pwrcl_pll = {
                 .name = "pwrcl_pll",
                 .parent_data = pll_parent,
                 .num_parents = ARRAY_SIZE(pll_parent),
-                .ops = &clk_alpha_pll_huayra_ops,
+                .ops = &clk_alpha_pll_hwfsm_ops,
         },
 };
 
@@ -139,7 +148,7 @@ static struct clk_alpha_pll perfcl_pll = {
                 .name = "perfcl_pll",
                 .parent_data = pll_parent,
                 .num_parents = ARRAY_SIZE(pll_parent),
-                .ops = &clk_alpha_pll_huayra_ops,
+                .ops = &clk_alpha_pll_hwfsm_ops,
         },
 };
 
@@ -311,20 +320,29 @@ static const struct clk_ops clk_cpu_8996_pmux_ops = {
         .determine_rate = clk_cpu_8996_pmux_determine_rate,
 };
 
+static const struct parent_map smux_parent_map[] = {
+        { .cfg = 0, }, /* xo */
+        { .cfg = 1, }, /* pll */
+        { .cfg = 3, }, /* sys_apcs_aux */
+};
+
 static const struct clk_parent_data pwrcl_smux_parents[] = {
         { .fw_name = "xo" },
         { .hw = &pwrcl_pll_postdiv.hw },
+        { .fw_name = "sys_apcs_aux" },
 };
 
 static const struct clk_parent_data perfcl_smux_parents[] = {
         { .fw_name = "xo" },
         { .hw = &perfcl_pll_postdiv.hw },
+        { .fw_name = "sys_apcs_aux" },
 };
 
 static struct clk_regmap_mux pwrcl_smux = {
         .reg = PWRCL_REG_OFFSET + MUX_OFFSET,
         .shift = 2,
         .width = 2,
+        .parent_map = smux_parent_map,
         .clkr.hw.init = &(struct clk_init_data) {
                 .name = "pwrcl_smux",
                 .parent_data = pwrcl_smux_parents,
@@ -338,6 +356,7 @@ static struct clk_regmap_mux perfcl_smux = {
         .reg = PERFCL_REG_OFFSET + MUX_OFFSET,
         .shift = 2,
         .width = 2,
+        .parent_map = smux_parent_map,
         .clkr.hw.init = &(struct clk_init_data) {
                 .name = "perfcl_smux",
                 .parent_data = perfcl_smux_parents,
@@ -414,11 +433,55 @@ static struct clk_regmap *cpu_msm8996_clks[] = {
         &perfcl_pmux.clkr,
 };
 
+static void qcom_cpu_clk_msm8996_acd_init(struct regmap *regmap);
+
 static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
                                               struct regmap *regmap)
 {
         int i, ret;
 
+        /* Select GPLL0 for 300MHz for both clusters */
+        regmap_write(regmap, PERFCL_REG_OFFSET + MUX_OFFSET, 0xc);
+        regmap_write(regmap, PWRCL_REG_OFFSET + MUX_OFFSET, 0xc);
+
+        /* Ensure write goes through before PLLs are reconfigured */
+        udelay(5);
+
+        /* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
+        regmap_update_bits(regmap, PWRCL_REG_OFFSET + MUX_OFFSET,
+                           MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
+                           MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
+        regmap_update_bits(regmap, PERFCL_REG_OFFSET + MUX_OFFSET,
+                           MUX_AUTO_CLK_SEL_ALWAYS_ON_MASK,
+                           MUX_AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL);
+
+        clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
+        clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
+        clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
+        clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
+
+        /* Wait for PLL(s) to lock */
+        udelay(50);
+
+        /* Enable auto clock selection for both clusters */
+        regmap_update_bits(regmap, PWRCL_REG_OFFSET + CLK_CTL_OFFSET,
+                           CLK_CTL_AUTO_CLK_SEL, CLK_CTL_AUTO_CLK_SEL);
+        regmap_update_bits(regmap, PERFCL_REG_OFFSET + CLK_CTL_OFFSET,
+                           CLK_CTL_AUTO_CLK_SEL, CLK_CTL_AUTO_CLK_SEL);
+
+        /* Ensure write goes through before muxes are switched */
+        udelay(5);
+
+        qcom_cpu_clk_msm8996_acd_init(regmap);
+
+        /* Pulse swallower and soft-start settings */
+        regmap_write(regmap, PWRCL_REG_OFFSET + PSCTL_OFFSET, 0x00030005);
+        regmap_write(regmap, PERFCL_REG_OFFSET + PSCTL_OFFSET, 0x00030005);
+
+        /* Switch clusters to use the ACD leg */
+        regmap_write(regmap, PWRCL_REG_OFFSET + MUX_OFFSET, 0x32);
+        regmap_write(regmap, PERFCL_REG_OFFSET + MUX_OFFSET, 0x32);
+
         for (i = 0; i < ARRAY_SIZE(cpu_msm8996_hw_clks); i++) {
                 ret = devm_clk_hw_register(dev, cpu_msm8996_hw_clks[i]);
                 if (ret)
@@ -431,11 +494,6 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
                         return ret;
         }
 
-        clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config);
-        clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config);
-        clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
-        clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
-
         /* Enable alt PLLs */
         clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk);
         clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk);
@@ -446,9 +504,9 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
         return ret;
 }
 
-#define CPU_AFINITY_MASK 0xFFF
-#define PWRCL_CPU_REG_MASK 0x3
-#define PERFCL_CPU_REG_MASK 0x103
+#define CPU_CLUSTER_AFFINITY_MASK 0xf00
+#define PWRCL_AFFINITY_MASK 0x000
+#define PERFCL_AFFINITY_MASK 0x100
 
 #define L2ACDCR_REG 0x580ULL
 #define L2ACDTD_REG 0x581ULL
@@ -456,31 +514,32 @@ static int qcom_cpu_clk_msm8996_register_clks(struct device *dev,
 #define L2ACDSSCR_REG 0x589ULL
 
 static DEFINE_SPINLOCK(qcom_clk_acd_lock);
-static void __iomem *base;
 
-static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base)
+static void qcom_cpu_clk_msm8996_acd_init(struct regmap *regmap)
 {
         u64 hwid;
+        u32 val;
         unsigned long flags;
 
         spin_lock_irqsave(&qcom_clk_acd_lock, flags);
 
-        hwid = read_cpuid_mpidr() & CPU_AFINITY_MASK;
+        val = kryo_l2_get_indirect_reg(L2ACDTD_REG);
+        if (val == 0x00006a11)
+                goto out;
 
         kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11);
         kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f);
         kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601);
 
-        if (PWRCL_CPU_REG_MASK == (hwid | PWRCL_CPU_REG_MASK)) {
-                writel(0xf, base + PWRCL_REG_OFFSET + SSSCTL_OFFSET);
-                kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
-        }
+        kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
 
-        if (PERFCL_CPU_REG_MASK == (hwid | PERFCL_CPU_REG_MASK)) {
-                kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd);
-                writel(0xf, base + PERFCL_REG_OFFSET + SSSCTL_OFFSET);
-        }
+        hwid = read_cpuid_mpidr();
+        if ((hwid & CPU_CLUSTER_AFFINITY_MASK) == PWRCL_AFFINITY_MASK)
+                regmap_write(regmap, PWRCL_REG_OFFSET + SSSCTL_OFFSET, 0xf);
+        else
+                regmap_write(regmap, PERFCL_REG_OFFSET + SSSCTL_OFFSET, 0xf);
 
+out:
         spin_unlock_irqrestore(&qcom_clk_acd_lock, flags);
 }
 
@@ -489,31 +548,40 @@ static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
 {
         struct clk_cpu_8996_pmux *cpuclk = to_clk_cpu_8996_pmux_nb(nb);
         struct clk_notifier_data *cnd = data;
-        int ret;
 
         switch (event) {
         case PRE_RATE_CHANGE:
-                ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ALT_INDEX);
-                qcom_cpu_clk_msm8996_acd_init(base);
+                qcom_cpu_clk_msm8996_acd_init(cpuclk->clkr.regmap);
+
+                /*
+                 * Avoid overvolting. clk_core_set_rate_nolock() walks from top
+                 * to bottom, so it will change the rate of the PLL before
+                 * changing the parent of PMUX. This can result in pmux getting
+                 * clocked twice the expected rate.
+                 *
+                 * Manually switch to PLL/2 here.
+                 */
+                if (cnd->new_rate < DIV_2_THRESHOLD &&
+                    cnd->old_rate > DIV_2_THRESHOLD)
+                        clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, SMUX_INDEX);
+
                 break;
-        case POST_RATE_CHANGE:
-                if (cnd->new_rate < DIV_2_THRESHOLD)
-                        ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
-                                                           SMUX_INDEX);
-                else
-                        ret = clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw,
-                                                           ACD_INDEX);
+        case ABORT_RATE_CHANGE:
+                /* Revert manual change */
+                if (cnd->new_rate < DIV_2_THRESHOLD &&
+                    cnd->old_rate > DIV_2_THRESHOLD)
+                        clk_cpu_8996_pmux_set_parent(&cpuclk->clkr.hw, ACD_INDEX);
                 break;
         default:
-                ret = 0;
                 break;
         }
 
-        return notifier_from_errno(ret);
+        return NOTIFY_OK;
 };
 
 static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
 {
+        static void __iomem *base;
         struct regmap *regmap;
         struct clk_hw_onecell_data *data;
         struct device *dev = &pdev->dev;
@@ -535,8 +603,6 @@ static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
         if (ret)
                 return ret;
 
-        qcom_cpu_clk_msm8996_acd_init(base);
-
         data->hws[0] = &pwrcl_pmux.clkr.hw;
         data->hws[1] = &perfcl_pmux.clkr.hw;
         data->num = 2;
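
A note on the smux parent_map hunk above: as I understand the qcom clk-regmap-mux helpers, parent_map translates the clock framework's parent index into the value programmed into the mux select field, so index 2 ("sys_apcs_aux") programs hardware value 3 and the unused value 2 is skipped. The sketch below is a simplified userspace model of that lookup (the struct and file name are illustrative, not the kernel implementation, which also carries a src field and regmap plumbing):

/* smux_map_demo.c - simplified model of the parent_map lookup, plain C */
#include <stdio.h>

struct parent_map {
        unsigned int cfg;       /* value written into the mux select field */
};

/* Mirrors smux_parent_map from the patch: index 2 -> hardware select 3 */
static const struct parent_map smux_parent_map[] = {
        { .cfg = 0 },   /* xo */
        { .cfg = 1 },   /* pll */
        { .cfg = 3 },   /* sys_apcs_aux */
};

int main(void)
{
        unsigned int index = 2; /* framework parent index for "sys_apcs_aux" */

        /* prints "parent index 2 -> mux select 3" */
        printf("parent index %u -> mux select %u\n",
               index, smux_parent_map[index].cfg);
        return 0;
}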
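The MUX_AUTO_CLK_SEL_ALWAYS_ON_* macros added above are plain bit-field arithmetic: GENMASK(5, 4) is 0x30, and FIELD_PREP(0x30, 0x3) shifts the value 3 into bits [5:4], which is also 0x30, i.e. the always-on auto-clock-select source is set to mux input 3 (the GPLL0/2 leg). The standalone sketch below reproduces the numbers; the two macro definitions are simplified stand-ins, not the <linux/bits.h> and <linux/bitfield.h> versions:

/* field_math_demo.c - shows what the always-on mux field resolves to */
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() */
#define GENMASK(h, l)           ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)   (((val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
        uint32_t mask = GENMASK(5, 4);          /* 0x30: bits [5:4] */
        uint32_t sel = FIELD_PREP(mask, 0x03);  /* 0x30: value 3 in bits [5:4] */

        printf("mask = 0x%02x, sel = 0x%02x\n", mask, sel);
        return 0;
}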
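The rewritten qcom_cpu_clk_msm8996_acd_init() no longer compares against whole-MPIDR masks; it only checks the Aff1 (cluster) field, bits 0xf00, of the calling CPU's MPIDR and pokes the SSSCTL register of that cluster. Below is a userspace-style sketch of the same decision with a hard-coded example MPIDR value; the real code reads the register with read_cpuid_mpidr() under the ACD spinlock and writes through the regmap:

/* cluster_affinity_demo.c - models the cluster check from the patch */
#include <stdio.h>
#include <stdint.h>

#define CPU_CLUSTER_AFFINITY_MASK 0xf00 /* Aff1 field of MPIDR */
#define PWRCL_AFFINITY_MASK       0x000 /* power cluster: Aff1 == 0 */
#define PERFCL_AFFINITY_MASK      0x100 /* perf cluster:  Aff1 == 1 */

int main(void)
{
        /* Example MPIDR: Aff1 = 1, Aff0 = 0, i.e. a perf-cluster CPU */
        uint64_t hwid = 0x80000100;

        if ((hwid & CPU_CLUSTER_AFFINITY_MASK) == PWRCL_AFFINITY_MASK)
                puts("power cluster -> write 0xf to PWRCL SSSCTL");
        else
                puts("perf cluster -> write 0xf to PERFCL SSSCTL");
        return 0;
}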