Diffstat (limited to 'drivers/clk/renesas/rzv2h-cpg.c')
-rw-r--r-- | drivers/clk/renesas/rzv2h-cpg.c | 369
1 file changed, 300 insertions, 69 deletions
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index b524a9d33610..fb39e6446b26 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -23,7 +23,9 @@
 #include <linux/platform_device.h>
 #include <linux/pm_clock.h>
 #include <linux/pm_domain.h>
+#include <linux/refcount.h>
 #include <linux/reset-controller.h>
+#include <linux/string_choices.h>
 
 #include <dt-bindings/clock/renesas-cpg-mssr.h>
 
@@ -40,10 +42,21 @@
 #define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
 #define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))
 
-#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
-#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
-#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
-#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))
+#define CPG_BUS_1_MSTOP		(0xd00)
+#define CPG_BUS_MSTOP(m)	(CPG_BUS_1_MSTOP + ((m) - 1) * 4)
+
+#define CPG_PLL_STBY(x)		((x))
+#define CPG_PLL_STBY_RESETB	BIT(0)
+#define CPG_PLL_STBY_RESETB_WEN	BIT(16)
+#define CPG_PLL_CLK1(x)		((x) + 0x004)
+#define CPG_PLL_CLK1_KDIV(x)	((s16)FIELD_GET(GENMASK(31, 16), (x)))
+#define CPG_PLL_CLK1_MDIV(x)	FIELD_GET(GENMASK(15, 6), (x))
+#define CPG_PLL_CLK1_PDIV(x)	FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK2(x)		((x) + 0x008)
+#define CPG_PLL_CLK2_SDIV(x)	FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_MON(x)		((x) + 0x010)
+#define CPG_PLL_MON_RESETB	BIT(0)
+#define CPG_PLL_MON_LOCK	BIT(4)
 
 #define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)
 
@@ -64,6 +77,7 @@
  * @resets: Array of resets
  * @num_resets: Number of Module Resets in info->resets[]
  * @last_dt_core_clk: ID of the last Core Clock exported to DT
+ * @mstop_count: Array of mstop values
  * @rcdev: Reset controller entity
  */
 struct rzv2h_cpg_priv {
@@ -78,6 +92,8 @@ struct rzv2h_cpg_priv {
 	unsigned int num_resets;
 	unsigned int last_dt_core_clk;
 
+	atomic_t *mstop_count;
+
 	struct reset_controller_dev rcdev;
 };
 
@@ -87,8 +103,7 @@ struct pll_clk {
 	struct rzv2h_cpg_priv *priv;
 	void __iomem *base;
 	struct clk_hw hw;
-	unsigned int conf;
-	unsigned int type;
+	struct pll pll;
 };
 
 #define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
@@ -97,15 +112,19 @@ struct pll_clk {
  * struct mod_clock - Module clock
  *
  * @priv: CPG private data
+ * @mstop_data: mstop data relating to module clock
  * @hw: handle between common and hardware-specific interfaces
+ * @no_pm: flag to indicate PM is not supported
 * @on_index: register offset
 * @on_bit: ON/MON bit
 * @mon_index: monitor register offset
- * @mon_bit: montor bit
+ * @mon_bit: monitor bit
 */
 struct mod_clock {
 	struct rzv2h_cpg_priv *priv;
+	unsigned int mstop_data;
 	struct clk_hw hw;
+	bool no_pm;
 	u8 on_index;
 	u8 on_bit;
 	s8 mon_index;
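As a sanity check on the new register-layout macros, the CLK1 field extraction can be exercised outside the kernel. A minimal user-space sketch: the sample register value is made up, and GENMASK()/FIELD_GET() are re-implemented here because <linux/bits.h> and <linux/bitfield.h> are kernel-only.

#include <stdint.h>
#include <stdio.h>

/* user-space stand-ins for the kernel's GENMASK()/FIELD_GET() */
#define GENMASK(h, l)		((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, val)	(((val) & (mask)) / ((mask) & ~((mask) << 1)))

int main(void)
{
	uint32_t clk1 = 0x00c86401;	/* hypothetical CPG_PLL_CLK1 readout */

	/* KDIV occupies the top 16 bits and is signed, hence the s16 cast */
	int16_t kdiv = (int16_t)FIELD_GET(GENMASK(31, 16), clk1);
	uint32_t mdiv = FIELD_GET(GENMASK(15, 6), clk1);
	uint32_t pdiv = FIELD_GET(GENMASK(5, 0), clk1);

	printf("KDIV=%d MDIV=%u PDIV=%u\n", kdiv, mdiv, pdiv);	/* 200 400 1 */
	return 0;
}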
@@ -129,27 +148,78 @@ struct ddiv_clk {
 
 #define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)
 
+static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
+{
+	struct pll_clk *pll_clk = to_pll(hw);
+	struct rzv2h_cpg_priv *priv = pll_clk->priv;
+	u32 val = readl(priv->base + CPG_PLL_MON(pll_clk->pll.offset));
+
+	/* Ensure both RESETB and LOCK bits are set */
+	return (val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+	       (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK);
+}
+
+static int rzv2h_cpg_pll_clk_enable(struct clk_hw *hw)
+{
+	struct pll_clk *pll_clk = to_pll(hw);
+	struct rzv2h_cpg_priv *priv = pll_clk->priv;
+	struct pll pll = pll_clk->pll;
+	u32 stby_offset;
+	u32 mon_offset;
+	u32 val;
+	int ret;
+
+	if (rzv2h_cpg_pll_clk_is_enabled(hw))
+		return 0;
+
+	stby_offset = CPG_PLL_STBY(pll.offset);
+	mon_offset = CPG_PLL_MON(pll.offset);
+
+	writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+	       priv->base + stby_offset);
+
+	/*
+	 * Ensure PLL enters into normal mode
+	 *
+	 * Note: There is no HW information about the worst case latency.
+	 *
+	 * Since this latency might depend on external crystal or PLL rate,
+	 * use a "super" safe timeout value.
+	 */
+	ret = readl_poll_timeout_atomic(priv->base + mon_offset, val,
+			(val & (CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK)) ==
+			(CPG_PLL_MON_RESETB | CPG_PLL_MON_LOCK), 200, 2000);
+	if (ret)
+		dev_err(priv->dev, "Failed to enable PLL 0x%x/%pC\n",
+			stby_offset, hw->clk);
+
+	return ret;
+}
+
 static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
 						   unsigned long parent_rate)
 {
 	struct pll_clk *pll_clk = to_pll(hw);
 	struct rzv2h_cpg_priv *priv = pll_clk->priv;
+	struct pll pll = pll_clk->pll;
 	unsigned int clk1, clk2;
 	u64 rate;
 
-	if (!PLL_CLK_ACCESS(pll_clk->conf))
+	if (!pll.has_clkn)
 		return 0;
 
-	clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
-	clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
+	clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
+	clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
 
-	rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
-			       16 + SDIV(clk2));
+	rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
+			       CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
 
-	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
+	return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
 }
 
 static const struct clk_ops rzv2h_cpg_pll_ops = {
+	.is_enabled = rzv2h_cpg_pll_clk_is_enabled,
+	.enable = rzv2h_cpg_pll_clk_enable,
 	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
 };
 
@@ -182,10 +252,9 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
 	init.num_parents = 1;
 
 	pll_clk->hw.init = &init;
-	pll_clk->conf = core->cfg.conf;
+	pll_clk->pll = core->cfg.pll;
 	pll_clk->base = base;
 	pll_clk->priv = priv;
-	pll_clk->type = core->type;
 
 	ret = devm_clk_hw_register(dev, &pll_clk->hw);
 	if (ret)
@@ -230,6 +299,9 @@ static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon
 	u32 bitmask = BIT(mon);
 	u32 val;
 
+	if (mon == CSDIV_NO_MON)
+		return 0;
+
 	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val,
 					 !(val & bitmask), 10, 200);
 }
@@ -261,12 +333,6 @@ static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
 	writel(val, divider->reg);
 
 	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
-	if (ret)
-		goto ddiv_timeout;
-
-	spin_unlock_irqrestore(divider->lock, flags);
-
-	return 0;
 
 ddiv_timeout:
 	spin_unlock_irqrestore(divider->lock, flags);
@@ -309,9 +375,13 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
 		return ERR_PTR(-ENOMEM);
 
 	init.name = core->name;
-	init.ops = &rzv2h_ddiv_clk_divider_ops;
+	if (cfg_ddiv.no_rmw)
+		init.ops = &clk_divider_ops;
+	else
+		init.ops = &rzv2h_ddiv_clk_divider_ops;
 	init.parent_names = &parent_name;
 	init.num_parents = 1;
+	init.flags = CLK_SET_RATE_PARENT;
 
 	ddiv->priv = priv;
 	ddiv->mon = cfg_ddiv.monbit;
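The recalc_rate() math implements Fout = Fin * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV), with the fractional part carried in the lower 16 bits of the multiplier. Below is a standalone sketch of the same fixed-point computation, using hypothetical divider values (24 MHz reference, MDIV=100, KDIV=0, PDIV=1, SDIV=1, giving 1.2 GHz).

#include <stdint.h>
#include <stdio.h>

/* sketch equivalent of the kernel's mul_u64_u32_shr() */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (a * mul) >> shift;	/* no overflow for the ranges used here */
}

int main(void)
{
	uint64_t parent_rate = 24000000;	/* assumed 24 MHz reference */
	uint32_t mdiv = 100, pdiv = 1, sdiv = 1;	/* hypothetical fields */
	int32_t kdiv = 0;	/* KDIV is a signed fractional correction */

	uint64_t rate = mul_u64_u32_shr(parent_rate, (mdiv << 16) + kdiv,
					16 + sdiv);

	/* DIV_ROUND_CLOSEST_ULL() equivalent for the final PDIV */
	printf("PLL output: %llu Hz\n",
	       (unsigned long long)((rate + pdiv / 2) / pdiv));
	return 0;
}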
@@ -331,6 +401,24 @@ rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
 	return div->hw.clk;
 }
 
+static struct clk * __init
+rzv2h_cpg_mux_clk_register(const struct cpg_core_clk *core,
+			   struct rzv2h_cpg_priv *priv)
+{
+	struct smuxed mux = core->cfg.smux;
+	const struct clk_hw *clk_hw;
+
+	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+					  core->parent_names, core->num_parents,
+					  core->flag, priv->base + mux.offset,
+					  mux.shift, mux.width,
+					  core->mux_flags, &priv->rmw_lock);
+	if (IS_ERR(clk_hw))
+		return ERR_CAST(clk_hw);
+
+	return clk_hw->clk;
+}
+
 static struct clk
 *rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
 			       void *data)
@@ -415,6 +503,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
 	case CLK_TYPE_DDIV:
 		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
 		break;
+	case CLK_TYPE_SMUX:
+		clk = rzv2h_cpg_mux_clk_register(core, priv);
+		break;
 	default:
 		goto fail;
 	}
@@ -431,8 +522,72 @@ fail:
 		core->name, PTR_ERR(clk));
 }
 
+static void rzv2h_mod_clock_mstop_enable(struct rzv2h_cpg_priv *priv,
+					 u32 mstop_data)
+{
+	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
+	unsigned long flags;
+	unsigned int i;
+	u32 val = 0;
+
+	spin_lock_irqsave(&priv->rmw_lock, flags);
+	for_each_set_bit(i, &mstop_mask, 16) {
+		if (!atomic_read(&mstop[i]))
+			val |= BIT(i) << 16;
+		atomic_inc(&mstop[i]);
+	}
+	if (val)
+		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void rzv2h_mod_clock_mstop_disable(struct rzv2h_cpg_priv *priv,
+					  u32 mstop_data)
+{
+	unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, mstop_data);
+	u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, mstop_data);
+	atomic_t *mstop = &priv->mstop_count[mstop_index * 16];
+	unsigned long flags;
+	unsigned int i;
+	u32 val = 0;
+
+	spin_lock_irqsave(&priv->rmw_lock, flags);
+	for_each_set_bit(i, &mstop_mask, 16) {
+		if (!atomic_read(&mstop[i]) ||
+		    atomic_dec_and_test(&mstop[i]))
+			val |= BIT(i) << 16 | BIT(i);
+	}
+	if (val)
+		writel(val, priv->base + CPG_BUS_MSTOP(mstop_index));
+	spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
+{
+	struct mod_clock *clock = to_mod_clock(hw);
+	struct rzv2h_cpg_priv *priv = clock->priv;
+	u32 bitmask;
+	u32 offset;
+
+	if (clock->mon_index >= 0) {
+		offset = GET_CLK_MON_OFFSET(clock->mon_index);
+		bitmask = BIT(clock->mon_bit);
+
+		if (!(readl(priv->base + offset) & bitmask))
+			return 0;
+	}
+
+	offset = GET_CLK_ON_OFFSET(clock->on_index);
+	bitmask = BIT(clock->on_bit);
+
+	return readl(priv->base + offset) & bitmask;
+}
+
 static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
 {
+	bool enabled = rzv2h_mod_clock_is_enabled(hw);
 	struct mod_clock *clock = to_mod_clock(hw);
 	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
 	struct rzv2h_cpg_priv *priv = clock->priv;
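The MSTOP helpers rely on two conventions: the upper 16 bits of a CPG_BUS_m_MSTOP write are per-bit write-enables, and a per-bit counter keeps a shared bus released while any of its module clocks is on. A single-threaded model of that logic follows; the driver uses atomic_t counters under priv->rmw_lock, the reset value of the register is an assumption here, and the defensive zero-count branch of the real disable path is omitted.

#include <stdint.h>
#include <stdio.h>

static int mstop_count[16];
static uint32_t mstop_reg = 0xffff;	/* assumed reset state: all buses stopped */

/* upper 16 bits of the value are per-bit write-enables, as in CPG_BUS_m_MSTOP */
static void mstop_write(uint32_t val)
{
	uint32_t wen = val >> 16;

	mstop_reg = (mstop_reg & ~wen) | (val & wen);
}

static void mstop_enable(uint16_t mask)		/* module clock switched on */
{
	uint32_t val = 0;

	for (unsigned int i = 0; i < 16; i++) {
		if (!(mask & (1u << i)))
			continue;
		if (!mstop_count[i])
			val |= (1u << i) << 16;	/* first user: clear the stop bit */
		mstop_count[i]++;
	}
	if (val)
		mstop_write(val);
}

static void mstop_disable(uint16_t mask)	/* module clock switched off */
{
	uint32_t val = 0;

	for (unsigned int i = 0; i < 16; i++) {
		if (!(mask & (1u << i)))
			continue;
		if (mstop_count[i] && --mstop_count[i] == 0)
			val |= ((1u << i) << 16) | (1u << i);	/* last user */
	}
	if (val)
		mstop_write(val);
}

int main(void)
{
	mstop_enable(0x0006);	/* module A needs bus bits 1 and 2 */
	mstop_enable(0x0004);	/* module B shares bit 2 */
	mstop_disable(0x0004);	/* bit 2 stays released: A still counts it */
	printf("MSTOP = 0x%04x\n", mstop_reg);	/* 0xfff9 */
	return 0;
}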
"ON" : "OFF"); + str_on_off(enable)); + + if (enabled == enable) + return 0; value = bitmask << 16; - if (enable) + if (enable) { value |= bitmask; - - writel(value, priv->base + reg); + writel(value, priv->base + reg); + if (clock->mstop_data != BUS_MSTOP_NONE) + rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data); + } else { + if (clock->mstop_data != BUS_MSTOP_NONE) + rzv2h_mod_clock_mstop_disable(priv, clock->mstop_data); + writel(value, priv->base + reg); + } if (!enable || clock->mon_index < 0) return 0; @@ -458,8 +622,8 @@ static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable) error = readl_poll_timeout_atomic(priv->base + reg, value, value & bitmask, 0, 10); if (error) - dev_err(dev, "Failed to enable CLK_ON %p\n", - priv->base + reg); + dev_err(dev, "Failed to enable CLK_ON 0x%x/%pC\n", + GET_CLK_ON_OFFSET(clock->on_index), hw->clk); return error; } @@ -474,24 +638,6 @@ static void rzv2h_mod_clock_disable(struct clk_hw *hw) rzv2h_mod_clock_endisable(hw, false); } -static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw) -{ - struct mod_clock *clock = to_mod_clock(hw); - struct rzv2h_cpg_priv *priv = clock->priv; - u32 bitmask; - u32 offset; - - if (clock->mon_index >= 0) { - offset = GET_CLK_MON_OFFSET(clock->mon_index); - bitmask = BIT(clock->mon_bit); - } else { - offset = GET_CLK_ON_OFFSET(clock->on_index); - bitmask = BIT(clock->on_bit); - } - - return readl(priv->base + offset) & bitmask; -} - static const struct clk_ops rzv2h_mod_clock_ops = { .enable = rzv2h_mod_clock_enable, .disable = rzv2h_mod_clock_disable, @@ -541,8 +687,10 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod, clock->on_bit = mod->on_bit; clock->mon_index = mod->mon_index; clock->mon_bit = mod->mon_bit; + clock->no_pm = mod->no_pm; clock->priv = priv; clock->hw.init = &init; + clock->mstop_data = mod->mstop_data; ret = devm_clk_hw_register(dev, &clock->hw); if (ret) { @@ -552,6 +700,40 @@ rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod, priv->clks[id] = clock->hw.clk; + /* + * Ensure the module clocks and MSTOP bits are synchronized when they are + * turned ON by the bootloader. Enable MSTOP bits for module clocks that were + * turned ON in an earlier boot stage. + */ + if (clock->mstop_data != BUS_MSTOP_NONE && + !mod->critical && rzv2h_mod_clock_is_enabled(&clock->hw)) { + rzv2h_mod_clock_mstop_enable(priv, clock->mstop_data); + } else if (clock->mstop_data != BUS_MSTOP_NONE && mod->critical) { + unsigned long mstop_mask = FIELD_GET(BUS_MSTOP_BITS_MASK, clock->mstop_data); + u16 mstop_index = FIELD_GET(BUS_MSTOP_IDX_MASK, clock->mstop_data); + atomic_t *mstop = &priv->mstop_count[mstop_index * 16]; + unsigned long flags; + unsigned int i; + u32 val = 0; + + /* + * Critical clocks are turned ON immediately upon registration, and the + * MSTOP counter is updated through the rzv2h_mod_clock_enable() path. + * However, if the critical clocks were already turned ON by the initial + * bootloader, synchronize the atomic counter here and clear the MSTOP bit. 
@@ -559,8 +741,8 @@ fail:
 		mod->name, PTR_ERR(clk));
 }
 
-static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
-			    unsigned long id)
+static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+			      unsigned long id, bool assert)
 {
 	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
 	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
@@ -568,35 +750,31 @@ static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
 	u8 monbit = priv->resets[id].mon_bit;
 	u32 value = mask << 16;
 
-	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);
+	dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+		assert ? "assert" : "deassert", id, reg);
 
+	if (!assert)
+		value |= mask;
 	writel(value, priv->base + reg);
 
 	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
 	mask = BIT(monbit);
 
 	return readl_poll_timeout_atomic(priv->base + reg, value,
-					 value & mask, 10, 200);
+					 assert ? (value & mask) : !(value & mask),
+					 10, 200);
+}
+
+static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
+			    unsigned long id)
+{
+	return __rzv2h_cpg_assert(rcdev, id, true);
 }
 
 static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
 			      unsigned long id)
 {
-	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
-	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
-	u32 mask = BIT(priv->resets[id].reset_bit);
-	u8 monbit = priv->resets[id].mon_bit;
-	u32 value = (mask << 16) | mask;
-
-	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);
-
-	writel(value, priv->base + reg);
-
-	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
-	mask = BIT(monbit);
-
-	return readl_poll_timeout_atomic(priv->base + reg, value,
-					 !(value & mask), 10, 200);
+	return __rzv2h_cpg_assert(rcdev, id, false);
 }
 
 static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
@@ -668,17 +846,51 @@ struct rzv2h_cpg_pd {
 	struct generic_pm_domain genpd;
 };
 
+static bool rzv2h_cpg_is_pm_clk(struct rzv2h_cpg_pd *pd,
+				const struct of_phandle_args *clkspec)
+{
+	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
+		return false;
+
+	switch (clkspec->args[0]) {
+	case CPG_MOD: {
+		struct rzv2h_cpg_priv *priv = pd->priv;
+		unsigned int id = clkspec->args[1];
+		struct mod_clock *clock;
+
+		if (id >= priv->num_mod_clks)
+			return false;
+
+		if (priv->clks[priv->num_core_clks + id] == ERR_PTR(-ENOENT))
+			return false;
+
+		clock = to_mod_clock(__clk_get_hw(priv->clks[priv->num_core_clks + id]));
+
+		return !clock->no_pm;
+	}
+
+	case CPG_CORE:
+	default:
+		return false;
+	}
+}
+
 static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
 {
+	struct rzv2h_cpg_pd *pd = container_of(domain, struct rzv2h_cpg_pd, genpd);
 	struct device_node *np = dev->of_node;
 	struct of_phandle_args clkspec;
 	bool once = true;
 	struct clk *clk;
+	unsigned int i;
 	int error;
-	int i = 0;
 
-	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
-					   &clkspec)) {
+	for (i = 0; !of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec); i++) {
+		if (!rzv2h_cpg_is_pm_clk(pd, &clkspec)) {
+			of_node_put(clkspec.np);
+			continue;
+		}
+
 		if (once) {
 			once = false;
 			error = pm_clk_create(dev);
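rzv2h_cpg_is_pm_clk() gates which "clocks = <&cpg CPG_MOD n>" consumer entries get attached via pm_clk: only registered module clocks that have not set no_pm. A stand-alone model of that decision (the CPG_CORE/CPG_MOD values match <dt-bindings/clock/renesas-cpg-mssr.h>; the clock table is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CPG_CORE	0
#define CPG_MOD		1

struct fake_mod_clock { bool registered; bool no_pm; };

static struct fake_mod_clock mod_clks[] = {
	{ .registered = true,  .no_pm = false },	/* id 0: PM-managed */
	{ .registered = true,  .no_pm = true  },	/* id 1: opted out */
	{ .registered = false, .no_pm = false },	/* id 2: not registered */
};

static bool is_pm_clk(uint32_t type, uint32_t id)
{
	if (type != CPG_MOD)		/* core clocks are never PM clocks */
		return false;
	if (id >= sizeof(mod_clks) / sizeof(mod_clks[0]))
		return false;
	if (!mod_clks[id].registered)	/* ERR_PTR(-ENOENT) in the driver */
		return false;
	return !mod_clks[id].no_pm;
}

int main(void)
{
	for (uint32_t id = 0; id < 3; id++)
		printf("CPG_MOD %u -> %s\n", id,
		       is_pm_clk(CPG_MOD, id) ? "pm_clk" : "skip");
	return 0;
}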
@@ -700,7 +912,6 @@ static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device
 				error);
 			goto fail_put;
 		}
-		i++;
 	}
 
 	return 0;
@@ -786,6 +997,14 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
 	if (!clks)
 		return -ENOMEM;
 
+	priv->mstop_count = devm_kcalloc(dev, info->num_mstop_bits,
+					 sizeof(*priv->mstop_count), GFP_KERNEL);
+	if (!priv->mstop_count)
+		return -ENOMEM;
+
+	/* Adjust for CPG_BUS_m_MSTOP starting from m = 1 */
+	priv->mstop_count -= 16;
+
 	priv->resets = devm_kmemdup(dev, info->resets,
 				    sizeof(*info->resets) * info->num_resets,
 				    GFP_KERNEL);
 	if (!priv->resets)
@@ -827,6 +1046,18 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id rzv2h_cpg_match[] = {
+#ifdef CONFIG_CLK_R9A09G047
+	{
+		.compatible = "renesas,r9a09g047-cpg",
+		.data = &r9a09g047_cpg_info,
+	},
+#endif
+#ifdef CONFIG_CLK_R9A09G056
+	{
+		.compatible = "renesas,r9a09g056-cpg",
+		.data = &r9a09g056_cpg_info,
+	},
+#endif
 #ifdef CONFIG_CLK_R9A09G057
 	{
 		.compatible = "renesas,r9a09g057-cpg",
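The probe-time `priv->mstop_count -= 16` works because CPG_BUS_m_MSTOP numbering starts at m = 1, so biasing the base pointer lets rzv2h_mod_clock_mstop_enable() index mstop_count[mstop_index * 16 + bit] directly without wasting slots for a nonexistent m = 0 register. The same arithmetic in a user-space sketch; note it relies on the same out-of-range base pointer the driver uses, and the register count is arbitrary.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int num_mstop_bits = 3 * 16;	/* registers m = 1..3, assumed */
	int *alloc = calloc(num_mstop_bits, sizeof(*alloc));
	int *mstop_count = alloc - 16;	/* bias so index m*16 works for m >= 1 */

	if (!alloc)
		return 1;

	mstop_count[1 * 16 + 0]++;	/* CPG_BUS_1_MSTOP bit 0 -> alloc[0] */
	mstop_count[3 * 16 + 15]++;	/* CPG_BUS_3_MSTOP bit 15 -> alloc[47] */

	printf("alloc[0] = %d, alloc[47] = %d\n", alloc[0], alloc[47]);
	free(alloc);
	return 0;
}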