author | Taniya Das <tdas@codeaurora.org> | 2021-04-25 10:08:22 +0300
committer | Stephen Boyd <sboyd@kernel.org> | 2021-05-26 06:36:21 +0300
commit | 7f891faf596ede96dd209b2257abf9df636b481f (patch)
tree | f17b731f339a3ccb3ec92e947efdb0a74948d2ac /drivers/clk/qcom/clk-rcg2.c
parent | 6efb943b8616ec53a5e444193dccf1af9ad627b5 (diff)
download | linux-7f891faf596ede96dd209b2257abf9df636b481f.tar.xz
clk: qcom: clk-rcg2: Add support for duty-cycle for RCG
The root clock generators with an MND divider have the capability to
support a change in duty-cycle by updating the 'D' value. Add the clock
ops which check all the boundary conditions and enable setting the
desired duty-cycle as requested by the consumer.
Signed-off-by: Taniya Das <tdas@codeaurora.org>
Link: https://lore.kernel.org/r/1619334502-9880-2-git-send-email-tdas@codeaurora.org
[sboyd@kernel.org: Remove _val everywhere]
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
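For context, here is a minimal consumer-side sketch of how these ops would be exercised through the common clock framework. The device pointer, the clock lookup name "gp1", and the 1/4 duty-cycle request are illustrative assumptions, not part of this patch; any clock backed by clk_rcg2_ops with a non-zero mnd_width would take the same path into clk_rcg2_set_duty_cycle().

```c
#include <linux/clk.h>
#include <linux/device.h>

/*
 * Hypothetical consumer requesting a 25 % duty-cycle on an MND-capable
 * RCG. The clock name "gp1" is an example placeholder.
 */
static int example_request_duty_cycle(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "gp1");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* 1/4 -> 25 % high time; the RCG ops update the D register */
	ret = clk_set_duty_cycle(clk, 1, 4);
	if (ret)
		return ret;

	return clk_prepare_enable(clk);
}
```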
Diffstat (limited to 'drivers/clk/qcom/clk-rcg2.c')
-rw-r--r-- | drivers/clk/qcom/clk-rcg2.c | 81
1 file changed, 81 insertions, 0 deletions
```diff
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 05ff3b0d233e..e1b1b426fae4 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -357,6 +357,83 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
 }
 
+static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	u32 notn_m, n, m, d, not2d, mask;
+
+	if (!rcg->mnd_width) {
+		/* 50 % duty-cycle for Non-MND RCGs */
+		duty->num = 1;
+		duty->den = 2;
+		return 0;
+	}
+
+	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
+	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
+
+	if (!not2d && !m && !notn_m) {
+		/* 50 % duty-cycle always */
+		duty->num = 1;
+		duty->den = 2;
+		return 0;
+	}
+
+	mask = BIT(rcg->mnd_width) - 1;
+
+	d = ~(not2d) & mask;
+	d = DIV_ROUND_CLOSEST(d, 2);
+
+	n = (~(notn_m) + m) & mask;
+
+	duty->num = d;
+	duty->den = n;
+
+	return 0;
+}
+
+static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	u32 notn_m, n, m, d, not2d, mask, duty_per;
+	int ret;
+
+	/* Duty-cycle cannot be modified for non-MND RCGs */
+	if (!rcg->mnd_width)
+		return -EINVAL;
+
+	mask = BIT(rcg->mnd_width) - 1;
+
+	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
+	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
+
+	n = (~(notn_m) + m) & mask;
+
+	duty_per = (duty->num * 100) / duty->den;
+
+	/* Calculate 2d value */
+	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);
+
+	/* Check bit widths of 2d. If D is too big reduce duty cycle. */
+	if (d > mask)
+		d = mask;
+
+	if ((d / 2) > (n - m))
+		d = (n - m) * 2;
+	else if ((d / 2) < (m / 2))
+		d = m;
+
+	not2d = ~d & mask;
+
+	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
+				 not2d);
+	if (ret)
+		return ret;
+
+	return update_config(rcg);
+}
+
 const struct clk_ops clk_rcg2_ops = {
 	.is_enabled = clk_rcg2_is_enabled,
 	.get_parent = clk_rcg2_get_parent,
@@ -365,6 +442,8 @@ const struct clk_ops clk_rcg2_ops = {
 	.determine_rate = clk_rcg2_determine_rate,
 	.set_rate = clk_rcg2_set_rate,
 	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+	.get_duty_cycle = clk_rcg2_get_duty_cycle,
+	.set_duty_cycle = clk_rcg2_set_duty_cycle,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_ops);
 
@@ -376,6 +455,8 @@ const struct clk_ops clk_rcg2_floor_ops = {
 	.determine_rate = clk_rcg2_determine_floor_rate,
 	.set_rate = clk_rcg2_set_floor_rate,
 	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
+	.get_duty_cycle = clk_rcg2_get_duty_cycle,
+	.set_duty_cycle = clk_rcg2_set_duty_cycle,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
```
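As a standalone sketch (outside the kernel, with invented helper names) of the D/N arithmetic the new ops implement: the N register of an MND RCG holds ~(N - M), the D register holds ~(2*d), and the resulting duty cycle is d/N. Setting a duty cycle therefore amounts to programming 2*d as roughly 2 * N * duty%, clamped to the register width.

```c
#include <stdio.h>

/*
 * Illustrative-only reimplementation of the "2d" computation used in
 * clk_rcg2_set_duty_cycle(); duty_to_2d() is an invented name, not a
 * kernel function. DIV_ROUND_CLOSEST(x, 100) is emulated as (x + 50) / 100.
 */
static unsigned int duty_to_2d(unsigned int n, unsigned int duty_num,
			       unsigned int duty_den, unsigned int mnd_width)
{
	unsigned int mask = (1u << mnd_width) - 1;
	unsigned int duty_per = duty_num * 100 / duty_den;
	/* 2d = round(n * duty% * 2 / 100), clamped to the register width */
	unsigned int d = (n * duty_per * 2 + 50) / 100;

	if (d > mask)
		d = mask;
	return d;
}

int main(void)
{
	/* e.g. n = 8, request 1/4: 2d = 4, so the high phase is 2 of 8 cycles */
	printf("2d = %u\n", duty_to_2d(8, 1, 4, 8));
	return 0;
}
```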