Diffstat (limited to 'drivers')
237 files changed, 14125 insertions, 4659 deletions
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 7376af25f947..801fa1cd0321 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -90,6 +90,17 @@ config COMMON_CLK_SCPI This driver uses SCPI Message Protocol to interact with the firmware providing all the clock controls. +config COMMON_CLK_SI5341 + tristate "Clock driver for SiLabs 5341 and 5340 A/B/C/D devices" + depends on I2C + select REGMAP_I2C + help + This driver supports Silicon Labs Si5341 and Si5340 programmable clock + generators. Not all features of these chips are currently supported + by the driver, in particular it only supports XTAL input. The chip can + be pre-programmed to support other configurations and features not yet + implemented in the driver. + config COMMON_CLK_SI5351 tristate "Clock driver for SiLabs 5351A/B/C" depends on I2C @@ -214,7 +225,7 @@ config CLK_QORIQ config COMMON_CLK_XGENE bool "Clock driver for APM XGene SoC" - default y + default ARCH_XGENE depends on ARM64 || COMPILE_TEST ---help--- Sypport for the APM X-Gene SoC reference, PLL, and device clocks. diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 9ef4305d55e0..0cad76021297 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o +obj-$(CONFIG_COMMON_CLK_SI5341) += clk-si5341.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o obj-$(CONFIG_COMMON_CLK_SI544) += clk-si544.o diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c index 45526f56f1ba..9bfe9a28294a 100644 --- a/drivers/clk/at91/sckc.c +++ b/drivers/clk/at91/sckc.c @@ -18,14 +18,18 @@ SLOW_CLOCK_FREQ) #define AT91_SCKC_CR 0x00 -#define AT91_SCKC_RCEN (1 << 0) -#define AT91_SCKC_OSC32EN (1 << 1) -#define AT91_SCKC_OSC32BYP (1 << 2) -#define AT91_SCKC_OSCSEL (1 << 3) + +struct clk_slow_bits { + u32 cr_rcen; + u32 cr_osc32en; + u32 cr_osc32byp; + u32 cr_oscsel; +}; struct clk_slow_osc { struct clk_hw hw; void __iomem *sckcr; + const struct clk_slow_bits *bits; unsigned long startup_usec; }; @@ -34,6 +38,7 @@ struct clk_slow_osc { struct clk_sama5d4_slow_osc { struct clk_hw hw; void __iomem *sckcr; + const struct clk_slow_bits *bits; unsigned long startup_usec; bool prepared; }; @@ -43,6 +48,7 @@ struct clk_sama5d4_slow_osc { struct clk_slow_rc_osc { struct clk_hw hw; void __iomem *sckcr; + const struct clk_slow_bits *bits; unsigned long frequency; unsigned long accuracy; unsigned long startup_usec; @@ -53,6 +59,7 @@ struct clk_slow_rc_osc { struct clk_sam9x5_slow { struct clk_hw hw; void __iomem *sckcr; + const struct clk_slow_bits *bits; u8 parent; }; @@ -64,10 +71,10 @@ static int clk_slow_osc_prepare(struct clk_hw *hw) void __iomem *sckcr = osc->sckcr; u32 tmp = readl(sckcr); - if (tmp & (AT91_SCKC_OSC32BYP | AT91_SCKC_OSC32EN)) + if (tmp & (osc->bits->cr_osc32byp | osc->bits->cr_osc32en)) return 0; - writel(tmp | AT91_SCKC_OSC32EN, sckcr); + writel(tmp | osc->bits->cr_osc32en, sckcr); usleep_range(osc->startup_usec, osc->startup_usec + 1); @@ -80,10 +87,10 @@ static void clk_slow_osc_unprepare(struct clk_hw *hw) void __iomem *sckcr = osc->sckcr; u32 tmp = readl(sckcr); - if (tmp & AT91_SCKC_OSC32BYP) + if (tmp & osc->bits->cr_osc32byp) return; - writel(tmp & ~AT91_SCKC_OSC32EN, sckcr); + writel(tmp & ~osc->bits->cr_osc32en, sckcr); } static int clk_slow_osc_is_prepared(struct clk_hw *hw) @@ -92,10 
+99,10 @@ static int clk_slow_osc_is_prepared(struct clk_hw *hw) void __iomem *sckcr = osc->sckcr; u32 tmp = readl(sckcr); - if (tmp & AT91_SCKC_OSC32BYP) + if (tmp & osc->bits->cr_osc32byp) return 1; - return !!(tmp & AT91_SCKC_OSC32EN); + return !!(tmp & osc->bits->cr_osc32en); } static const struct clk_ops slow_osc_ops = { @@ -109,7 +116,8 @@ at91_clk_register_slow_osc(void __iomem *sckcr, const char *name, const char *parent_name, unsigned long startup, - bool bypass) + bool bypass, + const struct clk_slow_bits *bits) { struct clk_slow_osc *osc; struct clk_hw *hw; @@ -132,10 +140,11 @@ at91_clk_register_slow_osc(void __iomem *sckcr, osc->hw.init = &init; osc->sckcr = sckcr; osc->startup_usec = startup; + osc->bits = bits; if (bypass) - writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP, - sckcr); + writel((readl(sckcr) & ~osc->bits->cr_osc32en) | + osc->bits->cr_osc32byp, sckcr); hw = &osc->hw; ret = clk_hw_register(NULL, &osc->hw); @@ -147,6 +156,14 @@ at91_clk_register_slow_osc(void __iomem *sckcr, return hw; } +static void at91_clk_unregister_slow_osc(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + + clk_hw_unregister(hw); + kfree(osc); +} + static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -168,7 +185,7 @@ static int clk_slow_rc_osc_prepare(struct clk_hw *hw) struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); void __iomem *sckcr = osc->sckcr; - writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr); + writel(readl(sckcr) | osc->bits->cr_rcen, sckcr); usleep_range(osc->startup_usec, osc->startup_usec + 1); @@ -180,14 +197,14 @@ static void clk_slow_rc_osc_unprepare(struct clk_hw *hw) struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); void __iomem *sckcr = osc->sckcr; - writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr); + writel(readl(sckcr) & ~osc->bits->cr_rcen, sckcr); } static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw) { struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); - return !!(readl(osc->sckcr) & AT91_SCKC_RCEN); + return !!(readl(osc->sckcr) & osc->bits->cr_rcen); } static const struct clk_ops slow_rc_osc_ops = { @@ -203,7 +220,8 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr, const char *name, unsigned long frequency, unsigned long accuracy, - unsigned long startup) + unsigned long startup, + const struct clk_slow_bits *bits) { struct clk_slow_rc_osc *osc; struct clk_hw *hw; @@ -225,6 +243,7 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr, osc->hw.init = &init; osc->sckcr = sckcr; + osc->bits = bits; osc->frequency = frequency; osc->accuracy = accuracy; osc->startup_usec = startup; @@ -239,6 +258,14 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr, return hw; } +static void at91_clk_unregister_slow_rc_osc(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + clk_hw_unregister(hw); + kfree(osc); +} + static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index) { struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); @@ -250,14 +277,14 @@ static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index) tmp = readl(sckcr); - if ((!index && !(tmp & AT91_SCKC_OSCSEL)) || - (index && (tmp & AT91_SCKC_OSCSEL))) + if ((!index && !(tmp & slowck->bits->cr_oscsel)) || + (index && (tmp & slowck->bits->cr_oscsel))) return 0; if (index) - tmp |= AT91_SCKC_OSCSEL; + tmp |= slowck->bits->cr_oscsel; else - tmp &= ~AT91_SCKC_OSCSEL; + tmp &= ~slowck->bits->cr_oscsel; writel(tmp, sckcr); @@ -270,7 +297,7 @@ static u8 
clk_sam9x5_slow_get_parent(struct clk_hw *hw) { struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); - return !!(readl(slowck->sckcr) & AT91_SCKC_OSCSEL); + return !!(readl(slowck->sckcr) & slowck->bits->cr_oscsel); } static const struct clk_ops sam9x5_slow_ops = { @@ -282,7 +309,8 @@ static struct clk_hw * __init at91_clk_register_sam9x5_slow(void __iomem *sckcr, const char *name, const char **parent_names, - int num_parents) + int num_parents, + const struct clk_slow_bits *bits) { struct clk_sam9x5_slow *slowck; struct clk_hw *hw; @@ -304,7 +332,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr, slowck->hw.init = &init; slowck->sckcr = sckcr; - slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL); + slowck->bits = bits; + slowck->parent = !!(readl(sckcr) & slowck->bits->cr_oscsel); hw = &slowck->hw; ret = clk_hw_register(NULL, &slowck->hw); @@ -316,22 +345,33 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr, return hw; } +static void at91_clk_unregister_sam9x5_slow(struct clk_hw *hw) +{ + struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); + + clk_hw_unregister(hw); + kfree(slowck); +} + static void __init at91sam9x5_sckc_register(struct device_node *np, - unsigned int rc_osc_startup_us) + unsigned int rc_osc_startup_us, + const struct clk_slow_bits *bits) { const char *parent_names[2] = { "slow_rc_osc", "slow_osc" }; void __iomem *regbase = of_iomap(np, 0); struct device_node *child = NULL; const char *xtal_name; - struct clk_hw *hw; + struct clk_hw *slow_rc, *slow_osc, *slowck; bool bypass; + int ret; if (!regbase) return; - hw = at91_clk_register_slow_rc_osc(regbase, parent_names[0], 32768, - 50000000, rc_osc_startup_us); - if (IS_ERR(hw)) + slow_rc = at91_clk_register_slow_rc_osc(regbase, parent_names[0], + 32768, 50000000, + rc_osc_startup_us, bits); + if (IS_ERR(slow_rc)) return; xtal_name = of_clk_get_parent_name(np, 0); @@ -339,7 +379,7 @@ static void __init at91sam9x5_sckc_register(struct device_node *np, /* DT backward compatibility */ child = of_get_compatible_child(np, "atmel,at91sam9x5-clk-slow-osc"); if (!child) - return; + goto unregister_slow_rc; xtal_name = of_clk_get_parent_name(child, 0); bypass = of_property_read_bool(child, "atmel,osc-bypass"); @@ -350,38 +390,133 @@ static void __init at91sam9x5_sckc_register(struct device_node *np, } if (!xtal_name) - return; - - hw = at91_clk_register_slow_osc(regbase, parent_names[1], xtal_name, - 1200000, bypass); - if (IS_ERR(hw)) - return; + goto unregister_slow_rc; - hw = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names, 2); - if (IS_ERR(hw)) - return; + slow_osc = at91_clk_register_slow_osc(regbase, parent_names[1], + xtal_name, 1200000, bypass, bits); + if (IS_ERR(slow_osc)) + goto unregister_slow_rc; - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); + slowck = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names, + 2, bits); + if (IS_ERR(slowck)) + goto unregister_slow_osc; /* DT backward compatibility */ if (child) - of_clk_add_hw_provider(child, of_clk_hw_simple_get, hw); + ret = of_clk_add_hw_provider(child, of_clk_hw_simple_get, + slowck); + else + ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, slowck); + + if (WARN_ON(ret)) + goto unregister_slowck; + + return; + +unregister_slowck: + at91_clk_unregister_sam9x5_slow(slowck); +unregister_slow_osc: + at91_clk_unregister_slow_osc(slow_osc); +unregister_slow_rc: + at91_clk_unregister_slow_rc_osc(slow_rc); } +static const struct clk_slow_bits at91sam9x5_bits = { + .cr_rcen = BIT(0), + .cr_osc32en = BIT(1), + 
.cr_osc32byp = BIT(2), + .cr_oscsel = BIT(3), +}; + static void __init of_at91sam9x5_sckc_setup(struct device_node *np) { - at91sam9x5_sckc_register(np, 75); + at91sam9x5_sckc_register(np, 75, &at91sam9x5_bits); } CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc", of_at91sam9x5_sckc_setup); static void __init of_sama5d3_sckc_setup(struct device_node *np) { - at91sam9x5_sckc_register(np, 500); + at91sam9x5_sckc_register(np, 500, &at91sam9x5_bits); } CLK_OF_DECLARE(sama5d3_clk_sckc, "atmel,sama5d3-sckc", of_sama5d3_sckc_setup); +static const struct clk_slow_bits at91sam9x60_bits = { + .cr_osc32en = BIT(1), + .cr_osc32byp = BIT(2), + .cr_oscsel = BIT(24), +}; + +static void __init of_sam9x60_sckc_setup(struct device_node *np) +{ + void __iomem *regbase = of_iomap(np, 0); + struct clk_hw_onecell_data *clk_data; + struct clk_hw *slow_rc, *slow_osc; + const char *xtal_name; + const char *parent_names[2] = { "slow_rc_osc", "slow_osc" }; + bool bypass; + int ret; + + if (!regbase) + return; + + slow_rc = clk_hw_register_fixed_rate(NULL, parent_names[0], NULL, 0, + 32768); + if (IS_ERR(slow_rc)) + return; + + xtal_name = of_clk_get_parent_name(np, 0); + if (!xtal_name) + goto unregister_slow_rc; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + slow_osc = at91_clk_register_slow_osc(regbase, parent_names[1], + xtal_name, 5000000, bypass, + &at91sam9x60_bits); + if (IS_ERR(slow_osc)) + goto unregister_slow_rc; + + clk_data = kzalloc(sizeof(*clk_data) + (2 * sizeof(struct clk_hw *)), + GFP_KERNEL); + if (!clk_data) + goto unregister_slow_osc; + + /* MD_SLCK and TD_SLCK. */ + clk_data->num = 2; + clk_data->hws[0] = clk_hw_register_fixed_rate(NULL, "md_slck", + parent_names[0], + 0, 32768); + if (IS_ERR(clk_data->hws[0])) + goto clk_data_free; + + clk_data->hws[1] = at91_clk_register_sam9x5_slow(regbase, "td_slck", + parent_names, 2, + &at91sam9x60_bits); + if (IS_ERR(clk_data->hws[1])) + goto unregister_md_slck; + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); + if (WARN_ON(ret)) + goto unregister_td_slck; + + return; + +unregister_td_slck: + at91_clk_unregister_sam9x5_slow(clk_data->hws[1]); +unregister_md_slck: + clk_hw_unregister(clk_data->hws[0]); +clk_data_free: + kfree(clk_data); +unregister_slow_osc: + at91_clk_unregister_slow_osc(slow_osc); +unregister_slow_rc: + clk_hw_unregister(slow_rc); +} +CLK_OF_DECLARE(sam9x60_clk_sckc, "microchip,sam9x60-sckc", + of_sam9x60_sckc_setup); + static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw) { struct clk_sama5d4_slow_osc *osc = to_clk_sama5d4_slow_osc(hw); @@ -393,7 +528,7 @@ static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw) * Assume that if it has already been selected (for example by the * bootloader), enough time has aready passed. 
*/ - if ((readl(osc->sckcr) & AT91_SCKC_OSCSEL)) { + if ((readl(osc->sckcr) & osc->bits->cr_oscsel)) { osc->prepared = true; return 0; } @@ -416,33 +551,35 @@ static const struct clk_ops sama5d4_slow_osc_ops = { .is_prepared = clk_sama5d4_slow_osc_is_prepared, }; +static const struct clk_slow_bits at91sama5d4_bits = { + .cr_oscsel = BIT(3), +}; + static void __init of_sama5d4_sckc_setup(struct device_node *np) { void __iomem *regbase = of_iomap(np, 0); - struct clk_hw *hw; + struct clk_hw *slow_rc, *slowck; struct clk_sama5d4_slow_osc *osc; struct clk_init_data init; const char *xtal_name; const char *parent_names[2] = { "slow_rc_osc", "slow_osc" }; - bool bypass; int ret; if (!regbase) return; - hw = clk_hw_register_fixed_rate_with_accuracy(NULL, parent_names[0], - NULL, 0, 32768, - 250000000); - if (IS_ERR(hw)) + slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL, + parent_names[0], + NULL, 0, 32768, + 250000000); + if (IS_ERR(slow_rc)) return; xtal_name = of_clk_get_parent_name(np, 0); - bypass = of_property_read_bool(np, "atmel,osc-bypass"); - osc = kzalloc(sizeof(*osc), GFP_KERNEL); if (!osc) - return; + goto unregister_slow_rc; init.name = parent_names[1]; init.ops = &sama5d4_slow_osc_ops; @@ -453,22 +590,32 @@ static void __init of_sama5d4_sckc_setup(struct device_node *np) osc->hw.init = &init; osc->sckcr = regbase; osc->startup_usec = 1200000; + osc->bits = &at91sama5d4_bits; - if (bypass) - writel((readl(regbase) | AT91_SCKC_OSC32BYP), regbase); - - hw = &osc->hw; ret = clk_hw_register(NULL, &osc->hw); - if (ret) { - kfree(osc); - return; - } - - hw = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names, 2); - if (IS_ERR(hw)) - return; - - of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw); + if (ret) + goto free_slow_osc_data; + + slowck = at91_clk_register_sam9x5_slow(regbase, "slowck", + parent_names, 2, + &at91sama5d4_bits); + if (IS_ERR(slowck)) + goto unregister_slow_osc; + + ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, slowck); + if (WARN_ON(ret)) + goto unregister_slowck; + + return; + +unregister_slowck: + at91_clk_unregister_sam9x5_slow(slowck); +unregister_slow_osc: + clk_hw_unregister(&osc->hw); +free_slow_osc_data: + kfree(osc); +unregister_slow_rc: + clk_hw_unregister(slow_rc); } CLK_OF_DECLARE(sama5d4_clk_sckc, "atmel,sama5d4-sckc", of_sama5d4_sckc_setup); diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig index 29ee7b776cd4..8c83977a7dc4 100644 --- a/drivers/clk/bcm/Kconfig +++ b/drivers/clk/bcm/Kconfig @@ -1,4 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only +config CLK_BCM2835 + bool "Broadcom BCM2835 clock support" + depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST + depends on COMMON_CLK + default ARCH_BCM2835 || ARCH_BRCMSTB + help + Enable common clock framework support for Broadcom BCM2835 + SoCs. 
+ config CLK_BCM_63XX bool "Broadcom BCM63xx clock support" depends on ARCH_BCM_63XX || COMPILE_TEST @@ -8,6 +17,14 @@ config CLK_BCM_63XX Enable common clock framework support for Broadcom BCM63xx DSL SoCs based on the ARM architecture +config CLK_BCM_63XX_GATE + bool "Broadcom BCM63xx gated clock support" + depends on BMIPS_GENERIC || COMPILE_TEST + default BMIPS_GENERIC + help + Enable common clock framework support for Broadcom BCM63xx DSL SoCs + based on the MIPS architecture + config CLK_BCM_KONA bool "Broadcom Kona CCU clock support" depends on ARCH_BCM_MOBILE || COMPILE_TEST @@ -64,3 +81,10 @@ config CLK_BCM_SR default ARCH_BCM_IPROC help Enable common clock framework support for the Broadcom Stingray SoC + +config CLK_RASPBERRYPI + tristate "Raspberry Pi firmware based clock support" + depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) + help + Enable common clock framework support for Raspberry Pi's firmware + dependent clocks diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile index 002661d39128..0070ddf6cdd2 100644 --- a/drivers/clk/bcm/Makefile +++ b/drivers/clk/bcm/Makefile @@ -1,12 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CLK_BCM_63XX) += clk-bcm63xx.o +obj-$(CONFIG_CLK_BCM_63XX_GATE) += clk-bcm63xx-gate.o obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o -obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o -obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835-aux.o +obj-$(CONFIG_CLK_BCM2835) += clk-bcm2835.o +obj-$(CONFIG_CLK_BCM2835) += clk-bcm2835-aux.o +obj-$(CONFIG_CLK_RASPBERRYPI) += clk-raspberrypi.o obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o obj-$(CONFIG_CLK_BCM_HR2) += clk-hr2.o diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 770bb01f523e..867ae3c20041 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -1651,30 +1651,10 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = CLK_SET_RATE_PARENT), - /* PLLB is used for the ARM's clock. */ - [BCM2835_PLLB] = REGISTER_PLL( - .name = "pllb", - .cm_ctrl_reg = CM_PLLB, - .a2w_ctrl_reg = A2W_PLLB_CTRL, - .frac_reg = A2W_PLLB_FRAC, - .ana_reg_base = A2W_PLLB_ANA0, - .reference_enable_mask = A2W_XOSC_CTRL_PLLB_ENABLE, - .lock_mask = CM_LOCK_FLOCKB, - - .ana = &bcm2835_ana_default, - - .min_rate = 600000000u, - .max_rate = 3000000000u, - .max_fb_rate = BCM2835_MAX_FB_RATE), - [BCM2835_PLLB_ARM] = REGISTER_PLL_DIV( - .name = "pllb_arm", - .source_pll = "pllb", - .cm_reg = CM_PLLB, - .a2w_reg = A2W_PLLB_ARM, - .load_mask = CM_PLLB_LOADARM, - .hold_mask = CM_PLLB_HOLDARM, - .fixed_divider = 1, - .flags = CLK_SET_RATE_PARENT), + /* + * PLLB is used for the ARM's clock. Controlled by firmware, see + * clk-raspberrypi.c. + */ /* * PLLC is the core PLL, used to drive the core VPU clock. 
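A minimal standalone sketch of the bit-parameterization pattern introduced in sckc.c above may help before the new files that follow: instead of the fixed AT91_SCKC_* masks, each clk_hw now carries a pointer to a per-SoC clk_slow_bits instance, so the same clock ops serve both the at91sam9x5 register layout and the sam9x60 layout, whose OSCSEL bit moved to bit 24. The demo_-prefixed names below are illustrative only and do not appear in the patch.

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/io.h>

/* Illustrative sketch, mirroring the clk_slow_bits pattern in sckc.c. */
struct demo_slow_bits {
	u32 cr_osc32en;
	u32 cr_oscsel;
};

struct demo_slow_osc {
	struct clk_hw hw;
	void __iomem *sckcr;
	const struct demo_slow_bits *bits;	/* per-SoC register layout */
};

#define to_demo_slow_osc(_hw) container_of(_hw, struct demo_slow_osc, hw)

static int demo_slow_osc_is_prepared(struct clk_hw *hw)
{
	struct demo_slow_osc *osc = to_demo_slow_osc(hw);

	/* One implementation for every SoC; only the bit mask differs. */
	return !!(readl(osc->sckcr) & osc->bits->cr_osc32en);
}

/* One const bits instance per register layout. */
static const struct demo_slow_bits demo_sam9x5_bits = {
	.cr_osc32en = BIT(1),
	.cr_oscsel = BIT(3),
};

static const struct demo_slow_bits demo_sam9x60_bits = {
	.cr_osc32en = BIT(1),
	.cr_oscsel = BIT(24),		/* OSCSEL moved to bit 24 on sam9x60 */
};

Keeping the per-SoC differences in const data rather than duplicating the ops is what lets of_sam9x60_sckc_setup() further down reuse at91_clk_register_slow_osc() and at91_clk_register_sam9x5_slow() unchanged, simply passing &at91sam9x60_bits.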
diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c new file mode 100644 index 000000000000..9e1dcd43258c --- /dev/null +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/clk-provider.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +struct clk_bcm63xx_table_entry { + const char * const name; + u8 bit; + unsigned long flags; +}; + +struct clk_bcm63xx_hw { + void __iomem *regs; + spinlock_t lock; + + struct clk_hw_onecell_data data; +}; + +static const struct clk_bcm63xx_table_entry bcm3368_clocks[] = { + { .name = "mac", .bit = 3, }, + { .name = "tc", .bit = 5, }, + { .name = "us_top", .bit = 6, }, + { .name = "ds_top", .bit = 7, }, + { .name = "acm", .bit = 8, }, + { .name = "spi", .bit = 9, }, + { .name = "usbs", .bit = 10, }, + { .name = "bmu", .bit = 11, }, + { .name = "pcm", .bit = 12, }, + { .name = "ntp", .bit = 13, }, + { .name = "acp_b", .bit = 14, }, + { .name = "acp_a", .bit = 15, }, + { .name = "emusb", .bit = 17, }, + { .name = "enet0", .bit = 18, }, + { .name = "enet1", .bit = 19, }, + { .name = "usbsu", .bit = 20, }, + { .name = "ephy", .bit = 21, }, + { }, +}; + +static const struct clk_bcm63xx_table_entry bcm6328_clocks[] = { + { .name = "phy_mips", .bit = 0, }, + { .name = "adsl_qproc", .bit = 1, }, + { .name = "adsl_afe", .bit = 2, }, + { .name = "adsl", .bit = 3, }, + { .name = "mips", .bit = 4, .flags = CLK_IS_CRITICAL, }, + { .name = "sar", .bit = 5, }, + { .name = "pcm", .bit = 6, }, + { .name = "usbd", .bit = 7, }, + { .name = "usbh", .bit = 8, }, + { .name = "hsspi", .bit = 9, }, + { .name = "pcie", .bit = 10, }, + { .name = "robosw", .bit = 11, }, + { }, +}; + +static const struct clk_bcm63xx_table_entry bcm6358_clocks[] = { + { .name = "enet", .bit = 4, }, + { .name = "adslphy", .bit = 5, }, + { .name = "pcm", .bit = 8, }, + { .name = "spi", .bit = 9, }, + { .name = "usbs", .bit = 10, }, + { .name = "sar", .bit = 11, }, + { .name = "emusb", .bit = 17, }, + { .name = "enet0", .bit = 18, }, + { .name = "enet1", .bit = 19, }, + { .name = "usbsu", .bit = 20, }, + { .name = "ephy", .bit = 21, }, + { }, +}; + +static const struct clk_bcm63xx_table_entry bcm6362_clocks[] = { + { .name = "adsl_qproc", .bit = 1, }, + { .name = "adsl_afe", .bit = 2, }, + { .name = "adsl", .bit = 3, }, + { .name = "mips", .bit = 4, .flags = CLK_IS_CRITICAL, }, + { .name = "wlan_ocp", .bit = 5, }, + { .name = "swpkt_usb", .bit = 7, }, + { .name = "swpkt_sar", .bit = 8, }, + { .name = "sar", .bit = 9, }, + { .name = "robosw", .bit = 10, }, + { .name = "pcm", .bit = 11, }, + { .name = "usbd", .bit = 12, }, + { .name = "usbh", .bit = 13, }, + { .name = "ipsec", .bit = 14, }, + { .name = "spi", .bit = 15, }, + { .name = "hsspi", .bit = 16, }, + { .name = "pcie", .bit = 17, }, + { .name = "fap", .bit = 18, }, + { .name = "phymips", .bit = 19, }, + { .name = "nand", .bit = 20, }, + { }, +}; + +static const struct clk_bcm63xx_table_entry bcm6368_clocks[] = { + { .name = "vdsl_qproc", .bit = 2, }, + { .name = "vdsl_afe", .bit = 3, }, + { .name = "vdsl_bonding", .bit = 4, }, + { .name = "vdsl", .bit = 5, }, + { .name = "phymips", .bit = 6, }, + { .name = "swpkt_usb", .bit = 7, }, + { .name = "swpkt_sar", .bit = 8, }, + { .name = "spi", .bit = 9, }, + { .name = "usbd", .bit = 10, }, + { .name = "sar", .bit = 11, }, + { .name = "robosw", .bit = 12, }, + { .name = "utopia", .bit = 13, }, + { .name = "pcm", .bit = 14, }, + { .name = "usbh", 
.bit = 15, }, + { .name = "disable_gless", .bit = 16, }, + { .name = "nand", .bit = 17, }, + { .name = "ipsec", .bit = 18, }, + { }, +}; + +static const struct clk_bcm63xx_table_entry bcm63268_clocks[] = { + { .name = "disable_gless", .bit = 0, }, + { .name = "vdsl_qproc", .bit = 1, }, + { .name = "vdsl_afe", .bit = 2, }, + { .name = "vdsl", .bit = 3, }, + { .name = "mips", .bit = 4, .flags = CLK_IS_CRITICAL, }, + { .name = "wlan_ocp", .bit = 5, }, + { .name = "dect", .bit = 6, }, + { .name = "fap0", .bit = 7, }, + { .name = "fap1", .bit = 8, }, + { .name = "sar", .bit = 9, }, + { .name = "robosw", .bit = 10, }, + { .name = "pcm", .bit = 11, }, + { .name = "usbd", .bit = 12, }, + { .name = "usbh", .bit = 13, }, + { .name = "ipsec", .bit = 14, }, + { .name = "spi", .bit = 15, }, + { .name = "hsspi", .bit = 16, }, + { .name = "pcie", .bit = 17, }, + { .name = "phymips", .bit = 18, }, + { .name = "gmac", .bit = 19, }, + { .name = "nand", .bit = 20, }, + { .name = "tbus", .bit = 27, }, + { .name = "robosw250", .bit = 31, }, + { }, +}; + +static int clk_bcm63xx_probe(struct platform_device *pdev) +{ + const struct clk_bcm63xx_table_entry *entry, *table; + struct clk_bcm63xx_hw *hw; + struct resource *r; + u8 maxbit = 0; + int i, ret; + + table = of_device_get_match_data(&pdev->dev); + if (!table) + return -EINVAL; + + for (entry = table; entry->name; entry++) + maxbit = max_t(u8, maxbit, entry->bit); + + hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit), + GFP_KERNEL); + if (!hw) + return -ENOMEM; + + platform_set_drvdata(pdev, hw); + + spin_lock_init(&hw->lock); + + hw->data.num = maxbit; + for (i = 0; i < maxbit; i++) + hw->data.hws[i] = ERR_PTR(-ENODEV); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hw->regs = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(hw->regs)) + return PTR_ERR(hw->regs); + + for (entry = table; entry->name; entry++) { + struct clk_hw *clk; + + clk = clk_hw_register_gate(&pdev->dev, entry->name, NULL, + entry->flags, hw->regs, entry->bit, + CLK_GATE_BIG_ENDIAN, &hw->lock); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto out_err; + } + + hw->data.hws[entry->bit] = clk; + } + + ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get, + &hw->data); + if (!ret) + return 0; +out_err: + for (i = 0; i < hw->data.num; i++) { + if (!IS_ERR(hw->data.hws[i])) + clk_hw_unregister_gate(hw->data.hws[i]); + } + + return ret; +} + +static int clk_bcm63xx_remove(struct platform_device *pdev) +{ + struct clk_bcm63xx_hw *hw = platform_get_drvdata(pdev); + int i; + + of_clk_del_provider(pdev->dev.of_node); + + for (i = 0; i < hw->data.num; i++) { + if (!IS_ERR(hw->data.hws[i])) + clk_hw_unregister_gate(hw->data.hws[i]); + } + + return 0; +} + +static const struct of_device_id clk_bcm63xx_dt_ids[] = { + { .compatible = "brcm,bcm3368-clocks", .data = &bcm3368_clocks, }, + { .compatible = "brcm,bcm6328-clocks", .data = &bcm6328_clocks, }, + { .compatible = "brcm,bcm6358-clocks", .data = &bcm6358_clocks, }, + { .compatible = "brcm,bcm6362-clocks", .data = &bcm6362_clocks, }, + { .compatible = "brcm,bcm6368-clocks", .data = &bcm6368_clocks, }, + { .compatible = "brcm,bcm63268-clocks", .data = &bcm63268_clocks, }, + { } +}; + +static struct platform_driver clk_bcm63xx = { + .probe = clk_bcm63xx_probe, + .remove = clk_bcm63xx_remove, + .driver = { + .name = "bcm63xx-clock", + .of_match_table = clk_bcm63xx_dt_ids, + }, +}; +builtin_platform_driver(clk_bcm63xx); diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c new file 
mode 100644 index 000000000000..1654fd0eedc9 --- /dev/null +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Raspberry Pi driver for firmware controlled clocks + * + * Even though clk-bcm2835 provides an interface to the hardware registers for + * the system clocks we've had to factor out 'pllb' as the firmware 'owns' it. + * We're not allowed to change it directly as we might race with the + * over-temperature and under-voltage protections provided by the firmware. + * + * Copyright (C) 2019 Nicolas Saenz Julienne <nsaenzjulienne@suse.de> + */ + +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <soc/bcm2835/raspberrypi-firmware.h> + +#define RPI_FIRMWARE_ARM_CLK_ID 0x00000003 + +#define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0) +#define RPI_FIRMWARE_STATE_WAIT_BIT BIT(1) + +/* + * Even though the firmware interface alters 'pllb' the frequencies are + * provided as per 'pllb_arm'. We need to scale before passing them trough. + */ +#define RPI_FIRMWARE_PLLB_ARM_DIV_RATE 2 + +#define A2W_PLL_FRAC_BITS 20 + +struct raspberrypi_clk { + struct device *dev; + struct rpi_firmware *firmware; + struct platform_device *cpufreq; + + unsigned long min_rate; + unsigned long max_rate; + + struct clk_hw pllb; + struct clk_hw *pllb_arm; + struct clk_lookup *pllb_arm_lookup; +}; + +/* + * Structure of the message passed to Raspberry Pi's firmware in order to + * change clock rates. The 'disable_turbo' option is only available to the ARM + * clock (pllb) which we enable by default as turbo mode will alter multiple + * clocks at once. + * + * Even though we're able to access the clock registers directly we're bound to + * use the firmware interface as the firmware ultimately takes care of + * mitigating overheating/undervoltage situations and we would be changing + * frequencies behind his back. 
+ * + * For more information on the firmware interface check: + * https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface + */ +struct raspberrypi_firmware_prop { + __le32 id; + __le32 val; + __le32 disable_turbo; +} __packed; + +static int raspberrypi_clock_property(struct rpi_firmware *firmware, u32 tag, + u32 clk, u32 *val) +{ + struct raspberrypi_firmware_prop msg = { + .id = cpu_to_le32(clk), + .val = cpu_to_le32(*val), + .disable_turbo = cpu_to_le32(1), + }; + int ret; + + ret = rpi_firmware_property(firmware, tag, &msg, sizeof(msg)); + if (ret) + return ret; + + *val = le32_to_cpu(msg.val); + + return 0; +} + +static int raspberrypi_fw_pll_is_on(struct clk_hw *hw) +{ + struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, + pllb); + u32 val = 0; + int ret; + + ret = raspberrypi_clock_property(rpi->firmware, + RPI_FIRMWARE_GET_CLOCK_STATE, + RPI_FIRMWARE_ARM_CLK_ID, &val); + if (ret) + return 0; + + return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT); +} + + +static unsigned long raspberrypi_fw_pll_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, + pllb); + u32 val = 0; + int ret; + + ret = raspberrypi_clock_property(rpi->firmware, + RPI_FIRMWARE_GET_CLOCK_RATE, + RPI_FIRMWARE_ARM_CLK_ID, + &val); + if (ret) + return ret; + + return val * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; +} + +static int raspberrypi_fw_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, + pllb); + u32 new_rate = rate / RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + int ret; + + ret = raspberrypi_clock_property(rpi->firmware, + RPI_FIRMWARE_SET_CLOCK_RATE, + RPI_FIRMWARE_ARM_CLK_ID, + &new_rate); + if (ret) + dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d", + clk_hw_get_name(hw), ret); + + return ret; +} + +/* + * Sadly there is no firmware rate rounding interface. We borrowed it from + * clk-bcm2835. + */ +static int raspberrypi_pll_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, + pllb); + u64 div, final_rate; + u32 ndiv, fdiv; + + /* We can't use req->rate directly as it would overflow */ + final_rate = clamp(req->rate, rpi->min_rate, rpi->max_rate); + + div = (u64)final_rate << A2W_PLL_FRAC_BITS; + do_div(div, req->best_parent_rate); + + ndiv = div >> A2W_PLL_FRAC_BITS; + fdiv = div & ((1 << A2W_PLL_FRAC_BITS) - 1); + + final_rate = ((u64)req->best_parent_rate * + ((ndiv << A2W_PLL_FRAC_BITS) + fdiv)); + + req->rate = final_rate >> A2W_PLL_FRAC_BITS; + + return 0; +} + +static const struct clk_ops raspberrypi_firmware_pll_clk_ops = { + .is_prepared = raspberrypi_fw_pll_is_on, + .recalc_rate = raspberrypi_fw_pll_get_rate, + .set_rate = raspberrypi_fw_pll_set_rate, + .determine_rate = raspberrypi_pll_determine_rate, +}; + +static int raspberrypi_register_pllb(struct raspberrypi_clk *rpi) +{ + u32 min_rate = 0, max_rate = 0; + struct clk_init_data init; + int ret; + + memset(&init, 0, sizeof(init)); + + /* All of the PLLs derive from the external oscillator. 
*/ + init.parent_names = (const char *[]){ "osc" }; + init.num_parents = 1; + init.name = "pllb"; + init.ops = &raspberrypi_firmware_pll_clk_ops; + init.flags = CLK_GET_RATE_NOCACHE | CLK_IGNORE_UNUSED; + + /* Get min & max rates set by the firmware */ + ret = raspberrypi_clock_property(rpi->firmware, + RPI_FIRMWARE_GET_MIN_CLOCK_RATE, + RPI_FIRMWARE_ARM_CLK_ID, + &min_rate); + if (ret) { + dev_err(rpi->dev, "Failed to get %s min freq: %d\n", + init.name, ret); + return ret; + } + + ret = raspberrypi_clock_property(rpi->firmware, + RPI_FIRMWARE_GET_MAX_CLOCK_RATE, + RPI_FIRMWARE_ARM_CLK_ID, + &max_rate); + if (ret) { + dev_err(rpi->dev, "Failed to get %s max freq: %d\n", + init.name, ret); + return ret; + } + + if (!min_rate || !max_rate) { + dev_err(rpi->dev, "Unexpected frequency range: min %u, max %u\n", + min_rate, max_rate); + return -EINVAL; + } + + dev_info(rpi->dev, "CPU frequency range: min %u, max %u\n", + min_rate, max_rate); + + rpi->min_rate = min_rate * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + rpi->max_rate = max_rate * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + + rpi->pllb.init = &init; + + return devm_clk_hw_register(rpi->dev, &rpi->pllb); +} + +static int raspberrypi_register_pllb_arm(struct raspberrypi_clk *rpi) +{ + rpi->pllb_arm = clk_hw_register_fixed_factor(rpi->dev, + "pllb_arm", "pllb", + CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, + 1, 2); + if (IS_ERR(rpi->pllb_arm)) { + dev_err(rpi->dev, "Failed to initialize pllb_arm\n"); + return PTR_ERR(rpi->pllb_arm); + } + + rpi->pllb_arm_lookup = clkdev_hw_create(rpi->pllb_arm, NULL, "cpu0"); + if (!rpi->pllb_arm_lookup) { + dev_err(rpi->dev, "Failed to initialize pllb_arm_lookup\n"); + clk_hw_unregister_fixed_factor(rpi->pllb_arm); + return -ENOMEM; + } + + return 0; +} + +static int raspberrypi_clk_probe(struct platform_device *pdev) +{ + struct device_node *firmware_node; + struct device *dev = &pdev->dev; + struct rpi_firmware *firmware; + struct raspberrypi_clk *rpi; + int ret; + + firmware_node = of_find_compatible_node(NULL, NULL, + "raspberrypi,bcm2835-firmware"); + if (!firmware_node) { + dev_err(dev, "Missing firmware node\n"); + return -ENOENT; + } + + firmware = rpi_firmware_get(firmware_node); + of_node_put(firmware_node); + if (!firmware) + return -EPROBE_DEFER; + + rpi = devm_kzalloc(dev, sizeof(*rpi), GFP_KERNEL); + if (!rpi) + return -ENOMEM; + + rpi->dev = dev; + rpi->firmware = firmware; + platform_set_drvdata(pdev, rpi); + + ret = raspberrypi_register_pllb(rpi); + if (ret) { + dev_err(dev, "Failed to initialize pllb, %d\n", ret); + return ret; + } + + ret = raspberrypi_register_pllb_arm(rpi); + if (ret) + return ret; + + rpi->cpufreq = platform_device_register_data(dev, "raspberrypi-cpufreq", + -1, NULL, 0); + + return 0; +} + +static int raspberrypi_clk_remove(struct platform_device *pdev) +{ + struct raspberrypi_clk *rpi = platform_get_drvdata(pdev); + + platform_device_unregister(rpi->cpufreq); + + return 0; +} + +static struct platform_driver raspberrypi_clk_driver = { + .driver = { + .name = "raspberrypi-clk", + }, + .probe = raspberrypi_clk_probe, + .remove = raspberrypi_clk_remove, +}; +module_platform_driver(raspberrypi_clk_driver); + +MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>"); +MODULE_DESCRIPTION("Raspberry Pi firmware clock driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:raspberrypi-clk"); diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c index 06499568cf07..524bf9a53098 100644 --- a/drivers/clk/clk-bulk.c +++ b/drivers/clk/clk-bulk.c @@ -75,8 +75,8 @@ void 
clk_bulk_put(int num_clks, struct clk_bulk_data *clks) } EXPORT_SYMBOL_GPL(clk_bulk_put); -int __must_check clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks) +static int __clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks, bool optional) { int ret; int i; @@ -88,10 +88,14 @@ int __must_check clk_bulk_get(struct device *dev, int num_clks, clks[i].clk = clk_get(dev, clks[i].id); if (IS_ERR(clks[i].clk)) { ret = PTR_ERR(clks[i].clk); + clks[i].clk = NULL; + + if (ret == -ENOENT && optional) + continue; + if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get clk '%s': %d\n", clks[i].id, ret); - clks[i].clk = NULL; goto err; } } @@ -103,8 +107,21 @@ err: return ret; } + +int __must_check clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return __clk_bulk_get(dev, num_clks, clks, false); +} EXPORT_SYMBOL(clk_bulk_get); +int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return __clk_bulk_get(dev, num_clks, clks, true); +} +EXPORT_SYMBOL_GPL(clk_bulk_get_optional); + void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) { if (IS_ERR_OR_NULL(clks)) diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c index 0443dfc82794..239102e37e2f 100644 --- a/drivers/clk/clk-cdce706.c +++ b/drivers/clk/clk-cdce706.c @@ -630,7 +630,7 @@ of_clk_cdce_get(struct of_phandle_args *clkspec, void *data) static int cdce706_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; struct cdce706_dev_data *cdce; int ret; diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index daa1fc8fba53..be160764911b 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -52,8 +52,8 @@ static void devm_clk_bulk_release(struct device *dev, void *res) clk_bulk_put(devres->num_clks, devres->clks); } -int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, - struct clk_bulk_data *clks) +static int __devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks, bool optional) { struct clk_bulk_devres *devres; int ret; @@ -63,7 +63,10 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, if (!devres) return -ENOMEM; - ret = clk_bulk_get(dev, num_clks, clks); + if (optional) + ret = clk_bulk_get_optional(dev, num_clks, clks); + else + ret = clk_bulk_get(dev, num_clks, clks); if (!ret) { devres->clks = clks; devres->num_clks = num_clks; @@ -74,8 +77,21 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, return ret; } + +int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return __devm_clk_bulk_get(dev, num_clks, clks, false); +} EXPORT_SYMBOL_GPL(devm_clk_bulk_get); +int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, + struct clk_bulk_data *clks) +{ + return __devm_clk_bulk_get(dev, num_clks, clks, true); +} +EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional); + int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks) { diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c index a2f31e58ee48..fa8c91758b1d 100644 --- a/drivers/clk/clk-lochnagar.c +++ b/drivers/clk/clk-lochnagar.c @@ -16,7 +16,6 @@ #include <linux/platform_device.h> #include <linux/regmap.h> -#include <linux/mfd/lochnagar.h> #include <linux/mfd/lochnagar1_regs.h> #include 
<linux/mfd/lochnagar2_regs.h> @@ -40,48 +39,46 @@ struct lochnagar_clk { struct lochnagar_clk_priv { struct device *dev; struct regmap *regmap; - enum lochnagar_type type; - - const char **parents; - unsigned int nparents; struct lochnagar_clk lclks[LOCHNAGAR_NUM_CLOCKS]; }; -static const char * const lochnagar1_clk_parents[] = { - "ln-none", - "ln-spdif-mclk", - "ln-psia1-mclk", - "ln-psia2-mclk", - "ln-cdc-clkout", - "ln-dsp-clkout", - "ln-pmic-32k", - "ln-gf-mclk1", - "ln-gf-mclk3", - "ln-gf-mclk2", - "ln-gf-mclk4", +#define LN_PARENT(NAME) { .name = NAME, .fw_name = NAME } + +static const struct clk_parent_data lochnagar1_clk_parents[] = { + LN_PARENT("ln-none"), + LN_PARENT("ln-spdif-mclk"), + LN_PARENT("ln-psia1-mclk"), + LN_PARENT("ln-psia2-mclk"), + LN_PARENT("ln-cdc-clkout"), + LN_PARENT("ln-dsp-clkout"), + LN_PARENT("ln-pmic-32k"), + LN_PARENT("ln-gf-mclk1"), + LN_PARENT("ln-gf-mclk3"), + LN_PARENT("ln-gf-mclk2"), + LN_PARENT("ln-gf-mclk4"), }; -static const char * const lochnagar2_clk_parents[] = { - "ln-none", - "ln-cdc-clkout", - "ln-dsp-clkout", - "ln-pmic-32k", - "ln-spdif-mclk", - "ln-clk-12m", - "ln-clk-11m", - "ln-clk-24m", - "ln-clk-22m", - "ln-clk-8m", - "ln-usb-clk-24m", - "ln-gf-mclk1", - "ln-gf-mclk3", - "ln-gf-mclk2", - "ln-psia1-mclk", - "ln-psia2-mclk", - "ln-spdif-clkout", - "ln-adat-mclk", - "ln-usb-clk-12m", +static const struct clk_parent_data lochnagar2_clk_parents[] = { + LN_PARENT("ln-none"), + LN_PARENT("ln-cdc-clkout"), + LN_PARENT("ln-dsp-clkout"), + LN_PARENT("ln-pmic-32k"), + LN_PARENT("ln-spdif-mclk"), + LN_PARENT("ln-clk-12m"), + LN_PARENT("ln-clk-11m"), + LN_PARENT("ln-clk-24m"), + LN_PARENT("ln-clk-22m"), + LN_PARENT("ln-clk-8m"), + LN_PARENT("ln-usb-clk-24m"), + LN_PARENT("ln-gf-mclk1"), + LN_PARENT("ln-gf-mclk3"), + LN_PARENT("ln-gf-mclk2"), + LN_PARENT("ln-psia1-mclk"), + LN_PARENT("ln-psia2-mclk"), + LN_PARENT("ln-spdif-clkout"), + LN_PARENT("ln-adat-mclk"), + LN_PARENT("ln-usb-clk-12m"), }; #define LN1_CLK(ID, NAME, REG) \ @@ -122,6 +119,24 @@ static const struct lochnagar_clk lochnagar2_clks[LOCHNAGAR_NUM_CLOCKS] = { LN2_CLK(SOUNDCARD_MCLK, "ln-soundcard-mclk"), }; +struct lochnagar_config { + const struct clk_parent_data *parents; + int nparents; + const struct lochnagar_clk *clks; +}; + +static const struct lochnagar_config lochnagar1_conf = { + .parents = lochnagar1_clk_parents, + .nparents = ARRAY_SIZE(lochnagar1_clk_parents), + .clks = lochnagar1_clks, +}; + +static const struct lochnagar_config lochnagar2_conf = { + .parents = lochnagar2_clk_parents, + .nparents = ARRAY_SIZE(lochnagar2_clk_parents), + .clks = lochnagar2_clks, +}; + static inline struct lochnagar_clk *lochnagar_hw_to_lclk(struct clk_hw *hw) { return container_of(hw, struct lochnagar_clk, hw); @@ -183,7 +198,7 @@ static u8 lochnagar_clk_get_parent(struct clk_hw *hw) if (ret < 0) { dev_dbg(priv->dev, "Failed to read parent of %s: %d\n", lclk->name, ret); - return priv->nparents; + return hw->init->num_parents; } val &= lclk->src_mask; @@ -198,46 +213,6 @@ static const struct clk_ops lochnagar_clk_ops = { .get_parent = lochnagar_clk_get_parent, }; -static int lochnagar_init_parents(struct lochnagar_clk_priv *priv) -{ - struct device_node *np = priv->dev->of_node; - int i, j; - - switch (priv->type) { - case LOCHNAGAR1: - memcpy(priv->lclks, lochnagar1_clks, sizeof(lochnagar1_clks)); - - priv->nparents = ARRAY_SIZE(lochnagar1_clk_parents); - priv->parents = devm_kmemdup(priv->dev, lochnagar1_clk_parents, - sizeof(lochnagar1_clk_parents), - GFP_KERNEL); - break; - case 
LOCHNAGAR2: - memcpy(priv->lclks, lochnagar2_clks, sizeof(lochnagar2_clks)); - - priv->nparents = ARRAY_SIZE(lochnagar2_clk_parents); - priv->parents = devm_kmemdup(priv->dev, lochnagar2_clk_parents, - sizeof(lochnagar2_clk_parents), - GFP_KERNEL); - break; - default: - dev_err(priv->dev, "Unknown Lochnagar type: %d\n", priv->type); - return -EINVAL; - } - - if (!priv->parents) - return -ENOMEM; - - for (i = 0; i < priv->nparents; i++) { - j = of_property_match_string(np, "clock-names", - priv->parents[i]); - if (j >= 0) - priv->parents[i] = of_clk_get_parent_name(np, j); - } - - return 0; -} - static struct clk_hw * lochnagar_of_clk_hw_get(struct of_phandle_args *clkspec, void *data) { @@ -252,16 +227,42 @@ lochnagar_of_clk_hw_get(struct of_phandle_args *clkspec, void *data) return &priv->lclks[idx].hw; } -static int lochnagar_init_clks(struct lochnagar_clk_priv *priv) +static const struct of_device_id lochnagar_of_match[] = { + { .compatible = "cirrus,lochnagar1-clk", .data = &lochnagar1_conf }, + { .compatible = "cirrus,lochnagar2-clk", .data = &lochnagar2_conf }, + {} +}; +MODULE_DEVICE_TABLE(of, lochnagar_of_match); + +static int lochnagar_clk_probe(struct platform_device *pdev) { struct clk_init_data clk_init = { .ops = &lochnagar_clk_ops, - .parent_names = priv->parents, - .num_parents = priv->nparents, }; + struct device *dev = &pdev->dev; + struct lochnagar_clk_priv *priv; + const struct of_device_id *of_id; struct lochnagar_clk *lclk; + struct lochnagar_config *conf; int ret, i; + of_id = of_match_device(lochnagar_of_match, dev); + if (!of_id) + return -EINVAL; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + priv->regmap = dev_get_regmap(dev->parent, NULL); + conf = (struct lochnagar_config *)of_id->data; + + memcpy(priv->lclks, conf->clks, sizeof(priv->lclks)); + + clk_init.parent_data = conf->parents; + clk_init.num_parents = conf->nparents; + for (i = 0; i < ARRAY_SIZE(priv->lclks); i++) { lclk = &priv->lclks[i]; @@ -273,55 +274,21 @@ static int lochnagar_init_clks(struct lochnagar_clk_priv *priv) lclk->priv = priv; lclk->hw.init = &clk_init; - ret = devm_clk_hw_register(priv->dev, &lclk->hw); + ret = devm_clk_hw_register(dev, &lclk->hw); if (ret) { - dev_err(priv->dev, "Failed to register %s: %d\n", + dev_err(dev, "Failed to register %s: %d\n", lclk->name, ret); return ret; } } - ret = devm_of_clk_add_hw_provider(priv->dev, lochnagar_of_clk_hw_get, - priv); + ret = devm_of_clk_add_hw_provider(dev, lochnagar_of_clk_hw_get, priv); if (ret < 0) - dev_err(priv->dev, "Failed to register provider: %d\n", ret); + dev_err(dev, "Failed to register provider: %d\n", ret); return ret; } -static const struct of_device_id lochnagar_of_match[] = { - { .compatible = "cirrus,lochnagar1-clk", .data = (void *)LOCHNAGAR1 }, - { .compatible = "cirrus,lochnagar2-clk", .data = (void *)LOCHNAGAR2 }, - {} -}; -MODULE_DEVICE_TABLE(of, lochnagar_of_match); - -static int lochnagar_clk_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct lochnagar_clk_priv *priv; - const struct of_device_id *of_id; - int ret; - - of_id = of_match_device(lochnagar_of_match, dev); - if (!of_id) - return -EINVAL; - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->dev = dev; - priv->regmap = dev_get_regmap(dev->parent, NULL); - priv->type = (enum lochnagar_type)of_id->data; - - ret = lochnagar_init_parents(priv); - if (ret) - return ret; - - return lochnagar_init_clks(priv); -} - static 
struct platform_driver lochnagar_clk_driver = { .driver = { .name = "lochnagar-clk", diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c index 5f0490b8f6cb..87fe0b0e01a3 100644 --- a/drivers/clk/clk-pwm.c +++ b/drivers/clk/clk-pwm.c @@ -44,10 +44,24 @@ static unsigned long clk_pwm_recalc_rate(struct clk_hw *hw, return clk_pwm->fixed_rate; } +static int clk_pwm_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty) +{ + struct clk_pwm *clk_pwm = to_clk_pwm(hw); + struct pwm_state state; + + pwm_get_state(clk_pwm->pwm, &state); + + duty->num = state.duty_cycle; + duty->den = state.period; + + return 0; +} + static const struct clk_ops clk_pwm_ops = { .prepare = clk_pwm_prepare, .unprepare = clk_pwm_unprepare, .recalc_rate = clk_pwm_recalc_rate, + .get_duty_cycle = clk_pwm_get_duty_cycle, }; static int clk_pwm_probe(struct platform_device *pdev) diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index dd93d3acc67d..07f3b252f3e0 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -635,6 +635,17 @@ static const struct clockgen_chipinfo chipinfo[] = { .flags = CG_VER3 | CG_LITTLE_ENDIAN, }, { + .compat = "fsl,lx2160a-clockgen", + .cmux_groups = { + &clockgen2_cmux_cga12, &clockgen2_cmux_cgb + }, + .cmux_to_group = { + 0, 0, 0, 0, 1, 1, 1, 1, -1 + }, + .pll_mask = 0x37, + .flags = CG_VER3 | CG_LITTLE_ENDIAN, + }, + { .compat = "fsl,p2041-clockgen", .guts_compat = "fsl,qoriq-device-config-1.0", .init_periph = p2041_init_periph, @@ -1493,6 +1504,7 @@ CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); +CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init); CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init); diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c new file mode 100644 index 000000000000..72424eb7e5f8 --- /dev/null +++ b/drivers/clk/clk-si5341.c @@ -0,0 +1,1346 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Silicon Labs Si5341/Si5340 Clock generator + * Copyright (C) 2019 Topic Embedded Products + * Author: Mike Looijmans <mike.looijmans@topic.nl> + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/gcd.h> +#include <linux/math64.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <asm/unaligned.h> + +#define SI5341_MAX_NUM_OUTPUTS 10 +#define SI5340_MAX_NUM_OUTPUTS 4 + +#define SI5341_NUM_SYNTH 5 +#define SI5340_NUM_SYNTH 4 + +/* Range of the synthesizer fractional divider */ +#define SI5341_SYNTH_N_MIN 10 +#define SI5341_SYNTH_N_MAX 4095 + +/* The chip can get its input clock from 3 input pins or an XTAL */ + +/* There is one PLL running at 13500–14256 MHz */ +#define SI5341_PLL_VCO_MIN 13500000000ull +#define SI5341_PLL_VCO_MAX 14256000000ull + +/* The 5 frequency synthesizers obtain their input from the PLL */ +struct clk_si5341_synth { + struct clk_hw hw; + struct clk_si5341 *data; + u8 index; +}; +#define to_clk_si5341_synth(_hw) \ + container_of(_hw, struct clk_si5341_synth, hw) + +/* The output stages can be connected to any synth (full mux) */ 
+struct clk_si5341_output { + struct clk_hw hw; + struct clk_si5341 *data; + u8 index; +}; +#define to_clk_si5341_output(_hw) \ + container_of(_hw, struct clk_si5341_output, hw) + +struct clk_si5341 { + struct clk_hw hw; + struct regmap *regmap; + struct i2c_client *i2c_client; + struct clk_si5341_synth synth[SI5341_NUM_SYNTH]; + struct clk_si5341_output clk[SI5341_MAX_NUM_OUTPUTS]; + struct clk *pxtal; + const char *pxtal_name; + const u16 *reg_output_offset; + const u16 *reg_rdiv_offset; + u64 freq_vco; /* 13500–14256 MHz */ + u8 num_outputs; + u8 num_synth; +}; +#define to_clk_si5341(_hw) container_of(_hw, struct clk_si5341, hw) + +struct clk_si5341_output_config { + u8 out_format_drv_bits; + u8 out_cm_ampl_bits; + bool synth_master; + bool always_on; +}; + +#define SI5341_PAGE 0x0001 +#define SI5341_PN_BASE 0x0002 +#define SI5341_DEVICE_REV 0x0005 +#define SI5341_STATUS 0x000C +#define SI5341_SOFT_RST 0x001C + +/* Input dividers (48-bit) */ +#define SI5341_IN_PDIV(x) (0x0208 + ((x) * 10)) +#define SI5341_IN_PSET(x) (0x020E + ((x) * 10)) + +/* PLL configuration */ +#define SI5341_PLL_M_NUM 0x0235 +#define SI5341_PLL_M_DEN 0x023B + +/* Output configuration */ +#define SI5341_OUT_CONFIG(output) \ + ((output)->data->reg_output_offset[(output)->index]) +#define SI5341_OUT_FORMAT(output) (SI5341_OUT_CONFIG(output) + 1) +#define SI5341_OUT_CM(output) (SI5341_OUT_CONFIG(output) + 2) +#define SI5341_OUT_MUX_SEL(output) (SI5341_OUT_CONFIG(output) + 3) +#define SI5341_OUT_R_REG(output) \ + ((output)->data->reg_rdiv_offset[(output)->index]) + +/* Synthesize N divider */ +#define SI5341_SYNTH_N_NUM(x) (0x0302 + ((x) * 11)) +#define SI5341_SYNTH_N_DEN(x) (0x0308 + ((x) * 11)) +#define SI5341_SYNTH_N_UPD(x) (0x030C + ((x) * 11)) + +/* Synthesizer output enable, phase bypass, power mode */ +#define SI5341_SYNTH_N_CLK_TO_OUTX_EN 0x0A03 +#define SI5341_SYNTH_N_PIBYP 0x0A04 +#define SI5341_SYNTH_N_PDNB 0x0A05 +#define SI5341_SYNTH_N_CLK_DIS 0x0B4A + +#define SI5341_REGISTER_MAX 0xBFF + +/* SI5341_OUT_CONFIG bits */ +#define SI5341_OUT_CFG_PDN BIT(0) +#define SI5341_OUT_CFG_OE BIT(1) +#define SI5341_OUT_CFG_RDIV_FORCE2 BIT(2) + +/* Static configuration (to be moved to firmware) */ +struct si5341_reg_default { + u16 address; + u8 value; +}; + +/* Output configuration registers 0..9 are not quite logically organized */ +static const u16 si5341_reg_output_offset[] = { + 0x0108, + 0x010D, + 0x0112, + 0x0117, + 0x011C, + 0x0121, + 0x0126, + 0x012B, + 0x0130, + 0x013A, +}; + +static const u16 si5340_reg_output_offset[] = { + 0x0112, + 0x0117, + 0x0126, + 0x012B, +}; + +/* The location of the R divider registers */ +static const u16 si5341_reg_rdiv_offset[] = { + 0x024A, + 0x024D, + 0x0250, + 0x0253, + 0x0256, + 0x0259, + 0x025C, + 0x025F, + 0x0262, + 0x0268, +}; +static const u16 si5340_reg_rdiv_offset[] = { + 0x0250, + 0x0253, + 0x025C, + 0x025F, +}; + +/* + * Programming sequence from ClockBuilder, settings to initialize the system + * using only the XTAL input, without pre-divider. + * This also contains settings that aren't mentioned anywhere in the datasheet. + * The "known" settings like synth and output configuration are done later. 
+ */ +static const struct si5341_reg_default si5341_reg_defaults[] = { + { 0x0017, 0x3A }, /* INT mask (disable interrupts) */ + { 0x0018, 0xFF }, /* INT mask */ + { 0x0021, 0x0F }, /* Select XTAL as input */ + { 0x0022, 0x00 }, /* Not in datasheet */ + { 0x002B, 0x02 }, /* SPI config */ + { 0x002C, 0x20 }, /* LOS enable for XTAL */ + { 0x002D, 0x00 }, /* LOS timing */ + { 0x002E, 0x00 }, + { 0x002F, 0x00 }, + { 0x0030, 0x00 }, + { 0x0031, 0x00 }, + { 0x0032, 0x00 }, + { 0x0033, 0x00 }, + { 0x0034, 0x00 }, + { 0x0035, 0x00 }, + { 0x0036, 0x00 }, + { 0x0037, 0x00 }, + { 0x0038, 0x00 }, /* LOS setting (thresholds) */ + { 0x0039, 0x00 }, + { 0x003A, 0x00 }, + { 0x003B, 0x00 }, + { 0x003C, 0x00 }, + { 0x003D, 0x00 }, /* LOS setting (thresholds) end */ + { 0x0041, 0x00 }, /* LOS0_DIV_SEL */ + { 0x0042, 0x00 }, /* LOS1_DIV_SEL */ + { 0x0043, 0x00 }, /* LOS2_DIV_SEL */ + { 0x0044, 0x00 }, /* LOS3_DIV_SEL */ + { 0x009E, 0x00 }, /* Not in datasheet */ + { 0x0102, 0x01 }, /* Enable outputs */ + { 0x013F, 0x00 }, /* Not in datasheet */ + { 0x0140, 0x00 }, /* Not in datasheet */ + { 0x0141, 0x40 }, /* OUT LOS */ + { 0x0202, 0x00 }, /* XAXB_FREQ_OFFSET (=0)*/ + { 0x0203, 0x00 }, + { 0x0204, 0x00 }, + { 0x0205, 0x00 }, + { 0x0206, 0x00 }, /* PXAXB (2^x) */ + { 0x0208, 0x00 }, /* Px divider setting (usually 0) */ + { 0x0209, 0x00 }, + { 0x020A, 0x00 }, + { 0x020B, 0x00 }, + { 0x020C, 0x00 }, + { 0x020D, 0x00 }, + { 0x020E, 0x00 }, + { 0x020F, 0x00 }, + { 0x0210, 0x00 }, + { 0x0211, 0x00 }, + { 0x0212, 0x00 }, + { 0x0213, 0x00 }, + { 0x0214, 0x00 }, + { 0x0215, 0x00 }, + { 0x0216, 0x00 }, + { 0x0217, 0x00 }, + { 0x0218, 0x00 }, + { 0x0219, 0x00 }, + { 0x021A, 0x00 }, + { 0x021B, 0x00 }, + { 0x021C, 0x00 }, + { 0x021D, 0x00 }, + { 0x021E, 0x00 }, + { 0x021F, 0x00 }, + { 0x0220, 0x00 }, + { 0x0221, 0x00 }, + { 0x0222, 0x00 }, + { 0x0223, 0x00 }, + { 0x0224, 0x00 }, + { 0x0225, 0x00 }, + { 0x0226, 0x00 }, + { 0x0227, 0x00 }, + { 0x0228, 0x00 }, + { 0x0229, 0x00 }, + { 0x022A, 0x00 }, + { 0x022B, 0x00 }, + { 0x022C, 0x00 }, + { 0x022D, 0x00 }, + { 0x022E, 0x00 }, + { 0x022F, 0x00 }, /* Px divider setting (usually 0) end */ + { 0x026B, 0x00 }, /* DESIGN_ID (ASCII string) */ + { 0x026C, 0x00 }, + { 0x026D, 0x00 }, + { 0x026E, 0x00 }, + { 0x026F, 0x00 }, + { 0x0270, 0x00 }, + { 0x0271, 0x00 }, + { 0x0272, 0x00 }, /* DESIGN_ID (ASCII string) end */ + { 0x0339, 0x1F }, /* N_FSTEP_MSK */ + { 0x033B, 0x00 }, /* Nx_FSTEPW (Frequency step) */ + { 0x033C, 0x00 }, + { 0x033D, 0x00 }, + { 0x033E, 0x00 }, + { 0x033F, 0x00 }, + { 0x0340, 0x00 }, + { 0x0341, 0x00 }, + { 0x0342, 0x00 }, + { 0x0343, 0x00 }, + { 0x0344, 0x00 }, + { 0x0345, 0x00 }, + { 0x0346, 0x00 }, + { 0x0347, 0x00 }, + { 0x0348, 0x00 }, + { 0x0349, 0x00 }, + { 0x034A, 0x00 }, + { 0x034B, 0x00 }, + { 0x034C, 0x00 }, + { 0x034D, 0x00 }, + { 0x034E, 0x00 }, + { 0x034F, 0x00 }, + { 0x0350, 0x00 }, + { 0x0351, 0x00 }, + { 0x0352, 0x00 }, + { 0x0353, 0x00 }, + { 0x0354, 0x00 }, + { 0x0355, 0x00 }, + { 0x0356, 0x00 }, + { 0x0357, 0x00 }, + { 0x0358, 0x00 }, /* Nx_FSTEPW (Frequency step) end */ + { 0x0359, 0x00 }, /* Nx_DELAY */ + { 0x035A, 0x00 }, + { 0x035B, 0x00 }, + { 0x035C, 0x00 }, + { 0x035D, 0x00 }, + { 0x035E, 0x00 }, + { 0x035F, 0x00 }, + { 0x0360, 0x00 }, + { 0x0361, 0x00 }, + { 0x0362, 0x00 }, /* Nx_DELAY end */ + { 0x0802, 0x00 }, /* Not in datasheet */ + { 0x0803, 0x00 }, /* Not in datasheet */ + { 0x0804, 0x00 }, /* Not in datasheet */ + { 0x090E, 0x02 }, /* XAXB_EXTCLK_EN=0 XAXB_PDNB=1 (use XTAL) */ + { 0x091C, 0x04 }, /* ZDM_EN=4 (Normal mode) */ + 
{ 0x0943, 0x00 }, /* IO_VDD_SEL=0 (0=1v8, use 1=3v3) */ + { 0x0949, 0x00 }, /* IN_EN (disable input clocks) */ + { 0x094A, 0x00 }, /* INx_TO_PFD_EN (disabled) */ + { 0x0A02, 0x00 }, /* Not in datasheet */ + { 0x0B44, 0x0F }, /* PDIV_ENB (datasheet does not mention what it is) */ +}; + +/* Read and interpret a 44-bit followed by a 32-bit value in the regmap */ +static int si5341_decode_44_32(struct regmap *regmap, unsigned int reg, + u64 *val1, u32 *val2) +{ + int err; + u8 r[10]; + + err = regmap_bulk_read(regmap, reg, r, 10); + if (err < 0) + return err; + + *val1 = ((u64)((r[5] & 0x0f) << 8 | r[4]) << 32) | + (get_unaligned_le32(r)); + *val2 = get_unaligned_le32(&r[6]); + + return 0; +} + +static int si5341_encode_44_32(struct regmap *regmap, unsigned int reg, + u64 n_num, u32 n_den) +{ + u8 r[10]; + + /* Shift left as far as possible without overflowing */ + while (!(n_num & BIT_ULL(43)) && !(n_den & BIT(31))) { + n_num <<= 1; + n_den <<= 1; + } + + /* 44 bits (6 bytes) numerator */ + put_unaligned_le32(n_num, r); + r[4] = (n_num >> 32) & 0xff; + r[5] = (n_num >> 40) & 0x0f; + /* 32 bits denominator */ + put_unaligned_le32(n_den, &r[6]); + + /* Program the fraction */ + return regmap_bulk_write(regmap, reg, r, sizeof(r)); +} + +/* VCO, we assume it runs at a constant frequency */ +static unsigned long si5341_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_si5341 *data = to_clk_si5341(hw); + int err; + u64 res; + u64 m_num; + u32 m_den; + unsigned int shift; + + /* Assume that PDIV is not being used, just read the PLL setting */ + err = si5341_decode_44_32(data->regmap, SI5341_PLL_M_NUM, + &m_num, &m_den); + if (err < 0) + return 0; + + if (!m_num || !m_den) + return 0; + + /* + * Though m_num is 64-bit, only the upper bits are actually used. While + * calculating m_num and m_den, they are shifted as far as possible to + * the left. To avoid 96-bit division here, we just shift them back so + * we can do with just 64 bits. 
+ */ + shift = 0; + res = m_num; + while (res & 0xffff00000000ULL) { + ++shift; + res >>= 1; + } + res *= parent_rate; + do_div(res, (m_den >> shift)); + + /* We cannot return the actual frequency in 32 bit, store it locally */ + data->freq_vco = res; + + /* Report kHz since the value is out of range */ + do_div(res, 1000); + + return (unsigned long)res; +} + +static const struct clk_ops si5341_clk_ops = { + .recalc_rate = si5341_clk_recalc_rate, +}; + +/* Synthesizers, there are 5 synthesizers that connect to any of the outputs */ + +/* The synthesizer is on if all power and enable bits are set */ +static int si5341_synth_clk_is_on(struct clk_hw *hw) +{ + struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); + int err; + u32 val; + u8 index = synth->index; + + err = regmap_read(synth->data->regmap, + SI5341_SYNTH_N_CLK_TO_OUTX_EN, &val); + if (err < 0) + return 0; + + if (!(val & BIT(index))) + return 0; + + err = regmap_read(synth->data->regmap, SI5341_SYNTH_N_PDNB, &val); + if (err < 0) + return 0; + + if (!(val & BIT(index))) + return 0; + + /* This bit must be 0 for the synthesizer to receive clock input */ + err = regmap_read(synth->data->regmap, SI5341_SYNTH_N_CLK_DIS, &val); + if (err < 0) + return 0; + + return !(val & BIT(index)); +} + +static void si5341_synth_clk_unprepare(struct clk_hw *hw) +{ + struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); + u8 index = synth->index; /* In range 0..5 */ + u8 mask = BIT(index); + + /* Disable output */ + regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_CLK_TO_OUTX_EN, mask, 0); + /* Power down */ + regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_PDNB, mask, 0); + /* Disable clock input to synth (set to 1 to disable) */ + regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_CLK_DIS, mask, mask); +} + +static int si5341_synth_clk_prepare(struct clk_hw *hw) +{ + struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); + int err; + u8 index = synth->index; + u8 mask = BIT(index); + + /* Power up */ + err = regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_PDNB, mask, mask); + if (err < 0) + return err; + + /* Enable clock input to synth (set bit to 0 to enable) */ + err = regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_CLK_DIS, mask, 0); + if (err < 0) + return err; + + /* Enable output */ + return regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_CLK_TO_OUTX_EN, mask, mask); +} + +/* Synth clock frequency: Fvco * n_den / n_num, with Fvco in 13500-14256 MHz */ +static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); + u64 f; + u64 n_num; + u32 n_den; + int err; + + err = si5341_decode_44_32(synth->data->regmap, + SI5341_SYNTH_N_NUM(synth->index), &n_num, &n_den); + if (err < 0) + return err; + + /* + * n_num and n_den are shifted left as much as possible, so to prevent + * overflow in 64-bit math, we shift n_den 4 bits to the right + */ + f = synth->data->freq_vco; + f *= n_den >> 4; + + /* Now we need to do 64-bit division: f/n_num */ + /* And compensate for the 4 bits we dropped */ + f = div64_u64(f, (n_num >> 4)); + + return f; +} + +static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); + u64 f; + + /* The synthesizer accuracy is such that anything in range will work */ + f = synth->data->freq_vco; + do_div(f, SI5341_SYNTH_N_MAX); + if (rate < f) + return f; + + f = 
synth->data->freq_vco; + do_div(f, SI5341_SYNTH_N_MIN); + if (rate > f) + return f; + + return rate; +} + +static int si5341_synth_program(struct clk_si5341_synth *synth, + u64 n_num, u32 n_den, bool is_integer) +{ + int err; + u8 index = synth->index; + + err = si5341_encode_44_32(synth->data->regmap, + SI5341_SYNTH_N_NUM(index), n_num, n_den); + + err = regmap_update_bits(synth->data->regmap, + SI5341_SYNTH_N_PIBYP, BIT(index), is_integer ? BIT(index) : 0); + if (err < 0) + return err; + + return regmap_write(synth->data->regmap, + SI5341_SYNTH_N_UPD(index), 0x01); +} + + +static int si5341_synth_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_si5341_synth *synth = to_clk_si5341_synth(hw); + u64 n_num; + u32 n_den; + u32 r; + u32 g; + bool is_integer; + + n_num = synth->data->freq_vco; + n_den = rate; + + /* see if there's an integer solution */ + r = do_div(n_num, rate); + is_integer = (r == 0); + if (is_integer) { + /* Integer divider equal to n_num */ + n_den = 1; + } else { + /* Calculate a fractional solution */ + g = gcd(r, rate); + n_den = rate / g; + n_num *= n_den; + n_num += r / g; + } + + dev_dbg(&synth->data->i2c_client->dev, + "%s(%u): n=0x%llx d=0x%x %s\n", __func__, + synth->index, n_num, n_den, + is_integer ? "int" : "frac"); + + return si5341_synth_program(synth, n_num, n_den, is_integer); +} + +static const struct clk_ops si5341_synth_clk_ops = { + .is_prepared = si5341_synth_clk_is_on, + .prepare = si5341_synth_clk_prepare, + .unprepare = si5341_synth_clk_unprepare, + .recalc_rate = si5341_synth_clk_recalc_rate, + .round_rate = si5341_synth_clk_round_rate, + .set_rate = si5341_synth_clk_set_rate, +}; + +static int si5341_output_clk_is_on(struct clk_hw *hw) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + int err; + u32 val; + + err = regmap_read(output->data->regmap, + SI5341_OUT_CONFIG(output), &val); + if (err < 0) + return err; + + /* Bit 0=PDN, 1=OE so only a value of 0x2 enables the output */ + return (val & 0x03) == SI5341_OUT_CFG_OE; +} + +/* Disables and then powers down the output */ +static void si5341_output_clk_unprepare(struct clk_hw *hw) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + + regmap_update_bits(output->data->regmap, + SI5341_OUT_CONFIG(output), + SI5341_OUT_CFG_OE, 0); + regmap_update_bits(output->data->regmap, + SI5341_OUT_CONFIG(output), + SI5341_OUT_CFG_PDN, SI5341_OUT_CFG_PDN); +} + +/* Powers up and then enables the output */ +static int si5341_output_clk_prepare(struct clk_hw *hw) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + int err; + + err = regmap_update_bits(output->data->regmap, + SI5341_OUT_CONFIG(output), + SI5341_OUT_CFG_PDN, 0); + if (err < 0) + return err; + + return regmap_update_bits(output->data->regmap, + SI5341_OUT_CONFIG(output), + SI5341_OUT_CFG_OE, SI5341_OUT_CFG_OE); +} + +static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + int err; + u32 val; + u32 r_divider; + u8 r[3]; + + err = regmap_bulk_read(output->data->regmap, + SI5341_OUT_R_REG(output), r, 3); + if (err < 0) + return err; + + /* Calculate value as 24-bit integer*/ + r_divider = r[2] << 16 | r[1] << 8 | r[0]; + + /* If Rx_REG is zero, the divider is disabled, so return a "0" rate */ + if (!r_divider) + return 0; + + /* Divider is 2*(Rx_REG+1) */ + r_divider += 1; + r_divider <<= 1; + + err = regmap_read(output->data->regmap, + 
SI5341_OUT_CONFIG(output), &val); + if (err < 0) + return err; + + if (val & SI5341_OUT_CFG_RDIV_FORCE2) + r_divider = 2; + + return parent_rate / r_divider; +} + +static long si5341_output_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long r; + + r = *parent_rate >> 1; + + /* If rate is an even divisor, no changes to parent required */ + if (r && !(r % rate)) + return (long)rate; + + if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) { + if (rate > 200000000) { + /* minimum r-divider is 2 */ + r = 2; + } else { + /* Take a parent frequency near 400 MHz */ + r = (400000000u / rate) & ~1; + } + *parent_rate = r * rate; + } else { + /* We cannot change our parent's rate, report what we can do */ + r /= rate; + rate = *parent_rate / (r << 1); + } + + return rate; +} + +static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + /* Frequency divider is (r_div + 1) * 2 */ + u32 r_div = (parent_rate / rate) >> 1; + int err; + u8 r[3]; + + if (r_div <= 1) + r_div = 0; + else if (r_div >= BIT(24)) + r_div = BIT(24) - 1; + else + --r_div; + + /* For a value of "2", we set the "OUT0_RDIV_FORCE2" bit */ + err = regmap_update_bits(output->data->regmap, + SI5341_OUT_CONFIG(output), + SI5341_OUT_CFG_RDIV_FORCE2, + (r_div == 0) ? SI5341_OUT_CFG_RDIV_FORCE2 : 0); + if (err < 0) + return err; + + /* Always write Rx_REG, because a zero value disables the divider */ + r[0] = r_div ? (r_div & 0xff) : 1; + r[1] = (r_div >> 8) & 0xff; + r[2] = (r_div >> 16) & 0xff; + err = regmap_bulk_write(output->data->regmap, + SI5341_OUT_R_REG(output), r, 3); + + return 0; +} + +static int si5341_output_reparent(struct clk_si5341_output *output, u8 index) +{ + return regmap_update_bits(output->data->regmap, + SI5341_OUT_MUX_SEL(output), 0x07, index); +} + +static int si5341_output_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + + if (index >= output->data->num_synth) + return -EINVAL; + + return si5341_output_reparent(output, index); +} + +static u8 si5341_output_get_parent(struct clk_hw *hw) +{ + struct clk_si5341_output *output = to_clk_si5341_output(hw); + int err; + u32 val; + + err = regmap_read(output->data->regmap, + SI5341_OUT_MUX_SEL(output), &val); + + return val & 0x7; +} + +static const struct clk_ops si5341_output_clk_ops = { + .is_prepared = si5341_output_clk_is_on, + .prepare = si5341_output_clk_prepare, + .unprepare = si5341_output_clk_unprepare, + .recalc_rate = si5341_output_clk_recalc_rate, + .round_rate = si5341_output_clk_round_rate, + .set_rate = si5341_output_clk_set_rate, + .set_parent = si5341_output_set_parent, + .get_parent = si5341_output_get_parent, +}; + +/* + * The chip can be bought in a pre-programmed version, or one can program the + * NVM in the chip to boot up in a preset mode. This routine tries to determine + * if that's the case, or if we need to reset and program everything from + * scratch. Returns negative error, or true/false. 
+ */ +static int si5341_is_programmed_already(struct clk_si5341 *data) +{ + int err; + u8 r[4]; + + /* Read the PLL divider value, it must have a non-zero value */ + err = regmap_bulk_read(data->regmap, SI5341_PLL_M_DEN, + r, ARRAY_SIZE(r)); + if (err < 0) + return err; + + return !!get_unaligned_le32(r); +} + +static struct clk_hw * +of_clk_si5341_get(struct of_phandle_args *clkspec, void *_data) +{ + struct clk_si5341 *data = _data; + unsigned int idx = clkspec->args[1]; + unsigned int group = clkspec->args[0]; + + switch (group) { + case 0: + if (idx >= data->num_outputs) { + dev_err(&data->i2c_client->dev, + "invalid output index %u\n", idx); + return ERR_PTR(-EINVAL); + } + return &data->clk[idx].hw; + case 1: + if (idx >= data->num_synth) { + dev_err(&data->i2c_client->dev, + "invalid synthesizer index %u\n", idx); + return ERR_PTR(-EINVAL); + } + return &data->synth[idx].hw; + case 2: + if (idx > 0) { + dev_err(&data->i2c_client->dev, + "invalid PLL index %u\n", idx); + return ERR_PTR(-EINVAL); + } + return &data->hw; + default: + dev_err(&data->i2c_client->dev, "invalid group %u\n", group); + return ERR_PTR(-EINVAL); + } +} + +static int si5341_probe_chip_id(struct clk_si5341 *data) +{ + int err; + u8 reg[4]; + u16 model; + + err = regmap_bulk_read(data->regmap, SI5341_PN_BASE, reg, + ARRAY_SIZE(reg)); + if (err < 0) { + dev_err(&data->i2c_client->dev, "Failed to read chip ID\n"); + return err; + } + + model = get_unaligned_le16(reg); + + dev_info(&data->i2c_client->dev, "Chip: %x Grade: %u Rev: %u\n", + model, reg[2], reg[3]); + + switch (model) { + case 0x5340: + data->num_outputs = SI5340_MAX_NUM_OUTPUTS; + data->num_synth = SI5340_NUM_SYNTH; + data->reg_output_offset = si5340_reg_output_offset; + data->reg_rdiv_offset = si5340_reg_rdiv_offset; + break; + case 0x5341: + data->num_outputs = SI5341_MAX_NUM_OUTPUTS; + data->num_synth = SI5341_NUM_SYNTH; + data->reg_output_offset = si5341_reg_output_offset; + data->reg_rdiv_offset = si5341_reg_rdiv_offset; + break; + default: + dev_err(&data->i2c_client->dev, "Model '%x' not supported\n", + model); + return -EINVAL; + } + + return 0; +} + +/* Read active settings into the regmap cache for later reference */ +static int si5341_read_settings(struct clk_si5341 *data) +{ + int err; + u8 i; + u8 r[10]; + + err = regmap_bulk_read(data->regmap, SI5341_PLL_M_NUM, r, 10); + if (err < 0) + return err; + + err = regmap_bulk_read(data->regmap, + SI5341_SYNTH_N_CLK_TO_OUTX_EN, r, 3); + if (err < 0) + return err; + + err = regmap_bulk_read(data->regmap, + SI5341_SYNTH_N_CLK_DIS, r, 1); + if (err < 0) + return err; + + for (i = 0; i < data->num_synth; ++i) { + err = regmap_bulk_read(data->regmap, + SI5341_SYNTH_N_NUM(i), r, 10); + if (err < 0) + return err; + } + + for (i = 0; i < data->num_outputs; ++i) { + err = regmap_bulk_read(data->regmap, + data->reg_output_offset[i], r, 4); + if (err < 0) + return err; + + err = regmap_bulk_read(data->regmap, + data->reg_rdiv_offset[i], r, 3); + if (err < 0) + return err; + } + + return 0; +} + +static int si5341_write_multiple(struct clk_si5341 *data, + const struct si5341_reg_default *values, unsigned int num_values) +{ + unsigned int i; + int res; + + for (i = 0; i < num_values; ++i) { + res = regmap_write(data->regmap, + values[i].address, values[i].value); + if (res < 0) { + dev_err(&data->i2c_client->dev, + "Failed to write %#x:%#x\n", + values[i].address, values[i].value); + return res; + } + } + + return 0; +} + +static const struct si5341_reg_default si5341_preamble[] = { + { 0x0B25, 0x00 }, + { 
0x0502, 0x01 }, + { 0x0505, 0x03 }, + { 0x0957, 0x1F }, + { 0x0B4E, 0x1A }, +}; + +static int si5341_send_preamble(struct clk_si5341 *data) +{ + int res; + u32 revision; + + /* For revision 2 and up, the values are slightly different */ + res = regmap_read(data->regmap, SI5341_DEVICE_REV, &revision); + if (res < 0) + return res; + + /* Write "preamble" as specified by datasheet */ + res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xD8 : 0xC0); + if (res < 0) + return res; + res = si5341_write_multiple(data, + si5341_preamble, ARRAY_SIZE(si5341_preamble)); + if (res < 0) + return res; + + /* Datasheet specifies a 300ms wait after sending the preamble */ + msleep(300); + + return 0; +} + +/* Perform a soft reset and write post-amble */ +static int si5341_finalize_defaults(struct clk_si5341 *data) +{ + int res; + u32 revision; + + res = regmap_read(data->regmap, SI5341_DEVICE_REV, &revision); + if (res < 0) + return res; + + dev_dbg(&data->i2c_client->dev, "%s rev=%u\n", __func__, revision); + + res = regmap_write(data->regmap, SI5341_SOFT_RST, 0x01); + if (res < 0) + return res; + + /* Datasheet does not explain these nameless registers */ + res = regmap_write(data->regmap, 0xB24, revision < 2 ? 0xDB : 0xC3); + if (res < 0) + return res; + res = regmap_write(data->regmap, 0x0B25, 0x02); + if (res < 0) + return res; + + return 0; +} + + +static const struct regmap_range si5341_regmap_volatile_range[] = { + regmap_reg_range(0x000C, 0x0012), /* Status */ + regmap_reg_range(0x001C, 0x001E), /* reset, finc/fdec */ + regmap_reg_range(0x00E2, 0x00FE), /* NVM, interrupts, device ready */ + /* Update bits for synth config */ + regmap_reg_range(SI5341_SYNTH_N_UPD(0), SI5341_SYNTH_N_UPD(0)), + regmap_reg_range(SI5341_SYNTH_N_UPD(1), SI5341_SYNTH_N_UPD(1)), + regmap_reg_range(SI5341_SYNTH_N_UPD(2), SI5341_SYNTH_N_UPD(2)), + regmap_reg_range(SI5341_SYNTH_N_UPD(3), SI5341_SYNTH_N_UPD(3)), + regmap_reg_range(SI5341_SYNTH_N_UPD(4), SI5341_SYNTH_N_UPD(4)), +}; + +static const struct regmap_access_table si5341_regmap_volatile = { + .yes_ranges = si5341_regmap_volatile_range, + .n_yes_ranges = ARRAY_SIZE(si5341_regmap_volatile_range), +}; + +/* Pages 0, 1, 2, 3, 9, A, B are valid, so there are 12 pages */ +static const struct regmap_range_cfg si5341_regmap_ranges[] = { + { + .range_min = 0, + .range_max = SI5341_REGISTER_MAX, + .selector_reg = SI5341_PAGE, + .selector_mask = 0xff, + .selector_shift = 0, + .window_start = 0, + .window_len = 256, + }, +}; + +static const struct regmap_config si5341_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .ranges = si5341_regmap_ranges, + .num_ranges = ARRAY_SIZE(si5341_regmap_ranges), + .max_register = SI5341_REGISTER_MAX, + .volatile_table = &si5341_regmap_volatile, +}; + +static int si5341_dt_parse_dt(struct i2c_client *client, + struct clk_si5341_output_config *config) +{ + struct device_node *child; + struct device_node *np = client->dev.of_node; + u32 num; + u32 val; + + memset(config, 0, sizeof(struct clk_si5341_output_config) * + SI5341_MAX_NUM_OUTPUTS); + + for_each_child_of_node(np, child) { + if (of_property_read_u32(child, "reg", &num)) { + dev_err(&client->dev, "missing reg property of %s\n", + child->name); + goto put_child; + } + + if (num >= SI5341_MAX_NUM_OUTPUTS) { + dev_err(&client->dev, "invalid clkout %d\n", num); + goto put_child; + } + + if (!of_property_read_u32(child, "silabs,format", &val)) { + /* Set cm and ampl conservatively to 3v3 settings */ + switch (val) { + case 1: /* normal differential */ + 
config[num].out_cm_ampl_bits = 0x33; + break; + case 2: /* low-power differential */ + config[num].out_cm_ampl_bits = 0x13; + break; + case 4: /* LVCMOS */ + config[num].out_cm_ampl_bits = 0x33; + /* Set SI recommended impedance for LVCMOS */ + config[num].out_format_drv_bits |= 0xc0; + break; + default: + dev_err(&client->dev, + "invalid silabs,format %u for %u\n", + val, num); + goto put_child; + } + config[num].out_format_drv_bits &= ~0x07; + config[num].out_format_drv_bits |= val & 0x07; + /* Always enable the SYNC feature */ + config[num].out_format_drv_bits |= 0x08; + } + + if (!of_property_read_u32(child, "silabs,common-mode", &val)) { + if (val > 0xf) { + dev_err(&client->dev, + "invalid silabs,common-mode %u\n", + val); + goto put_child; + } + config[num].out_cm_ampl_bits &= 0xf0; + config[num].out_cm_ampl_bits |= val & 0x0f; + } + + if (!of_property_read_u32(child, "silabs,amplitude", &val)) { + if (val > 0xf) { + dev_err(&client->dev, + "invalid silabs,amplitude %u\n", + val); + goto put_child; + } + config[num].out_cm_ampl_bits &= 0x0f; + config[num].out_cm_ampl_bits |= (val << 4) & 0xf0; + } + + if (of_property_read_bool(child, "silabs,disable-high")) + config[num].out_format_drv_bits |= 0x10; + + config[num].synth_master = + of_property_read_bool(child, "silabs,synth-master"); + + config[num].always_on = + of_property_read_bool(child, "always-on"); + } + + return 0; + +put_child: + of_node_put(child); + return -EINVAL; +} + +/* + * If not pre-configured, calculate and set the PLL configuration manually. + * For low-jitter performance, the PLL should be set such that the synthesizers + * only need integer division. + * Without any user guidance, we'll set the PLL to 14GHz, which still allows + * the chip to generate any frequency on its outputs, but jitter performance + * may be sub-optimal. 
+ */ +static int si5341_initialize_pll(struct clk_si5341 *data) +{ + struct device_node *np = data->i2c_client->dev.of_node; + u32 m_num = 0; + u32 m_den = 0; + + if (of_property_read_u32(np, "silabs,pll-m-num", &m_num)) { + dev_err(&data->i2c_client->dev, + "PLL configuration requires silabs,pll-m-num\n"); + } + if (of_property_read_u32(np, "silabs,pll-m-den", &m_den)) { + dev_err(&data->i2c_client->dev, + "PLL configuration requires silabs,pll-m-den\n"); + } + + if (!m_num || !m_den) { + dev_err(&data->i2c_client->dev, + "PLL configuration invalid, assume 14GHz\n"); + m_den = clk_get_rate(data->pxtal) / 10; + m_num = 1400000000; + } + + return si5341_encode_44_32(data->regmap, + SI5341_PLL_M_NUM, m_num, m_den); +} + +static int si5341_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct clk_si5341 *data; + struct clk_init_data init; + const char *root_clock_name; + const char *synth_clock_names[SI5341_NUM_SYNTH]; + int err; + unsigned int i; + struct clk_si5341_output_config config[SI5341_MAX_NUM_OUTPUTS]; + bool initialization_required; + + data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->i2c_client = client; + + data->pxtal = devm_clk_get(&client->dev, "xtal"); + if (IS_ERR(data->pxtal)) { + if (PTR_ERR(data->pxtal) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + dev_err(&client->dev, "Missing xtal clock input\n"); + } + + err = si5341_dt_parse_dt(client, config); + if (err) + return err; + + if (of_property_read_string(client->dev.of_node, "clock-output-names", + &init.name)) + init.name = client->dev.of_node->name; + root_clock_name = init.name; + + data->regmap = devm_regmap_init_i2c(client, &si5341_regmap_config); + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); + + i2c_set_clientdata(client, data); + + err = si5341_probe_chip_id(data); + if (err < 0) + return err; + + /* "Activate" the xtal (usually a fixed clock) */ + clk_prepare_enable(data->pxtal); + + if (of_property_read_bool(client->dev.of_node, "silabs,reprogram")) { + initialization_required = true; + } else { + err = si5341_is_programmed_already(data); + if (err < 0) + return err; + + initialization_required = !err; + } + + if (initialization_required) { + /* Populate the regmap cache in preparation for "cache only" */ + err = si5341_read_settings(data); + if (err < 0) + return err; + + err = si5341_send_preamble(data); + if (err < 0) + return err; + + /* + * We intend to send all 'final' register values in a single + * transaction. So cache all register writes until we're done + * configuring. 
+ */ + regcache_cache_only(data->regmap, true); + + /* Write the configuration pairs from the firmware blob */ + err = si5341_write_multiple(data, si5341_reg_defaults, + ARRAY_SIZE(si5341_reg_defaults)); + if (err < 0) + return err; + + /* PLL configuration is required */ + err = si5341_initialize_pll(data); + if (err < 0) + return err; + } + + /* Register the PLL */ + data->pxtal_name = __clk_get_name(data->pxtal); + init.parent_names = &data->pxtal_name; + init.num_parents = 1; /* For now, only XTAL input supported */ + init.ops = &si5341_clk_ops; + init.flags = 0; + data->hw.init = &init; + + err = devm_clk_hw_register(&client->dev, &data->hw); + if (err) { + dev_err(&client->dev, "clock registration failed\n"); + return err; + } + + init.num_parents = 1; + init.parent_names = &root_clock_name; + init.ops = &si5341_synth_clk_ops; + for (i = 0; i < data->num_synth; ++i) { + synth_clock_names[i] = devm_kasprintf(&client->dev, GFP_KERNEL, + "%s.N%u", client->dev.of_node->name, i); + init.name = synth_clock_names[i]; + data->synth[i].index = i; + data->synth[i].data = data; + data->synth[i].hw.init = &init; + err = devm_clk_hw_register(&client->dev, &data->synth[i].hw); + if (err) { + dev_err(&client->dev, + "synth N%u registration failed\n", i); + } + } + + init.num_parents = data->num_synth; + init.parent_names = synth_clock_names; + init.ops = &si5341_output_clk_ops; + for (i = 0; i < data->num_outputs; ++i) { + init.name = kasprintf(GFP_KERNEL, "%s.%d", + client->dev.of_node->name, i); + init.flags = config[i].synth_master ? CLK_SET_RATE_PARENT : 0; + data->clk[i].index = i; + data->clk[i].data = data; + data->clk[i].hw.init = &init; + if (config[i].out_format_drv_bits & 0x07) { + regmap_write(data->regmap, + SI5341_OUT_FORMAT(&data->clk[i]), + config[i].out_format_drv_bits); + regmap_write(data->regmap, + SI5341_OUT_CM(&data->clk[i]), + config[i].out_cm_ampl_bits); + } + err = devm_clk_hw_register(&client->dev, &data->clk[i].hw); + kfree(init.name); /* clock framework made a copy of the name */ + if (err) { + dev_err(&client->dev, + "output %u registration failed\n", i); + return err; + } + if (config[i].always_on) + clk_prepare(data->clk[i].hw.clk); + } + + err = of_clk_add_hw_provider(client->dev.of_node, of_clk_si5341_get, + data); + if (err) { + dev_err(&client->dev, "unable to add clk provider\n"); + return err; + } + + if (initialization_required) { + /* Synchronize */ + regcache_cache_only(data->regmap, false); + err = regcache_sync(data->regmap); + if (err < 0) + return err; + + err = si5341_finalize_defaults(data); + if (err < 0) + return err; + } + + /* Free the names, clk framework makes copies */ + for (i = 0; i < data->num_synth; ++i) + devm_kfree(&client->dev, (void *)synth_clock_names[i]); + + return 0; +} + +static const struct i2c_device_id si5341_id[] = { + { "si5340", 0 }, + { "si5341", 1 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, si5341_id); + +static const struct of_device_id clk_si5341_of_match[] = { + { .compatible = "silabs,si5340" }, + { .compatible = "silabs,si5341" }, + { } +}; +MODULE_DEVICE_TABLE(of, clk_si5341_of_match); + +static struct i2c_driver si5341_driver = { + .driver = { + .name = "si5341", + .of_match_table = clk_si5341_of_match, + }, + .probe = si5341_probe, + .id_table = si5341_id, +}; +module_i2c_driver(si5341_driver); + +MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>"); +MODULE_DESCRIPTION("Si5341 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c index 64e607f3232a..d9ec9086184d 100644 --- 
a/drivers/clk/clk-si544.c +++ b/drivers/clk/clk-si544.c @@ -7,6 +7,7 @@ #include <linux/clk-provider.h> #include <linux/delay.h> +#include <linux/math64.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/regmap.h> @@ -50,6 +51,11 @@ /* Lowest frequency synthesizeable using only the HS divider */ #define MIN_HSDIV_FREQ (FVCO_MIN / HS_DIV_MAX) +/* Range and interpretation of the adjustment value */ +#define DELTA_M_MAX 8161512 +#define DELTA_M_FRAC_NUM 19 +#define DELTA_M_FRAC_DEN 20000 + enum si544_speed_grade { si544a, si544b, @@ -71,12 +77,14 @@ struct clk_si544 { * @hs_div: 1st divider, 5..2046, must be even when >33 * @ls_div_bits: 2nd divider, as 2^x, range 0..5 * If ls_div_bits is non-zero, hs_div must be even + * @delta_m: Frequency shift for small -950..+950 ppm changes, 24 bit */ struct clk_si544_muldiv { u32 fb_div_frac; u16 fb_div_int; u16 hs_div; u8 ls_div_bits; + s32 delta_m; }; /* Enables or disables the output driver */ @@ -134,9 +142,30 @@ static int si544_get_muldiv(struct clk_si544 *data, settings->fb_div_int = reg[4] | (reg[5] & 0x07) << 8; settings->fb_div_frac = reg[0] | reg[1] << 8 | reg[2] << 16 | reg[3] << 24; + + err = regmap_bulk_read(data->regmap, SI544_REG_ADPLL_DELTA_M0, reg, 3); + if (err) + return err; + + /* Interpret as 24-bit signed number */ + settings->delta_m = reg[0] << 8 | reg[1] << 16 | reg[2] << 24; + settings->delta_m >>= 8; + return 0; } +static int si544_set_delta_m(struct clk_si544 *data, s32 delta_m) +{ + u8 reg[3]; + + reg[0] = delta_m; + reg[1] = delta_m >> 8; + reg[2] = delta_m >> 16; + + return regmap_bulk_write(data->regmap, SI544_REG_ADPLL_DELTA_M0, + reg, 3); +} + static int si544_set_muldiv(struct clk_si544 *data, struct clk_si544_muldiv *settings) { @@ -238,11 +267,15 @@ static int si544_calc_muldiv(struct clk_si544_muldiv *settings, do_div(vco, FXO); settings->fb_div_frac = vco; + /* Reset the frequency adjustment */ + settings->delta_m = 0; + return 0; } /* Calculate resulting frequency given the register settings */ -static unsigned long si544_calc_rate(struct clk_si544_muldiv *settings) +static unsigned long si544_calc_center_rate( + const struct clk_si544_muldiv *settings) { u32 d = settings->hs_div * BIT(settings->ls_div_bits); u64 vco; @@ -261,6 +294,25 @@ static unsigned long si544_calc_rate(struct clk_si544_muldiv *settings) return vco; } +static unsigned long si544_calc_rate(const struct clk_si544_muldiv *settings) +{ + unsigned long rate = si544_calc_center_rate(settings); + s64 delta = (s64)rate * (DELTA_M_FRAC_NUM * settings->delta_m); + + /* + * The clock adjustment is much smaller than 1 Hz, round to the + * nearest multiple. Apparently div64_s64 rounds towards zero, hence + * check the sign and adjust into the proper direction. 
+ */ + if (settings->delta_m < 0) + delta -= ((s64)DELTA_M_MAX * DELTA_M_FRAC_DEN) / 2; + else + delta += ((s64)DELTA_M_MAX * DELTA_M_FRAC_DEN) / 2; + delta = div64_s64(delta, ((s64)DELTA_M_MAX * DELTA_M_FRAC_DEN)); + + return rate + delta; +} + static unsigned long si544_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -279,33 +331,60 @@ static long si544_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct clk_si544 *data = to_clk_si544(hw); - struct clk_si544_muldiv settings; - int err; if (!is_valid_frequency(data, rate)) return -EINVAL; - err = si544_calc_muldiv(&settings, rate); - if (err) - return err; + /* The accuracy is less than 1 Hz, so any rate is possible */ + return rate; +} - return si544_calc_rate(&settings); +/* Calculates the maximum "small" change, 950 * rate / 1000000 */ +static unsigned long si544_max_delta(unsigned long rate) +{ + u64 num = rate; + + num *= DELTA_M_FRAC_NUM; + do_div(num, DELTA_M_FRAC_DEN); + + return num; +} + +static s32 si544_calc_delta(s32 delta, s32 max_delta) +{ + s64 n = (s64)delta * DELTA_M_MAX; + + return div_s64(n, max_delta); } -/* - * Update output frequency for "big" frequency changes - */ static int si544_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_si544 *data = to_clk_si544(hw); struct clk_si544_muldiv settings; + unsigned long center; + long max_delta; + long delta; unsigned int old_oe_state; int err; if (!is_valid_frequency(data, rate)) return -EINVAL; + /* Try using the frequency adjustment feature for a <= 950ppm change */ + err = si544_get_muldiv(data, &settings); + if (err) + return err; + + center = si544_calc_center_rate(&settings); + max_delta = si544_max_delta(center); + delta = rate - center; + + if (abs(delta) <= max_delta) + return si544_set_delta_m(data, + si544_calc_delta(delta, max_delta)); + + /* Too big for the delta adjustment, need to reprogram */ err = si544_calc_muldiv(&settings, rate); if (err) return err; @@ -321,6 +400,9 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate, if (err < 0) return err; + err = si544_set_delta_m(data, settings.delta_m); + if (err < 0) + return err; err = si544_set_muldiv(data, &settings); if (err < 0) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 87b410d6e51d..c0990703ce54 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1324,10 +1324,7 @@ static void clk_core_init_rate_req(struct clk_core * const core, static bool clk_core_can_round(struct clk_core * const core) { - if (core->ops->determine_rate || core->ops->round_rate) - return true; - - return false; + return core->ops->determine_rate || core->ops->round_rate; } static int clk_core_round_rate_nolock(struct clk_core *core, @@ -2194,7 +2191,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate) EXPORT_SYMBOL_GPL(clk_set_rate); /** - * clk_set_rate_exclusive - specify a new rate get exclusive control + * clk_set_rate_exclusive - specify a new rate and get exclusive control * @clk: the clk whose rate is being changed * @rate: the new rate for clk * @@ -2202,7 +2199,7 @@ EXPORT_SYMBOL_GPL(clk_set_rate); * within a critical section * * This can be used initially to ensure that at least 1 consumer is - * statisfied when several consumers are competing for exclusivity over the + * satisfied when several consumers are competing for exclusivity over the * same clock provider. * * The exclusivity is not applied if setting the rate failed. 
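[Editor's illustration, not part of the patch] The si544 DELTA_M path added above maps a requested rate that lies within ±950 ppm (DELTA_M_FRAC_NUM/DELTA_M_FRAC_DEN = 19/20000) of the current center rate onto a signed 24-bit register value whose full scale is ±DELTA_M_MAX (8161512). The stand-alone sketch below mirrors that scaling in plain C so it can be checked outside the kernel; the helper names, sample frequencies and output formatting are illustrative assumptions only, not part of the driver.

/* Host-side sketch of the si544 DELTA_M arithmetic (mirrors si544_max_delta()
 * and si544_calc_delta() from the hunks above).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DELTA_M_MAX      8161512   /* full-scale 24-bit adjustment value */
#define DELTA_M_FRAC_NUM 19        /* 19/20000 = 950 ppm */
#define DELTA_M_FRAC_DEN 20000

/* Largest "small" change from the center rate: 950 ppm of it */
static int64_t max_delta(uint64_t center_hz)
{
	return (int64_t)(center_hz * DELTA_M_FRAC_NUM / DELTA_M_FRAC_DEN);
}

/* Scale a Hz offset into the signed 24-bit DELTA_M register value */
static int32_t calc_delta_m(int64_t delta_hz, int64_t max_delta_hz)
{
	return (int32_t)(delta_hz * DELTA_M_MAX / max_delta_hz);
}

int main(void)
{
	uint64_t center = 156250000;   /* example: current center rate in Hz */
	uint64_t target = 156260000;   /* example: requested rate, +64 ppm */
	int64_t delta = (int64_t)target - (int64_t)center;
	int64_t max = max_delta(center);

	if (llabs(delta) <= max)
		printf("delta_m = %ld\n", (long)calc_delta_m(delta, max));
	else
		printf("out of range: the muldiv registers must be reprogrammed\n");
	return 0;
}

The kernel code takes the same decision in si544_set_rate(): offsets within the 950 ppm window are applied glitch-free through DELTA_M, anything larger falls back to recomputing and writing the full divider settings.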
@@ -2997,20 +2994,65 @@ static int clk_flags_show(struct seq_file *s, void *data) } DEFINE_SHOW_ATTRIBUTE(clk_flags); +static void possible_parent_show(struct seq_file *s, struct clk_core *core, + unsigned int i, char terminator) +{ + struct clk_core *parent; + + /* + * Go through the following options to fetch a parent's name. + * + * 1. Fetch the registered parent clock and use its name + * 2. Use the global (fallback) name if specified + * 3. Use the local fw_name if provided + * 4. Fetch parent clock's clock-output-name if DT index was set + * + * This may still fail in some cases, such as when the parent is + * specified directly via a struct clk_hw pointer, but it isn't + * registered (yet). + */ + parent = clk_core_get_parent_by_index(core, i); + if (parent) + seq_printf(s, "%s", parent->name); + else if (core->parents[i].name) + seq_printf(s, "%s", core->parents[i].name); + else if (core->parents[i].fw_name) + seq_printf(s, "<%s>(fw)", core->parents[i].fw_name); + else if (core->parents[i].index >= 0) + seq_printf(s, "%s", + of_clk_get_parent_name(core->of_node, + core->parents[i].index)); + else + seq_puts(s, "(missing)"); + + seq_putc(s, terminator); +} + static int possible_parents_show(struct seq_file *s, void *data) { struct clk_core *core = s->private; int i; for (i = 0; i < core->num_parents - 1; i++) - seq_printf(s, "%s ", core->parents[i].name); + possible_parent_show(s, core, i, ' '); - seq_printf(s, "%s\n", core->parents[i].name); + possible_parent_show(s, core, i, '\n'); return 0; } DEFINE_SHOW_ATTRIBUTE(possible_parents); +static int current_parent_show(struct seq_file *s, void *data) +{ + struct clk_core *core = s->private; + + if (core->parent) + seq_printf(s, "%s\n", core->parent->name); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(current_parent); + static int clk_duty_cycle_show(struct seq_file *s, void *data) { struct clk_core *core = s->private; @@ -3043,6 +3085,10 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) debugfs_create_file("clk_duty_cycle", 0444, root, core, &clk_duty_cycle_fops); + if (core->num_parents > 0) + debugfs_create_file("clk_parent", 0444, root, core, + &current_parent_fops); + if (core->num_parents > 1) debugfs_create_file("clk_possible_parents", 0444, root, core, &possible_parents_fops); @@ -4038,6 +4084,7 @@ struct of_clk_provider { void *data; }; +extern struct of_device_id __clk_of_table; static const struct of_device_id __clk_of_table_sentinel __used __section(__clk_of_table_end); diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h index d8400d623b34..2d801900cad5 100644 --- a/drivers/clk/clk.h +++ b/drivers/clk/clk.h @@ -33,10 +33,6 @@ clk_hw_create_clk(struct device *dev, struct clk_hw *hw, const char *dev_id, { return (struct clk *)hw; } -static struct clk_hw *__clk_get_hw(struct clk *clk) -{ - return (struct clk_hw *)clk; -} static inline void __clk_put(struct clk *clk) { } #endif diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c index 7b35a11238ca..25c863da32c7 100644 --- a/drivers/clk/imx/clk-busy.c +++ b/drivers/clk/imx/clk-busy.c @@ -72,13 +72,14 @@ static const struct clk_ops clk_busy_divider_ops = { .set_rate = clk_busy_divider_set_rate, }; -struct clk *imx_clk_busy_divider(const char *name, const char *parent_name, +struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift) { struct clk_busy_divider *busy; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; 
busy = kzalloc(sizeof(*busy), GFP_KERNEL); if (!busy) @@ -101,11 +102,15 @@ struct clk *imx_clk_busy_divider(const char *name, const char *parent_name, busy->div.hw.init = &init; - clk = clk_register(NULL, &busy->div.hw); - if (IS_ERR(clk)) + hw = &busy->div.hw; + + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(busy); + return ERR_PTR(ret); + } - return clk; + return hw; } struct clk_busy_mux { @@ -146,13 +151,14 @@ static const struct clk_ops clk_busy_mux_ops = { .set_parent = clk_busy_mux_set_parent, }; -struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, +struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift, const char * const *parent_names, int num_parents) { struct clk_busy_mux *busy; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; busy = kzalloc(sizeof(*busy), GFP_KERNEL); if (!busy) @@ -175,9 +181,13 @@ struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, busy->mux.hw.init = &init; - clk = clk_register(NULL, &busy->mux.hw); - if (IS_ERR(clk)) + hw = &busy->mux.hw; + + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(busy); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk-cpu.c b/drivers/clk/imx/clk-cpu.c index 00d026eb7891..cb182bec79ba 100644 --- a/drivers/clk/imx/clk-cpu.c +++ b/drivers/clk/imx/clk-cpu.c @@ -69,13 +69,14 @@ static const struct clk_ops clk_cpu_ops = { .set_rate = clk_cpu_set_rate, }; -struct clk *imx_clk_cpu(const char *name, const char *parent_name, +struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name, struct clk *div, struct clk *mux, struct clk *pll, struct clk *step) { struct clk_cpu *cpu; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; cpu = kzalloc(sizeof(*cpu), GFP_KERNEL); if (!cpu) @@ -93,10 +94,13 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name, init.num_parents = 1; cpu->hw.init = &init; + hw = &cpu->hw; - clk = clk_register(NULL, &cpu->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(cpu); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c index bab46c6fd3db..4b17b91504ed 100644 --- a/drivers/clk/imx/clk-fixup-div.c +++ b/drivers/clk/imx/clk-fixup-div.c @@ -85,13 +85,14 @@ static const struct clk_ops clk_fixup_div_ops = { .set_rate = clk_fixup_div_set_rate, }; -struct clk *imx_clk_fixup_divider(const char *name, const char *parent, +struct clk_hw *imx_clk_hw_fixup_divider(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width, void (*fixup)(u32 *val)) { struct clk_fixup_div *fixup_div; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; if (!fixup) return ERR_PTR(-EINVAL); @@ -114,9 +115,13 @@ struct clk *imx_clk_fixup_divider(const char *name, const char *parent, fixup_div->ops = &clk_divider_ops; fixup_div->fixup = fixup; - clk = clk_register(NULL, &fixup_div->divider.hw); - if (IS_ERR(clk)) + hw = &fixup_div->divider.hw; + + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(fixup_div); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c index 1aa3e8d9abf3..b569d919c645 100644 --- a/drivers/clk/imx/clk-fixup-mux.c +++ b/drivers/clk/imx/clk-fixup-mux.c @@ -63,13 +63,14 @@ static const struct clk_ops clk_fixup_mux_ops = { .set_parent = 
clk_fixup_mux_set_parent, }; -struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, +struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)) { struct clk_fixup_mux *fixup_mux; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; if (!fixup) return ERR_PTR(-EINVAL); @@ -92,9 +93,13 @@ struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, fixup_mux->ops = &clk_mux_ops; fixup_mux->fixup = fixup; - clk = clk_register(NULL, &fixup_mux->mux.hw); - if (IS_ERR(clk)) + hw = &fixup_mux->mux.hw; + + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(fixup_mux); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk-gate-exclusive.c b/drivers/clk/imx/clk-gate-exclusive.c index cffa4966568d..77342893bb71 100644 --- a/drivers/clk/imx/clk-gate-exclusive.c +++ b/drivers/clk/imx/clk-gate-exclusive.c @@ -55,13 +55,14 @@ static const struct clk_ops clk_gate_exclusive_ops = { .is_enabled = clk_gate_exclusive_is_enabled, }; -struct clk *imx_clk_gate_exclusive(const char *name, const char *parent, +struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent, void __iomem *reg, u8 shift, u32 exclusive_mask) { struct clk_gate_exclusive *exgate; struct clk_gate *gate; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; if (exclusive_mask == 0) return ERR_PTR(-EINVAL); @@ -83,9 +84,13 @@ struct clk *imx_clk_gate_exclusive(const char *name, const char *parent, gate->hw.init = &init; exgate->exclusive_mask = exclusive_mask; - clk = clk_register(NULL, &gate->hw); - if (IS_ERR(clk)) - kfree(exgate); + hw = &gate->hw; - return clk; + ret = clk_hw_register(NULL, hw); + if (ret) { + kfree(gate); + return ERR_PTR(ret); + } + + return hw; } diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c index ec08fda547a3..7d44ce814806 100644 --- a/drivers/clk/imx/clk-gate2.c +++ b/drivers/clk/imx/clk-gate2.c @@ -122,15 +122,16 @@ static const struct clk_ops clk_gate2_ops = { .is_enabled = clk_gate2_is_enabled, }; -struct clk *clk_register_gate2(struct device *dev, const char *name, +struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 clk_gate2_flags, spinlock_t *lock, unsigned int *share_count) { struct clk_gate2 *gate; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; gate = kzalloc(sizeof(struct clk_gate2), GFP_KERNEL); if (!gate) @@ -151,10 +152,13 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, init.num_parents = parent_name ? 
1 : 0; gate->hw.init = &init; + hw = &gate->hw; - clk = clk_register(dev, &gate->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(gate); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index d5de733f336e..60f2de851f39 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c @@ -8,6 +8,7 @@ #include <linux/types.h> #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of.h> @@ -87,8 +88,8 @@ static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", }; static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", }; static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", }; -static struct clk *clk[IMX6QDL_CLK_END]; -static struct clk_onecell_data clk_data; +static struct clk_hw **hws; +static struct clk_hw_onecell_data *clk_hw_data; static struct clk_div_table clk_enet_ref_table[] = { { .val = 0, .div = 20, }, @@ -138,12 +139,13 @@ static inline int clk_on_imx6dl(void) return of_machine_is_compatible("fsl,imx6dl"); } -static struct clk ** const uart_clks[] __initconst = { - &clk[IMX6QDL_CLK_UART_IPG], - &clk[IMX6QDL_CLK_UART_SERIAL], - NULL +static const int uart_clk_ids[] __initconst = { + IMX6QDL_CLK_UART_IPG, + IMX6QDL_CLK_UART_SERIAL, }; +static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata; + static int ldb_di_sel_by_clock_id(int clock_id) { switch (clock_id) { @@ -254,25 +256,14 @@ static bool pll6_bypassed(struct device_node *node) return false; } -#define CCM_CCDR 0x04 #define CCM_CCSR 0x0c #define CCM_CS2CDR 0x2c -#define CCDR_MMDC_CH1_MASK BIT(16) #define CCSR_PLL3_SW_CLK_SEL BIT(0) #define CS2CDR_LDB_DI0_CLK_SEL_SHIFT 9 #define CS2CDR_LDB_DI1_CLK_SEL_SHIFT 12 -static void __init imx6q_mmdc_ch1_mask_handshake(void __iomem *ccm_base) -{ - unsigned int reg; - - reg = readl_relaxed(ccm_base + CCM_CCDR); - reg |= CCDR_MMDC_CH1_MASK; - writel_relaxed(reg, ccm_base + CCM_CCDR); -} - /* * The only way to disable the MMDC_CH1 clock is to move it to pll3_sw_clk * via periph2_clk2_sel and then to disable pll3_sw_clk by selecting the @@ -282,14 +273,8 @@ static void mmdc_ch1_disable(void __iomem *ccm_base) { unsigned int reg; - clk_set_parent(clk[IMX6QDL_CLK_PERIPH2_CLK2_SEL], - clk[IMX6QDL_CLK_PLL3_USB_OTG]); - - /* - * Handshake with mmdc_ch1 module must be masked when changing - * periph2_clk_sel. 
- */ - clk_set_parent(clk[IMX6QDL_CLK_PERIPH2], clk[IMX6QDL_CLK_PERIPH2_CLK2]); + clk_set_parent(hws[IMX6QDL_CLK_PERIPH2_CLK2_SEL]->clk, + hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk); /* Disable pll3_sw_clk by selecting the bypass clock source */ reg = readl_relaxed(ccm_base + CCM_CCSR); @@ -305,8 +290,6 @@ static void mmdc_ch1_reenable(void __iomem *ccm_base) reg = readl_relaxed(ccm_base + CCM_CCSR); reg &= ~CCSR_PLL3_SW_CLK_SEL; writel_relaxed(reg, ccm_base + CCM_CCSR); - - clk_set_parent(clk[IMX6QDL_CLK_PERIPH2], clk[IMX6QDL_CLK_PERIPH2_PRE]); } /* @@ -365,8 +348,8 @@ static void init_ldb_clks(struct device_node *np, void __iomem *ccm_base) /* Only switch to or from pll2_pfd2_396m if it is disabled */ if ((sel[i][0] == 2 || sel[i][3] == 2) && - (clk_get_parent(clk[IMX6QDL_CLK_PERIPH_PRE]) == - clk[IMX6QDL_CLK_PLL2_PFD2_396M])) { + (clk_get_parent(hws[IMX6QDL_CLK_PERIPH_PRE]->clk) == + hws[IMX6QDL_CLK_PLL2_PFD2_396M]->clk)) { pr_err("ccm: ldb_di%d_sel: couldn't disable pll2_pfd2_396m\n", i); sel[i][3] = sel[i][2] = sel[i][1] = sel[i][0]; @@ -418,8 +401,8 @@ static void disable_anatop_clocks(void __iomem *anatop_base) /* Make sure PLL2 PFDs 0-2 are gated */ reg = readl_relaxed(anatop_base + CCM_ANALOG_PFD_528); /* Cannot gate PFD2 if pll2_pfd2_396m is the parent of MMDC clock */ - if (clk_get_parent(clk[IMX6QDL_CLK_PERIPH_PRE]) == - clk[IMX6QDL_CLK_PLL2_PFD2_396M]) + if (clk_get_parent(hws[IMX6QDL_CLK_PERIPH_PRE]->clk) == + hws[IMX6QDL_CLK_PLL2_PFD2_396M]->clk) reg |= PFD0_CLKGATE | PFD1_CLKGATE; else reg |= PFD0_CLKGATE | PFD1_CLKGATE | PFD2_CLKGATE; @@ -436,31 +419,45 @@ static void disable_anatop_clocks(void __iomem *anatop_base) writel_relaxed(reg, anatop_base + CCM_ANALOG_PLL_VIDEO); } +static struct clk_hw * __init imx6q_obtain_fixed_clk_hw(struct device_node *np, + const char *name, + unsigned long rate) +{ + struct clk *clk = of_clk_get_by_name(np, name); + struct clk_hw *hw; + + if (IS_ERR(clk)) + hw = imx_obtain_fixed_clock_hw(name, rate); + else + hw = __clk_get_hw(clk); + + return hw; +} + static void __init imx6q_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *anatop_base, *base; int ret; + int i; + + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, + IMX6QDL_CLK_END), GFP_KERNEL); + if (WARN_ON(!clk_hw_data)) + return; - clk[IMX6QDL_CLK_DUMMY] = imx_clk_fixed("dummy", 0); - clk[IMX6QDL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); - if (IS_ERR(clk[IMX6QDL_CLK_CKIL])) - clk[IMX6QDL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0); - clk[IMX6QDL_CLK_CKIH] = of_clk_get_by_name(ccm_node, "ckih1"); - if (IS_ERR(clk[IMX6QDL_CLK_CKIH])) - clk[IMX6QDL_CLK_CKIH] = imx_obtain_fixed_clock("ckih1", 0); - clk[IMX6QDL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc"); - if (IS_ERR(clk[IMX6QDL_CLK_OSC])) - clk[IMX6QDL_CLK_OSC] = imx_obtain_fixed_clock("osc", 0); + clk_hw_data->num = IMX6QDL_CLK_END; + hws = clk_hw_data->hws; - /* Clock source from external clock via CLK1/2 PADs */ - clk[IMX6QDL_CLK_ANACLK1] = of_clk_get_by_name(ccm_node, "anaclk1"); - if (IS_ERR(clk[IMX6QDL_CLK_ANACLK1])) - clk[IMX6QDL_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0); + hws[IMX6QDL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0); - clk[IMX6QDL_CLK_ANACLK2] = of_clk_get_by_name(ccm_node, "anaclk2"); - if (IS_ERR(clk[IMX6QDL_CLK_ANACLK2])) - clk[IMX6QDL_CLK_ANACLK2] = imx_obtain_fixed_clock("anaclk2", 0); + hws[IMX6QDL_CLK_CKIL] = imx6q_obtain_fixed_clk_hw(ccm_node, "ckil", 0); + hws[IMX6QDL_CLK_CKIH] = imx6q_obtain_fixed_clk_hw(ccm_node, "ckih1", 0); + hws[IMX6QDL_CLK_OSC] = 
imx6q_obtain_fixed_clk_hw(ccm_node, "osc", 0); + + /* Clock source from external clock via CLK1/2 PADs */ + hws[IMX6QDL_CLK_ANACLK1] = imx6q_obtain_fixed_clk_hw(ccm_node, "anaclk1", 0); + hws[IMX6QDL_CLK_ANACLK2] = imx6q_obtain_fixed_clk_hw(ccm_node, "anaclk2", 0); np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); anatop_base = base = of_iomap(np, 0); @@ -475,47 +472,47 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) video_div_table[3].div = 1; } - clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[IMX6QDL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[IMX6QDL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[IMX6QDL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[IMX6QDL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[IMX6QDL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[IMX6QDL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6QDL_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ - clk[IMX6QDL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); - clk[IMX6QDL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); - clk[IMX6QDL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); - clk[IMX6QDL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); - clk[IMX6QDL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); - clk[IMX6QDL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); - clk[IMX6QDL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); - - clk[IMX6QDL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, 
ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + hws[IMX6QDL_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + hws[IMX6QDL_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + hws[IMX6QDL_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + hws[IMX6QDL_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + hws[IMX6QDL_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + hws[IMX6QDL_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); + + hws[IMX6QDL_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); /* Do not bypass PLLs initially */ - clk_set_parent(clk[IMX6QDL_PLL1_BYPASS], clk[IMX6QDL_CLK_PLL1]); - clk_set_parent(clk[IMX6QDL_PLL2_BYPASS], clk[IMX6QDL_CLK_PLL2]); - clk_set_parent(clk[IMX6QDL_PLL3_BYPASS], clk[IMX6QDL_CLK_PLL3]); - clk_set_parent(clk[IMX6QDL_PLL4_BYPASS], clk[IMX6QDL_CLK_PLL4]); - clk_set_parent(clk[IMX6QDL_PLL5_BYPASS], clk[IMX6QDL_CLK_PLL5]); - clk_set_parent(clk[IMX6QDL_PLL6_BYPASS], clk[IMX6QDL_CLK_PLL6]); - clk_set_parent(clk[IMX6QDL_PLL7_BYPASS], clk[IMX6QDL_CLK_PLL7]); - - clk[IMX6QDL_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); - clk[IMX6QDL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); - clk[IMX6QDL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); - clk[IMX6QDL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); - clk[IMX6QDL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); - clk[IMX6QDL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); - clk[IMX6QDL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); + 
clk_set_parent(hws[IMX6QDL_PLL1_BYPASS]->clk, hws[IMX6QDL_CLK_PLL1]->clk); + clk_set_parent(hws[IMX6QDL_PLL2_BYPASS]->clk, hws[IMX6QDL_CLK_PLL2]->clk); + clk_set_parent(hws[IMX6QDL_PLL3_BYPASS]->clk, hws[IMX6QDL_CLK_PLL3]->clk); + clk_set_parent(hws[IMX6QDL_PLL4_BYPASS]->clk, hws[IMX6QDL_CLK_PLL4]->clk); + clk_set_parent(hws[IMX6QDL_PLL5_BYPASS]->clk, hws[IMX6QDL_CLK_PLL5]->clk); + clk_set_parent(hws[IMX6QDL_PLL6_BYPASS]->clk, hws[IMX6QDL_CLK_PLL6]->clk); + clk_set_parent(hws[IMX6QDL_PLL7_BYPASS]->clk, hws[IMX6QDL_CLK_PLL7]->clk); + + hws[IMX6QDL_CLK_PLL1_SYS] = imx_clk_hw_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); + hws[IMX6QDL_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); + hws[IMX6QDL_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); + hws[IMX6QDL_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); + hws[IMX6QDL_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); + hws[IMX6QDL_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); + hws[IMX6QDL_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); /* * Bit 20 is the reserved and read-only bit, we do this only for: @@ -523,15 +520,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) * - Keep refcount when do usbphy clk_enable/disable, in that case, * the clk framework may need to enable/disable usbphy's parent */ - clk[IMX6QDL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); - clk[IMX6QDL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); + hws[IMX6QDL_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); + hws[IMX6QDL_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore. 
*/ - clk[IMX6QDL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); - clk[IMX6QDL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); + hws[IMX6QDL_CLK_USBPHY1_GATE] = imx_clk_hw_gate("usbphy1_gate", "dummy", base + 0x10, 6); + hws[IMX6QDL_CLK_USBPHY2_GATE] = imx_clk_hw_gate("usbphy2_gate", "dummy", base + 0x20, 6); /* * The ENET PLL is special in that is has multiple outputs with @@ -545,22 +542,22 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) * */ if (!pll6_bypassed(ccm_node)) { - clk[IMX6QDL_CLK_SATA_REF] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5); - clk[IMX6QDL_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4); - clk[IMX6QDL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, + hws[IMX6QDL_CLK_SATA_REF] = imx_clk_hw_fixed_factor("sata_ref", "pll6_enet", 1, 5); + hws[IMX6QDL_CLK_PCIE_REF] = imx_clk_hw_fixed_factor("pcie_ref", "pll6_enet", 1, 4); + hws[IMX6QDL_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); } else { - clk[IMX6QDL_CLK_SATA_REF] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 1); - clk[IMX6QDL_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 1); - clk[IMX6QDL_CLK_ENET_REF] = imx_clk_fixed_factor("enet_ref", "pll6_enet", 1, 1); + hws[IMX6QDL_CLK_SATA_REF] = imx_clk_hw_fixed_factor("sata_ref", "pll6_enet", 1, 1); + hws[IMX6QDL_CLK_PCIE_REF] = imx_clk_hw_fixed_factor("pcie_ref", "pll6_enet", 1, 1); + hws[IMX6QDL_CLK_ENET_REF] = imx_clk_hw_fixed_factor("enet_ref", "pll6_enet", 1, 1); } - clk[IMX6QDL_CLK_SATA_REF_100M] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20); - clk[IMX6QDL_CLK_PCIE_REF_125M] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); + hws[IMX6QDL_CLK_SATA_REF_100M] = imx_clk_hw_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20); + hws[IMX6QDL_CLK_PCIE_REF_125M] = imx_clk_hw_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); - clk[IMX6QDL_CLK_LVDS1_SEL] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); - clk[IMX6QDL_CLK_LVDS2_SEL] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); + hws[IMX6QDL_CLK_LVDS1_SEL] = imx_clk_hw_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); + hws[IMX6QDL_CLK_LVDS2_SEL] = imx_clk_hw_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); /* * lvds1_gate and lvds2_gate are pseudo-gates. Both can be @@ -572,84 +569,84 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) * it. 
*/ writel(readl(base + 0x160) & ~0x3c00, base + 0x160); - clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12)); - clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13)); + hws[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_hw_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12)); + hws[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_hw_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13)); - clk[IMX6QDL_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); - clk[IMX6QDL_CLK_LVDS2_IN] = imx_clk_gate_exclusive("lvds2_in", "anaclk2", base + 0x160, 13, BIT(11)); + hws[IMX6QDL_CLK_LVDS1_IN] = imx_clk_hw_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); + hws[IMX6QDL_CLK_LVDS2_IN] = imx_clk_hw_gate_exclusive("lvds2_in", "anaclk2", base + 0x160, 13, BIT(11)); /* name parent_name reg idx */ - clk[IMX6QDL_CLK_PLL2_PFD0_352M] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); - clk[IMX6QDL_CLK_PLL2_PFD1_594M] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); - clk[IMX6QDL_CLK_PLL2_PFD2_396M] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); - clk[IMX6QDL_CLK_PLL3_PFD0_720M] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); - clk[IMX6QDL_CLK_PLL3_PFD1_540M] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); - clk[IMX6QDL_CLK_PLL3_PFD2_508M] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); - clk[IMX6QDL_CLK_PLL3_PFD3_454M] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); + hws[IMX6QDL_CLK_PLL2_PFD0_352M] = imx_clk_hw_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); + hws[IMX6QDL_CLK_PLL2_PFD1_594M] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); + hws[IMX6QDL_CLK_PLL2_PFD2_396M] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); + hws[IMX6QDL_CLK_PLL3_PFD0_720M] = imx_clk_hw_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); + hws[IMX6QDL_CLK_PLL3_PFD1_540M] = imx_clk_hw_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); + hws[IMX6QDL_CLK_PLL3_PFD2_508M] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); + hws[IMX6QDL_CLK_PLL3_PFD3_454M] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ - clk[IMX6QDL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); - clk[IMX6QDL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); - clk[IMX6QDL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); - clk[IMX6QDL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); - clk[IMX6QDL_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2); - clk[IMX6QDL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); - clk[IMX6QDL_CLK_VIDEO_27M] = imx_clk_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 20); + hws[IMX6QDL_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); + hws[IMX6QDL_CLK_PLL3_120M] = imx_clk_hw_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); + hws[IMX6QDL_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); + hws[IMX6QDL_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); + hws[IMX6QDL_CLK_TWD] = imx_clk_hw_fixed_factor("twd", "arm", 1, 2); + hws[IMX6QDL_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc", 1, 8); + hws[IMX6QDL_CLK_VIDEO_27M] = imx_clk_hw_fixed_factor("video_27m", "pll3_pfd1_540m", 1, 
20); if (clk_on_imx6dl() || clk_on_imx6qp()) { - clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1); - clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1); + hws[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_hw_fixed_factor("gpu2d_axi", "mmdc_ch0_axi_podf", 1, 1); + hws[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_hw_fixed_factor("gpu3d_axi", "mmdc_ch0_axi_podf", 1, 1); } - clk[IMX6QDL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); - clk[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); - clk[IMX6QDL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); - clk[IMX6QDL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); + hws[IMX6QDL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); + hws[IMX6QDL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); + hws[IMX6QDL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); + hws[IMX6QDL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); /* name reg shift width parent_names num_parents */ - clk[IMX6QDL_CLK_STEP] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); - clk[IMX6QDL_CLK_PLL1_SW] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); - clk[IMX6QDL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); - clk[IMX6QDL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); - clk[IMX6QDL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); - clk[IMX6QDL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); - clk[IMX6QDL_CLK_AXI_SEL] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); - clk[IMX6QDL_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clk[IMX6QDL_CLK_ASRC_SEL] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clk[IMX6QDL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6QDL_CLK_STEP] = imx_clk_hw_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); + hws[IMX6QDL_CLK_PLL1_SW] = imx_clk_hw_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); + hws[IMX6QDL_CLK_PERIPH_PRE] = imx_clk_hw_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); + hws[IMX6QDL_CLK_PERIPH2_PRE] = imx_clk_hw_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, 
ARRAY_SIZE(periph_pre_sels)); + hws[IMX6QDL_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + hws[IMX6QDL_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + hws[IMX6QDL_CLK_AXI_SEL] = imx_clk_hw_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels)); + hws[IMX6QDL_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6QDL_CLK_ASRC_SEL] = imx_clk_hw_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6QDL_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); if (clk_on_imx6q()) { - clk[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); - clk[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); + hws[IMX6QDL_CLK_GPU2D_AXI] = imx_clk_hw_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); + hws[IMX6QDL_CLK_GPU3D_AXI] = imx_clk_hw_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); } if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); - clk[IMX6QDL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); - clk[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels)); - clk[IMX6QDL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2)); + hws[IMX6QDL_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); + hws[IMX6QDL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); + hws[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_hw_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels)); + hws[IMX6QDL_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); + hws[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_hw_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2)); } else if (clk_on_imx6dl()) { - clk[IMX6QDL_CLK_MLB_SEL] = imx_clk_mux("mlb_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); + hws[IMX6QDL_CLK_MLB_SEL] = imx_clk_hw_mux("mlb_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); } else { - clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); + hws[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_hw_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); } - clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); + hws[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_hw_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); if (clk_on_imx6dl()) - clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); + hws[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_hw_mux("gpu2d_core_sel", base + 0x18, 8, 2, gpu3d_shader_sels, 
ARRAY_SIZE(gpu3d_shader_sels)); else - clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); - clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); - clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); + hws[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_hw_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); + hws[IMX6QDL_CLK_IPU1_SEL] = imx_clk_hw_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); + hws[IMX6QDL_CLK_IPU2_SEL] = imx_clk_hw_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); disable_anatop_clocks(anatop_base); - imx6q_mmdc_ch1_mask_handshake(base); + imx_mmdc_mask_handshake(base, 1); if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_hw_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_hw_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); } else { /* * The LDB_DI0/1_SEL muxes are registered read-only due to a hardware @@ -658,322 +655,333 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) */ init_ldb_clks(np, base); - clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_ldb("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); - clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_ldb("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); + hws[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_hw_mux_ldb("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); + hws[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_hw_mux_ldb("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels)); } - clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU2_DI1_PRE_SEL] = imx_clk_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_HSI_TX_SEL] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels)); - clk[IMX6QDL_CLK_PCIE_AXI_SEL] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); + + hws[IMX6QDL_CLK_IPU1_DI0_PRE_SEL] = imx_clk_hw_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_hw_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_hw_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), 
CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU2_DI1_PRE_SEL] = imx_clk_hw_mux_flags("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_HSI_TX_SEL] = imx_clk_hw_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels)); + hws[IMX6QDL_CLK_PCIE_AXI_SEL] = imx_clk_hw_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); + if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels_2, ARRAY_SIZE(ipu1_di0_sels_2), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels_2, ARRAY_SIZE(ipu1_di1_sels_2), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels_2, ARRAY_SIZE(ipu2_di0_sels_2), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels_2, ARRAY_SIZE(ipu2_di1_sels_2), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels_2, ARRAY_SIZE(enfc_sels_2)); - clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels)); - clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); - clk[IMX6QDL_CLK_PRE_AXI] = imx_clk_mux("pre_axi", base + 0x18, 1, 1, pre_axi_sels, ARRAY_SIZE(pre_axi_sels)); + hws[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_hw_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels_2, ARRAY_SIZE(ipu1_di0_sels_2), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_hw_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels_2, ARRAY_SIZE(ipu1_di1_sels_2), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_hw_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels_2, ARRAY_SIZE(ipu2_di0_sels_2), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_hw_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels_2, ARRAY_SIZE(ipu2_di1_sels_2), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6QDL_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6QDL_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6QDL_CLK_USDHC3_SEL] = 
imx_clk_hw_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_hw_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6QDL_CLK_ENFC_SEL] = imx_clk_hw_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels_2, ARRAY_SIZE(enfc_sels_2)); + hws[IMX6QDL_CLK_EIM_SEL] = imx_clk_hw_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels)); + hws[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_hw_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); + hws[IMX6QDL_CLK_PRE_AXI] = imx_clk_hw_mux("pre_axi", base + 0x18, 1, 1, pre_axi_sels, ARRAY_SIZE(pre_axi_sels)); } else { - clk[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT); - clk[IMX6QDL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); - clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup); - clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_IPU1_DI0_SEL] = imx_clk_hw_mux_flags("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU1_DI1_SEL] = imx_clk_hw_mux_flags("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU2_DI0_SEL] = imx_clk_hw_mux_flags("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_IPU2_DI1_SEL] = imx_clk_hw_mux_flags("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels), CLK_SET_RATE_PARENT); + hws[IMX6QDL_CLK_SSI1_SEL] = imx_clk_hw_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_SSI2_SEL] = imx_clk_hw_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_SSI3_SEL] = 
imx_clk_hw_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_USDHC1_SEL] = imx_clk_hw_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_USDHC2_SEL] = imx_clk_hw_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_hw_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_hw_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_ENFC_SEL] = imx_clk_hw_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); + hws[IMX6QDL_CLK_EIM_SEL] = imx_clk_hw_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup); + hws[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_hw_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup); } - clk[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); - clk[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); - clk[IMX6QDL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); - clk[IMX6QDL_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); - clk[IMX6QDL_CLK_CKO] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); + + hws[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_hw_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); + hws[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_hw_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); + hws[IMX6QDL_CLK_CKO1_SEL] = imx_clk_hw_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); + hws[IMX6QDL_CLK_CKO2_SEL] = imx_clk_hw_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); + hws[IMX6QDL_CLK_CKO] = imx_clk_hw_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); /* name reg shift width busy: reg, shift parent_names num_parents */ - clk[IMX6QDL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); - clk[IMX6QDL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); + hws[IMX6QDL_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); + hws[IMX6QDL_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width */ - clk[IMX6QDL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); - clk[IMX6QDL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); - clk[IMX6QDL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); - clk[IMX6QDL_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); - clk[IMX6QDL_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); - clk[IMX6QDL_CLK_ASRC_PRED] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3); - clk[IMX6QDL_CLK_ASRC_PODF] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3); - clk[IMX6QDL_CLK_SPDIF_PRED] = 
imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); - clk[IMX6QDL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); + hws[IMX6QDL_CLK_PERIPH_CLK2] = imx_clk_hw_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); + hws[IMX6QDL_CLK_PERIPH2_CLK2] = imx_clk_hw_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); + hws[IMX6QDL_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2); + hws[IMX6QDL_CLK_ESAI_PRED] = imx_clk_hw_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); + hws[IMX6QDL_CLK_ESAI_PODF] = imx_clk_hw_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); + hws[IMX6QDL_CLK_ASRC_PRED] = imx_clk_hw_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3); + hws[IMX6QDL_CLK_ASRC_PODF] = imx_clk_hw_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3); + hws[IMX6QDL_CLK_SPDIF_PRED] = imx_clk_hw_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); + hws[IMX6QDL_CLK_SPDIF_PODF] = imx_clk_hw_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); + if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_IPG_PER] = imx_clk_divider("ipg_per", "ipg_per_sel", base + 0x1c, 0, 6); - clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6); - clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "can_sel", base + 0x20, 2, 6); - clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "uart_sel", base + 0x24, 0, 6); - clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0", 2, 7); - clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7); + hws[IMX6QDL_CLK_IPG_PER] = imx_clk_hw_divider("ipg_per", "ipg_per_sel", base + 0x1c, 0, 6); + hws[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_hw_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6); + hws[IMX6QDL_CLK_CAN_ROOT] = imx_clk_hw_divider("can_root", "can_sel", base + 0x20, 2, 6); + hws[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_hw_divider("uart_serial_podf", "uart_sel", base + 0x24, 0, 6); + hws[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0", 2, 7); + hws[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7); } else { - clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); - clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6); - clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); - clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); - clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); - clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); + hws[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_hw_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); + hws[IMX6QDL_CLK_CAN_ROOT] = imx_clk_hw_divider("can_root", "pll3_60m", base + 0x20, 2, 6); + hws[IMX6QDL_CLK_IPG_PER] = imx_clk_hw_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); + hws[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_hw_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); + hws[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + hws[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); } + if (clk_on_imx6dl()) - clk[IMX6QDL_CLK_MLB_PODF] = imx_clk_divider("mlb_podf", "mlb_sel", base + 0x18, 23, 
3); + hws[IMX6QDL_CLK_MLB_PODF] = imx_clk_hw_divider("mlb_podf", "mlb_sel", base + 0x18, 23, 3); else - clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); - clk[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3); + hws[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_hw_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); + hws[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_hw_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3); if (clk_on_imx6dl()) - clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 29, 3); + hws[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_hw_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 29, 3); else - clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); - clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); - clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); - clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0); - clk[IMX6QDL_CLK_LDB_DI1_PODF] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0); - clk[IMX6QDL_CLK_IPU1_DI0_PRE] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); - clk[IMX6QDL_CLK_IPU1_DI1_PRE] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); - clk[IMX6QDL_CLK_IPU2_DI0_PRE] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); - clk[IMX6QDL_CLK_IPU2_DI1_PRE] = imx_clk_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3); - clk[IMX6QDL_CLK_HSI_TX_PODF] = imx_clk_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3); - clk[IMX6QDL_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); - clk[IMX6QDL_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); - clk[IMX6QDL_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); - clk[IMX6QDL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); - clk[IMX6QDL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); - clk[IMX6QDL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); - clk[IMX6QDL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); - clk[IMX6QDL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); - clk[IMX6QDL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); - clk[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); - clk[IMX6QDL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); - clk[IMX6QDL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); + hws[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_hw_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); + hws[IMX6QDL_CLK_IPU1_PODF] = imx_clk_hw_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); + hws[IMX6QDL_CLK_IPU2_PODF] = imx_clk_hw_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); + hws[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_hw_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0); + hws[IMX6QDL_CLK_LDB_DI1_PODF] = imx_clk_hw_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0); + 
hws[IMX6QDL_CLK_IPU1_DI0_PRE] = imx_clk_hw_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); + hws[IMX6QDL_CLK_IPU1_DI1_PRE] = imx_clk_hw_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); + hws[IMX6QDL_CLK_IPU2_DI0_PRE] = imx_clk_hw_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); + hws[IMX6QDL_CLK_IPU2_DI1_PRE] = imx_clk_hw_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3); + hws[IMX6QDL_CLK_HSI_TX_PODF] = imx_clk_hw_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3); + hws[IMX6QDL_CLK_SSI1_PRED] = imx_clk_hw_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); + hws[IMX6QDL_CLK_SSI1_PODF] = imx_clk_hw_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); + hws[IMX6QDL_CLK_SSI2_PRED] = imx_clk_hw_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); + hws[IMX6QDL_CLK_SSI2_PODF] = imx_clk_hw_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); + hws[IMX6QDL_CLK_SSI3_PRED] = imx_clk_hw_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); + hws[IMX6QDL_CLK_SSI3_PODF] = imx_clk_hw_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); + hws[IMX6QDL_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); + hws[IMX6QDL_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); + hws[IMX6QDL_CLK_USDHC3_PODF] = imx_clk_hw_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); + hws[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_hw_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); + hws[IMX6QDL_CLK_ENFC_PRED] = imx_clk_hw_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); + hws[IMX6QDL_CLK_ENFC_PODF] = imx_clk_hw_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3); - clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); + hws[IMX6QDL_CLK_EIM_PODF] = imx_clk_hw_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3); + hws[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_hw_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); } else { - clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); - clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup); + hws[IMX6QDL_CLK_EIM_PODF] = imx_clk_hw_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); + hws[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_hw_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup); } - clk[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3); - clk[IMX6QDL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); - clk[IMX6QDL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); + + hws[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_hw_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3); + hws[IMX6QDL_CLK_CKO1_PODF] = imx_clk_hw_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); + hws[IMX6QDL_CLK_CKO2_PODF] = imx_clk_hw_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); /* name parent_name reg shift width busy: reg, shift */ - clk[IMX6QDL_CLK_AXI] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); - clk[IMX6QDL_CLK_MMDC_CH0_AXI_PODF] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4); + hws[IMX6QDL_CLK_AXI] = 
imx_clk_hw_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); + hws[IMX6QDL_CLK_MMDC_CH0_AXI_PODF] = imx_clk_hw_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4); if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_MMDC_CH1_AXI_CG] = imx_clk_gate("mmdc_ch1_axi_cg", "periph2", base + 0x4, 18); - clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "mmdc_ch1_axi_cg", base + 0x14, 3, 3, base + 0x48, 2); + hws[IMX6QDL_CLK_MMDC_CH1_AXI_CG] = imx_clk_hw_gate("mmdc_ch1_axi_cg", "periph2", base + 0x4, 18); + hws[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_hw_busy_divider("mmdc_ch1_axi_podf", "mmdc_ch1_axi_cg", base + 0x14, 3, 3, base + 0x48, 2); } else { - clk[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); + hws[IMX6QDL_CLK_MMDC_CH1_AXI_PODF] = imx_clk_hw_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); } - clk[IMX6QDL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); - clk[IMX6QDL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); + hws[IMX6QDL_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); + hws[IMX6QDL_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); /* name parent_name reg shift */ - clk[IMX6QDL_CLK_APBH_DMA] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4); - clk[IMX6QDL_CLK_ASRC] = imx_clk_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc); - clk[IMX6QDL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); - clk[IMX6QDL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); - clk[IMX6QDL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); - clk[IMX6QDL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10); - clk[IMX6QDL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); - clk[IMX6QDL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); - clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16); - clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); - clk[IMX6QDL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20); - clk[IMX6QDL_CLK_DCIC1] = imx_clk_gate2("dcic1", "ipu1_podf", base + 0x68, 24); - clk[IMX6QDL_CLK_DCIC2] = imx_clk_gate2("dcic2", "ipu2_podf", base + 0x68, 26); - clk[IMX6QDL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); - clk[IMX6QDL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); - clk[IMX6QDL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); - clk[IMX6QDL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); + hws[IMX6QDL_CLK_APBH_DMA] = imx_clk_hw_gate2("apbh_dma", "usdhc3", base + 0x68, 4); + hws[IMX6QDL_CLK_ASRC] = imx_clk_hw_gate2_shared("asrc", "asrc_podf", base + 0x68, 6, &share_count_asrc); + hws[IMX6QDL_CLK_ASRC_IPG] = imx_clk_hw_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); + hws[IMX6QDL_CLK_ASRC_MEM] = imx_clk_hw_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); + hws[IMX6QDL_CLK_CAAM_MEM] = imx_clk_hw_gate2("caam_mem", "ahb", base + 0x68, 8); + hws[IMX6QDL_CLK_CAAM_ACLK] = imx_clk_hw_gate2("caam_aclk", "ahb", base + 0x68, 10); + hws[IMX6QDL_CLK_CAAM_IPG] = imx_clk_hw_gate2("caam_ipg", "ipg", base + 
0x68, 12); + hws[IMX6QDL_CLK_CAN1_IPG] = imx_clk_hw_gate2("can1_ipg", "ipg", base + 0x68, 14); + hws[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_hw_gate2("can1_serial", "can_root", base + 0x68, 16); + hws[IMX6QDL_CLK_CAN2_IPG] = imx_clk_hw_gate2("can2_ipg", "ipg", base + 0x68, 18); + hws[IMX6QDL_CLK_CAN2_SERIAL] = imx_clk_hw_gate2("can2_serial", "can_root", base + 0x68, 20); + hws[IMX6QDL_CLK_DCIC1] = imx_clk_hw_gate2("dcic1", "ipu1_podf", base + 0x68, 24); + hws[IMX6QDL_CLK_DCIC2] = imx_clk_hw_gate2("dcic2", "ipu2_podf", base + 0x68, 26); + hws[IMX6QDL_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); + hws[IMX6QDL_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); + hws[IMX6QDL_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); + hws[IMX6QDL_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); if (clk_on_imx6dl()) - clk[IMX6DL_CLK_I2C4] = imx_clk_gate2("i2c4", "ipg_per", base + 0x6c, 8); + hws[IMX6DL_CLK_I2C4] = imx_clk_hw_gate2("i2c4", "ipg_per", base + 0x6c, 8); else - clk[IMX6Q_CLK_ECSPI5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8); - clk[IMX6QDL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10); - clk[IMX6QDL_CLK_EPIT1] = imx_clk_gate2("epit1", "ipg", base + 0x6c, 12); - clk[IMX6QDL_CLK_EPIT2] = imx_clk_gate2("epit2", "ipg", base + 0x6c, 14); - clk[IMX6QDL_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai); - clk[IMX6QDL_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai); - clk[IMX6QDL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai); - clk[IMX6QDL_CLK_GPT_IPG] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20); - clk[IMX6QDL_CLK_GPT_IPG_PER] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22); - clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); - clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); - clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); - clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4); - clk[IMX6QDL_CLK_I2C1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6); - clk[IMX6QDL_CLK_I2C2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8); - clk[IMX6QDL_CLK_I2C3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10); - clk[IMX6QDL_CLK_IIM] = imx_clk_gate2("iim", "ipg", base + 0x70, 12); - clk[IMX6QDL_CLK_ENFC] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14); - clk[IMX6QDL_CLK_VDOA] = imx_clk_gate2("vdoa", "vdo_axi", base + 0x70, 26); - clk[IMX6QDL_CLK_IPU1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0); - clk[IMX6QDL_CLK_IPU1_DI0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2); - clk[IMX6QDL_CLK_IPU1_DI1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4); - clk[IMX6QDL_CLK_IPU2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6); - clk[IMX6QDL_CLK_IPU2_DI0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8); + hws[IMX6Q_CLK_ECSPI5] = imx_clk_hw_gate2("ecspi5", "ecspi_root", base + 0x6c, 8); + hws[IMX6QDL_CLK_ENET] = imx_clk_hw_gate2("enet", "ipg", base + 0x6c, 10); + hws[IMX6QDL_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "ipg", base + 0x6c, 12); + hws[IMX6QDL_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "ipg", base + 0x6c, 14); + hws[IMX6QDL_CLK_ESAI_EXTAL] = imx_clk_hw_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai); + 
hws[IMX6QDL_CLK_ESAI_IPG] = imx_clk_hw_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai); + hws[IMX6QDL_CLK_ESAI_MEM] = imx_clk_hw_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai); + hws[IMX6QDL_CLK_GPT_IPG] = imx_clk_hw_gate2("gpt_ipg", "ipg", base + 0x6c, 20); + hws[IMX6QDL_CLK_GPT_IPG_PER] = imx_clk_hw_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22); + hws[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_hw_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); + hws[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_hw_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); + hws[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_hw_gate2("hdmi_iahb", "ahb", base + 0x70, 0); + hws[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_hw_gate2("hdmi_isfr", "mipi_core_cfg", base + 0x70, 4); + hws[IMX6QDL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "ipg_per", base + 0x70, 6); + hws[IMX6QDL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "ipg_per", base + 0x70, 8); + hws[IMX6QDL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "ipg_per", base + 0x70, 10); + hws[IMX6QDL_CLK_IIM] = imx_clk_hw_gate2("iim", "ipg", base + 0x70, 12); + hws[IMX6QDL_CLK_ENFC] = imx_clk_hw_gate2("enfc", "enfc_podf", base + 0x70, 14); + hws[IMX6QDL_CLK_VDOA] = imx_clk_hw_gate2("vdoa", "vdo_axi", base + 0x70, 26); + hws[IMX6QDL_CLK_IPU1] = imx_clk_hw_gate2("ipu1", "ipu1_podf", base + 0x74, 0); + hws[IMX6QDL_CLK_IPU1_DI0] = imx_clk_hw_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2); + hws[IMX6QDL_CLK_IPU1_DI1] = imx_clk_hw_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4); + hws[IMX6QDL_CLK_IPU2] = imx_clk_hw_gate2("ipu2", "ipu2_podf", base + 0x74, 6); + hws[IMX6QDL_CLK_IPU2_DI0] = imx_clk_hw_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8); if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_sel", base + 0x74, 12); - clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_sel", base + 0x74, 14); + hws[IMX6QDL_CLK_LDB_DI0] = imx_clk_hw_gate2("ldb_di0", "ldb_di0_sel", base + 0x74, 12); + hws[IMX6QDL_CLK_LDB_DI1] = imx_clk_hw_gate2("ldb_di1", "ldb_di1_sel", base + 0x74, 14); } else { - clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12); - clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); + hws[IMX6QDL_CLK_LDB_DI0] = imx_clk_hw_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12); + hws[IMX6QDL_CLK_LDB_DI1] = imx_clk_hw_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14); } - clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); - clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg); - clk[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg); - clk[IMX6QDL_CLK_MIPI_IPG] = imx_clk_gate2_shared("mipi_ipg", "ipg", base + 0x74, 16, &share_count_mipi_core_cfg); + hws[IMX6QDL_CLK_IPU2_DI1] = imx_clk_hw_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10); + hws[IMX6QDL_CLK_HSI_TX] = imx_clk_hw_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg); + hws[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_hw_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg); + hws[IMX6QDL_CLK_MIPI_IPG] = imx_clk_hw_gate2_shared("mipi_ipg", "ipg", base + 0x74, 16, &share_count_mipi_core_cfg); + if (clk_on_imx6dl()) /* * The multiplexer and divider of the imx6q clock gpu2d get * redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl. 
*/ - clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18); + hws[IMX6QDL_CLK_MLB] = imx_clk_hw_gate2("mlb", "mlb_podf", base + 0x74, 18); else - clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); - clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL); - clk[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); - clk[IMX6QDL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); - clk[IMX6QDL_CLK_OCRAM] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28); - clk[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30); - clk[IMX6QDL_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0); - clk[IMX6QDL_CLK_PER1_BCH] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12); - clk[IMX6QDL_CLK_PWM1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16); - clk[IMX6QDL_CLK_PWM2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18); - clk[IMX6QDL_CLK_PWM3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20); - clk[IMX6QDL_CLK_PWM4] = imx_clk_gate2("pwm4", "ipg_per", base + 0x78, 22); - clk[IMX6QDL_CLK_GPMI_BCH_APB] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); - clk[IMX6QDL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); - clk[IMX6QDL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28); - clk[IMX6QDL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); - clk[IMX6QDL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); - clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4); - clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); - clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); - clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_spdif); - clk[IMX6QDL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif); - clk[IMX6QDL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); - clk[IMX6QDL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); - clk[IMX6QDL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); - clk[IMX6QDL_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); - clk[IMX6QDL_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); - clk[IMX6QDL_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); - clk[IMX6QDL_CLK_UART_IPG] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24); - clk[IMX6QDL_CLK_UART_SERIAL] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26); - clk[IMX6QDL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); - clk[IMX6QDL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); - clk[IMX6QDL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); - clk[IMX6QDL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); - clk[IMX6QDL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); - clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10); - clk[IMX6QDL_CLK_VDO_AXI] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12); - clk[IMX6QDL_CLK_VPU_AXI] = imx_clk_gate2("vpu_axi", 
"vpu_axi_podf", base + 0x80, 14); + hws[IMX6QDL_CLK_MLB] = imx_clk_hw_gate2("mlb", "axi", base + 0x74, 18); + hws[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_hw_gate2_flags("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20, CLK_IS_CRITICAL); + hws[IMX6QDL_CLK_MMDC_CH1_AXI] = imx_clk_hw_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22); + hws[IMX6QDL_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + hws[IMX6QDL_CLK_OCRAM] = imx_clk_hw_gate2("ocram", "ahb", base + 0x74, 28); + hws[IMX6QDL_CLK_OPENVG_AXI] = imx_clk_hw_gate2("openvg_axi", "axi", base + 0x74, 30); + hws[IMX6QDL_CLK_PCIE_AXI] = imx_clk_hw_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0); + hws[IMX6QDL_CLK_PER1_BCH] = imx_clk_hw_gate2("per1_bch", "usdhc3", base + 0x78, 12); + hws[IMX6QDL_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "ipg_per", base + 0x78, 16); + hws[IMX6QDL_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "ipg_per", base + 0x78, 18); + hws[IMX6QDL_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "ipg_per", base + 0x78, 20); + hws[IMX6QDL_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "ipg_per", base + 0x78, 22); + hws[IMX6QDL_CLK_GPMI_BCH_APB] = imx_clk_hw_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); + hws[IMX6QDL_CLK_GPMI_BCH] = imx_clk_hw_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); + hws[IMX6QDL_CLK_GPMI_IO] = imx_clk_hw_gate2("gpmi_io", "enfc", base + 0x78, 28); + hws[IMX6QDL_CLK_GPMI_APB] = imx_clk_hw_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); + hws[IMX6QDL_CLK_ROM] = imx_clk_hw_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); + hws[IMX6QDL_CLK_SATA] = imx_clk_hw_gate2("sata", "ahb", base + 0x7c, 4); + hws[IMX6QDL_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ahb", base + 0x7c, 6); + hws[IMX6QDL_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12); + hws[IMX6QDL_CLK_SPDIF] = imx_clk_hw_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_spdif); + hws[IMX6QDL_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif); + hws[IMX6QDL_CLK_SSI1_IPG] = imx_clk_hw_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6QDL_CLK_SSI2_IPG] = imx_clk_hw_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6QDL_CLK_SSI3_IPG] = imx_clk_hw_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6QDL_CLK_SSI1] = imx_clk_hw_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6QDL_CLK_SSI2] = imx_clk_hw_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6QDL_CLK_SSI3] = imx_clk_hw_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6QDL_CLK_UART_IPG] = imx_clk_hw_gate2("uart_ipg", "ipg", base + 0x7c, 24); + hws[IMX6QDL_CLK_UART_SERIAL] = imx_clk_hw_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26); + hws[IMX6QDL_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0); + hws[IMX6QDL_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); + hws[IMX6QDL_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); + hws[IMX6QDL_CLK_USDHC3] = imx_clk_hw_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); + hws[IMX6QDL_CLK_USDHC4] = imx_clk_hw_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); + hws[IMX6QDL_CLK_EIM_SLOW] = imx_clk_hw_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10); + hws[IMX6QDL_CLK_VDO_AXI] = imx_clk_hw_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12); + hws[IMX6QDL_CLK_VPU_AXI] = imx_clk_hw_gate2("vpu_axi", "vpu_axi_podf", base + 
0x80, 14); if (clk_on_imx6qp()) { - clk[IMX6QDL_CLK_PRE0] = imx_clk_gate2("pre0", "pre_axi", base + 0x80, 16); - clk[IMX6QDL_CLK_PRE1] = imx_clk_gate2("pre1", "pre_axi", base + 0x80, 18); - clk[IMX6QDL_CLK_PRE2] = imx_clk_gate2("pre2", "pre_axi", base + 0x80, 20); - clk[IMX6QDL_CLK_PRE3] = imx_clk_gate2("pre3", "pre_axi", base + 0x80, 22); - clk[IMX6QDL_CLK_PRG0_AXI] = imx_clk_gate2_shared("prg0_axi", "ipu1_podf", base + 0x80, 24, &share_count_prg0); - clk[IMX6QDL_CLK_PRG1_AXI] = imx_clk_gate2_shared("prg1_axi", "ipu2_podf", base + 0x80, 26, &share_count_prg1); - clk[IMX6QDL_CLK_PRG0_APB] = imx_clk_gate2_shared("prg0_apb", "ipg", base + 0x80, 24, &share_count_prg0); - clk[IMX6QDL_CLK_PRG1_APB] = imx_clk_gate2_shared("prg1_apb", "ipg", base + 0x80, 26, &share_count_prg1); + hws[IMX6QDL_CLK_PRE0] = imx_clk_hw_gate2("pre0", "pre_axi", base + 0x80, 16); + hws[IMX6QDL_CLK_PRE1] = imx_clk_hw_gate2("pre1", "pre_axi", base + 0x80, 18); + hws[IMX6QDL_CLK_PRE2] = imx_clk_hw_gate2("pre2", "pre_axi", base + 0x80, 20); + hws[IMX6QDL_CLK_PRE3] = imx_clk_hw_gate2("pre3", "pre_axi", base + 0x80, 22); + hws[IMX6QDL_CLK_PRG0_AXI] = imx_clk_hw_gate2_shared("prg0_axi", "ipu1_podf", base + 0x80, 24, &share_count_prg0); + hws[IMX6QDL_CLK_PRG1_AXI] = imx_clk_hw_gate2_shared("prg1_axi", "ipu2_podf", base + 0x80, 26, &share_count_prg1); + hws[IMX6QDL_CLK_PRG0_APB] = imx_clk_hw_gate2_shared("prg0_apb", "ipg", base + 0x80, 24, &share_count_prg0); + hws[IMX6QDL_CLK_PRG1_APB] = imx_clk_hw_gate2_shared("prg1_apb", "ipg", base + 0x80, 26, &share_count_prg1); } - clk[IMX6QDL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); - clk[IMX6QDL_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24); + hws[IMX6QDL_CLK_CKO1] = imx_clk_hw_gate("cko1", "cko1_podf", base + 0x60, 7); + hws[IMX6QDL_CLK_CKO2] = imx_clk_hw_gate("cko2", "cko2_podf", base + 0x60, 24); /* * The gpt_3m clock is not available on i.MX6Q TO1.0. Let's point it * to clock gpt_ipg_per to ease the gpt driver code. 
*/ if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) - clk[IMX6QDL_CLK_GPT_3M] = clk[IMX6QDL_CLK_GPT_IPG_PER]; + hws[IMX6QDL_CLK_GPT_3M] = hws[IMX6QDL_CLK_GPT_IPG_PER]; - imx_check_clocks(clk, ARRAY_SIZE(clk)); + imx_check_clk_hws(hws, IMX6QDL_CLK_END); - clk_data.clks = clk; - clk_data.clk_num = ARRAY_SIZE(clk); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); - clk_register_clkdev(clk[IMX6QDL_CLK_ENET_REF], "enet_ref", NULL); + clk_hw_register_clkdev(hws[IMX6QDL_CLK_ENET_REF], "enet_ref", NULL); - clk_set_rate(clk[IMX6QDL_CLK_PLL3_PFD1_540M], 540000000); + clk_set_rate(hws[IMX6QDL_CLK_PLL3_PFD1_540M]->clk, 540000000); if (clk_on_imx6dl()) - clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]); + clk_set_parent(hws[IMX6QDL_CLK_IPU1_SEL]->clk, hws[IMX6QDL_CLK_PLL3_PFD1_540M]->clk); - clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); - clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); - clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); - clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]); - clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_SEL], clk[IMX6QDL_CLK_IPU1_DI0_PRE]); - clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_SEL], clk[IMX6QDL_CLK_IPU1_DI1_PRE]); - clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_SEL], clk[IMX6QDL_CLK_IPU2_DI0_PRE]); - clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI1_SEL], clk[IMX6QDL_CLK_IPU2_DI1_PRE]); + clk_set_parent(hws[IMX6QDL_CLK_IPU1_DI0_PRE_SEL]->clk, hws[IMX6QDL_CLK_PLL5_VIDEO_DIV]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU1_DI1_PRE_SEL]->clk, hws[IMX6QDL_CLK_PLL5_VIDEO_DIV]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU2_DI0_PRE_SEL]->clk, hws[IMX6QDL_CLK_PLL5_VIDEO_DIV]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU2_DI1_PRE_SEL]->clk, hws[IMX6QDL_CLK_PLL5_VIDEO_DIV]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU1_DI0_SEL]->clk, hws[IMX6QDL_CLK_IPU1_DI0_PRE]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU1_DI1_SEL]->clk, hws[IMX6QDL_CLK_IPU1_DI1_PRE]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU2_DI0_SEL]->clk, hws[IMX6QDL_CLK_IPU2_DI0_PRE]->clk); + clk_set_parent(hws[IMX6QDL_CLK_IPU2_DI1_SEL]->clk, hws[IMX6QDL_CLK_IPU2_DI1_PRE]->clk); /* * The gpmi needs 100MHz frequency in the EDO/Sync mode, * We can not get the 100MHz from the pll2_pfd0_352m. * So choose pll2_pfd2_396m as enfc_sel's parent. */ - clk_set_parent(clk[IMX6QDL_CLK_ENFC_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]); + clk_set_parent(hws[IMX6QDL_CLK_ENFC_SEL]->clk, hws[IMX6QDL_CLK_PLL2_PFD2_396M]->clk); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { - clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY1_GATE]); - clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY2_GATE]); + clk_prepare_enable(hws[IMX6QDL_CLK_USBPHY1_GATE]->clk); + clk_prepare_enable(hws[IMX6QDL_CLK_USBPHY2_GATE]->clk); } /* * Let's initially set up CLKO with OSC24M, since this configuration * is widely used by imx6q board designs to clock audio codec. 
*/ - ret = clk_set_parent(clk[IMX6QDL_CLK_CKO2_SEL], clk[IMX6QDL_CLK_OSC]); + ret = clk_set_parent(hws[IMX6QDL_CLK_CKO2_SEL]->clk, hws[IMX6QDL_CLK_OSC]->clk); if (!ret) - ret = clk_set_parent(clk[IMX6QDL_CLK_CKO], clk[IMX6QDL_CLK_CKO2]); + ret = clk_set_parent(hws[IMX6QDL_CLK_CKO]->clk, hws[IMX6QDL_CLK_CKO2]->clk); if (ret) pr_warn("failed to set up CLKO: %d\n", ret); /* Audio-related clocks configuration */ - clk_set_parent(clk[IMX6QDL_CLK_SPDIF_SEL], clk[IMX6QDL_CLK_PLL3_PFD3_454M]); + clk_set_parent(hws[IMX6QDL_CLK_SPDIF_SEL]->clk, hws[IMX6QDL_CLK_PLL3_PFD3_454M]->clk); /* All existing boards with PCIe use LVDS1 */ if (IS_ENABLED(CONFIG_PCI_IMX6)) - clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]); + clk_set_parent(hws[IMX6QDL_CLK_LVDS1_SEL]->clk, hws[IMX6QDL_CLK_SATA_REF_100M]->clk); /* * Initialize the GPU clock muxes, so that the maximum specified clock * rates for the respective SoC are not exceeded. */ if (clk_on_imx6dl()) { - clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], - clk[IMX6QDL_CLK_PLL2_PFD1_594M]); - clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], - clk[IMX6QDL_CLK_PLL2_PFD1_594M]); + clk_set_parent(hws[IMX6QDL_CLK_GPU3D_CORE_SEL]->clk, + hws[IMX6QDL_CLK_PLL2_PFD1_594M]->clk); + clk_set_parent(hws[IMX6QDL_CLK_GPU2D_CORE_SEL]->clk, + hws[IMX6QDL_CLK_PLL2_PFD1_594M]->clk); } else if (clk_on_imx6q()) { - clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], - clk[IMX6QDL_CLK_MMDC_CH0_AXI]); - clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL], - clk[IMX6QDL_CLK_PLL2_PFD1_594M]); - clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], - clk[IMX6QDL_CLK_PLL3_USB_OTG]); + clk_set_parent(hws[IMX6QDL_CLK_GPU3D_CORE_SEL]->clk, + hws[IMX6QDL_CLK_MMDC_CH0_AXI]->clk); + clk_set_parent(hws[IMX6QDL_CLK_GPU3D_SHADER_SEL]->clk, + hws[IMX6QDL_CLK_PLL2_PFD1_594M]->clk); + clk_set_parent(hws[IMX6QDL_CLK_GPU2D_CORE_SEL]->clk, + hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk); + } + + for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) { + int index = uart_clk_ids[i]; + + uart_clks[i] = &hws[index]->clk; } imx_register_uart_clocks(uart_clks); diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c index f9f1f8a95d92..4bd44d89eaaa 100644 --- a/drivers/clk/imx/clk-imx6sl.c +++ b/drivers/clk/imx/clk-imx6sl.c @@ -13,8 +13,6 @@ #include "clk.h" -#define CCDR 0x4 -#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17) #define CCSR 0xc #define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2) #define CACRR 0x10 @@ -97,8 +95,8 @@ static unsigned int share_count_ssi2; static unsigned int share_count_ssi3; static unsigned int share_count_spdif; -static struct clk *clks[IMX6SL_CLK_END]; -static struct clk_onecell_data clk_data; +static struct clk_hw **hws; +static struct clk_hw_onecell_data *clk_hw_data; static void __iomem *ccm_base; static void __iomem *anatop_base; @@ -179,74 +177,84 @@ void imx6sl_set_wait_clk(bool enter) imx6sl_enable_pll_arm(false); } -static struct clk ** const uart_clks[] __initconst = { - &clks[IMX6SL_CLK_UART], - &clks[IMX6SL_CLK_UART_SERIAL], - NULL +static const int uart_clk_ids[] __initconst = { + IMX6SL_CLK_UART, + IMX6SL_CLK_UART_SERIAL, }; +static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata; + static void __init imx6sl_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; int ret; + int i; + + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, + IMX6SL_CLK_END), GFP_KERNEL); + if (WARN_ON(!clk_hw_data)) + return; + + clk_hw_data->num = IMX6SL_CLK_END; + hws = clk_hw_data->hws; - clks[IMX6SL_CLK_DUMMY] = imx_clk_fixed("dummy", 0); - 
clks[IMX6SL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0); - clks[IMX6SL_CLK_OSC] = imx_obtain_fixed_clock("osc", 0); + hws[IMX6SL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0); + hws[IMX6SL_CLK_CKIL] = imx_obtain_fixed_clock_hw("ckil", 0); + hws[IMX6SL_CLK_OSC] = imx_obtain_fixed_clock_hw("osc", 0); /* Clock source from external clock via CLK1 PAD */ - clks[IMX6SL_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0); + hws[IMX6SL_CLK_ANACLK1] = imx_obtain_fixed_clock_hw("anaclk1", 0); np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop"); base = of_iomap(np, 0); WARN_ON(!base); anatop_base = base; - clks[IMX6SL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SL_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ - clks[IMX6SL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); - clks[IMX6SL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); - clks[IMX6SL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); - clks[IMX6SL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); - clks[IMX6SL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); - clks[IMX6SL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); - clks[IMX6SL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); - - clks[IMX6SL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); - 
clks[IMX6SL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + hws[IMX6SL_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + hws[IMX6SL_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + hws[IMX6SL_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + hws[IMX6SL_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + hws[IMX6SL_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + hws[IMX6SL_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); + + hws[IMX6SL_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); /* Do not bypass PLLs initially */ - clk_set_parent(clks[IMX6SL_PLL1_BYPASS], clks[IMX6SL_CLK_PLL1]); - clk_set_parent(clks[IMX6SL_PLL2_BYPASS], clks[IMX6SL_CLK_PLL2]); - clk_set_parent(clks[IMX6SL_PLL3_BYPASS], clks[IMX6SL_CLK_PLL3]); - clk_set_parent(clks[IMX6SL_PLL4_BYPASS], clks[IMX6SL_CLK_PLL4]); - clk_set_parent(clks[IMX6SL_PLL5_BYPASS], clks[IMX6SL_CLK_PLL5]); - clk_set_parent(clks[IMX6SL_PLL6_BYPASS], clks[IMX6SL_CLK_PLL6]); - clk_set_parent(clks[IMX6SL_PLL7_BYPASS], clks[IMX6SL_CLK_PLL7]); - - clks[IMX6SL_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); - clks[IMX6SL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); - clks[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); - clks[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); - clks[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); - clks[IMX6SL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); - 
clks[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); - - clks[IMX6SL_CLK_LVDS1_SEL] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); - clks[IMX6SL_CLK_LVDS1_OUT] = imx_clk_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12)); - clks[IMX6SL_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); + clk_set_parent(hws[IMX6SL_PLL1_BYPASS]->clk, hws[IMX6SL_CLK_PLL1]->clk); + clk_set_parent(hws[IMX6SL_PLL2_BYPASS]->clk, hws[IMX6SL_CLK_PLL2]->clk); + clk_set_parent(hws[IMX6SL_PLL3_BYPASS]->clk, hws[IMX6SL_CLK_PLL3]->clk); + clk_set_parent(hws[IMX6SL_PLL4_BYPASS]->clk, hws[IMX6SL_CLK_PLL4]->clk); + clk_set_parent(hws[IMX6SL_PLL5_BYPASS]->clk, hws[IMX6SL_CLK_PLL5]->clk); + clk_set_parent(hws[IMX6SL_PLL6_BYPASS]->clk, hws[IMX6SL_CLK_PLL6]->clk); + clk_set_parent(hws[IMX6SL_PLL7_BYPASS]->clk, hws[IMX6SL_CLK_PLL7]->clk); + + hws[IMX6SL_CLK_PLL1_SYS] = imx_clk_hw_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); + hws[IMX6SL_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); + hws[IMX6SL_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); + hws[IMX6SL_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); + hws[IMX6SL_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); + hws[IMX6SL_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); + hws[IMX6SL_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); + + hws[IMX6SL_CLK_LVDS1_SEL] = imx_clk_hw_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); + hws[IMX6SL_CLK_LVDS1_OUT] = imx_clk_hw_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12)); + hws[IMX6SL_CLK_LVDS1_IN] = imx_clk_hw_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); /* * usbphy1 and usbphy2 are implemented as dummy gates using reserve @@ -255,32 +263,32 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) * turned on during boot, and software will not need to control it * anymore after that. 
*/ - clks[IMX6SL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); - clks[IMX6SL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); - clks[IMX6SL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); - clks[IMX6SL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); + hws[IMX6SL_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); + hws[IMX6SL_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); + hws[IMX6SL_CLK_USBPHY1_GATE] = imx_clk_hw_gate("usbphy1_gate", "dummy", base + 0x10, 6); + hws[IMX6SL_CLK_USBPHY2_GATE] = imx_clk_hw_gate("usbphy2_gate", "dummy", base + 0x20, 6); /* dev name parent_name flags reg shift width div: flags, div_table lock */ - clks[IMX6SL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6SL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); - clks[IMX6SL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6SL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); - clks[IMX6SL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); + hws[IMX6SL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); + hws[IMX6SL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); + hws[IMX6SL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); + hws[IMX6SL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); + hws[IMX6SL_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); /* name parent_name reg idx */ - clks[IMX6SL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0", "pll2_bus", base + 0x100, 0); - clks[IMX6SL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", base + 0x100, 1); - clks[IMX6SL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", base + 0x100, 2); - clks[IMX6SL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0", "pll3_usb_otg", base + 0xf0, 0); - clks[IMX6SL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", base + 0xf0, 1); - clks[IMX6SL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", base + 0xf0, 2); - clks[IMX6SL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", base + 0xf0, 3); + hws[IMX6SL_CLK_PLL2_PFD0] = imx_clk_hw_pfd("pll2_pfd0", "pll2_bus", base + 0x100, 0); + hws[IMX6SL_CLK_PLL2_PFD1] = imx_clk_hw_pfd("pll2_pfd1", "pll2_bus", base + 0x100, 1); + hws[IMX6SL_CLK_PLL2_PFD2] = imx_clk_hw_pfd("pll2_pfd2", "pll2_bus", base + 0x100, 2); + hws[IMX6SL_CLK_PLL3_PFD0] = imx_clk_hw_pfd("pll3_pfd0", "pll3_usb_otg", base + 0xf0, 0); + hws[IMX6SL_CLK_PLL3_PFD1] = imx_clk_hw_pfd("pll3_pfd1", 
"pll3_usb_otg", base + 0xf0, 1); + hws[IMX6SL_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2", "pll3_usb_otg", base + 0xf0, 2); + hws[IMX6SL_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ - clks[IMX6SL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2); - clks[IMX6SL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); - clks[IMX6SL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); - clks[IMX6SL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); + hws[IMX6SL_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2", 1, 2); + hws[IMX6SL_CLK_PLL3_120M] = imx_clk_hw_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); + hws[IMX6SL_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); + hws[IMX6SL_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); np = ccm_node; base = of_iomap(np, 0); @@ -288,157 +296,160 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) ccm_base = base; /* name reg shift width parent_names num_parents */ - clks[IMX6SL_CLK_STEP] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); - clks[IMX6SL_CLK_PLL1_SW] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); - clks[IMX6SL_CLK_OCRAM_ALT_SEL] = imx_clk_mux("ocram_alt_sel", base + 0x14, 7, 1, ocram_alt_sels, ARRAY_SIZE(ocram_alt_sels)); - clks[IMX6SL_CLK_OCRAM_SEL] = imx_clk_mux("ocram_sel", base + 0x14, 6, 1, ocram_sels, ARRAY_SIZE(ocram_sels)); - clks[IMX6SL_CLK_PRE_PERIPH2_SEL] = imx_clk_mux("pre_periph2_sel", base + 0x18, 21, 2, pre_periph_sels, ARRAY_SIZE(pre_periph_sels)); - clks[IMX6SL_CLK_PRE_PERIPH_SEL] = imx_clk_mux("pre_periph_sel", base + 0x18, 18, 2, pre_periph_sels, ARRAY_SIZE(pre_periph_sels)); - clks[IMX6SL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); - clks[IMX6SL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); - clks[IMX6SL_CLK_CSI_SEL] = imx_clk_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels)); - clks[IMX6SL_CLK_LCDIF_AXI_SEL] = imx_clk_mux("lcdif_axi_sel", base + 0x3c, 14, 2, lcdif_axi_sels, ARRAY_SIZE(lcdif_axi_sels)); - clks[IMX6SL_CLK_USDHC1_SEL] = imx_clk_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_USDHC2_SEL] = imx_clk_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_SSI1_SEL] = imx_clk_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_SSI2_SEL] = imx_clk_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_SSI3_SEL] = imx_clk_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_PERCLK_SEL] = imx_clk_fixup_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels), imx_cscmr1_fixup); - clks[IMX6SL_CLK_PXP_AXI_SEL] = imx_clk_mux("pxp_axi_sel", base + 0x34, 6, 3, 
pxp_axi_sels, ARRAY_SIZE(pxp_axi_sels)); - clks[IMX6SL_CLK_EPDC_AXI_SEL] = imx_clk_mux("epdc_axi_sel", base + 0x34, 15, 3, epdc_axi_sels, ARRAY_SIZE(epdc_axi_sels)); - clks[IMX6SL_CLK_GPU2D_OVG_SEL] = imx_clk_mux("gpu2d_ovg_sel", base + 0x18, 4, 2, gpu2d_ovg_sels, ARRAY_SIZE(gpu2d_ovg_sels)); - clks[IMX6SL_CLK_GPU2D_SEL] = imx_clk_mux("gpu2d_sel", base + 0x18, 8, 2, gpu2d_sels, ARRAY_SIZE(gpu2d_sels)); - clks[IMX6SL_CLK_LCDIF_PIX_SEL] = imx_clk_mux("lcdif_pix_sel", base + 0x38, 6, 3, lcdif_pix_sels, ARRAY_SIZE(lcdif_pix_sels)); - clks[IMX6SL_CLK_EPDC_PIX_SEL] = imx_clk_mux("epdc_pix_sel", base + 0x38, 15, 3, epdc_pix_sels, ARRAY_SIZE(epdc_pix_sels)); - clks[IMX6SL_CLK_SPDIF0_SEL] = imx_clk_mux("spdif0_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clks[IMX6SL_CLK_SPDIF1_SEL] = imx_clk_mux("spdif1_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clks[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clks[IMX6SL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); - clks[IMX6SL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); + hws[IMX6SL_CLK_STEP] = imx_clk_hw_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); + hws[IMX6SL_CLK_PLL1_SW] = imx_clk_hw_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); + hws[IMX6SL_CLK_OCRAM_ALT_SEL] = imx_clk_hw_mux("ocram_alt_sel", base + 0x14, 7, 1, ocram_alt_sels, ARRAY_SIZE(ocram_alt_sels)); + hws[IMX6SL_CLK_OCRAM_SEL] = imx_clk_hw_mux("ocram_sel", base + 0x14, 6, 1, ocram_sels, ARRAY_SIZE(ocram_sels)); + hws[IMX6SL_CLK_PRE_PERIPH2_SEL] = imx_clk_hw_mux("pre_periph2_sel", base + 0x18, 21, 2, pre_periph_sels, ARRAY_SIZE(pre_periph_sels)); + hws[IMX6SL_CLK_PRE_PERIPH_SEL] = imx_clk_hw_mux("pre_periph_sel", base + 0x18, 18, 2, pre_periph_sels, ARRAY_SIZE(pre_periph_sels)); + hws[IMX6SL_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + hws[IMX6SL_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + hws[IMX6SL_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels)); + hws[IMX6SL_CLK_LCDIF_AXI_SEL] = imx_clk_hw_mux("lcdif_axi_sel", base + 0x3c, 14, 2, lcdif_axi_sels, ARRAY_SIZE(lcdif_axi_sels)); + hws[IMX6SL_CLK_USDHC1_SEL] = imx_clk_hw_fixup_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_USDHC2_SEL] = imx_clk_hw_fixup_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_USDHC3_SEL] = imx_clk_hw_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_USDHC4_SEL] = imx_clk_hw_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_SSI1_SEL] = imx_clk_hw_fixup_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_SSI2_SEL] = imx_clk_hw_fixup_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_SSI3_SEL] = imx_clk_hw_fixup_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_PERCLK_SEL] = imx_clk_hw_fixup_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, 
ARRAY_SIZE(perclk_sels), imx_cscmr1_fixup); + hws[IMX6SL_CLK_PXP_AXI_SEL] = imx_clk_hw_mux("pxp_axi_sel", base + 0x34, 6, 3, pxp_axi_sels, ARRAY_SIZE(pxp_axi_sels)); + hws[IMX6SL_CLK_EPDC_AXI_SEL] = imx_clk_hw_mux("epdc_axi_sel", base + 0x34, 15, 3, epdc_axi_sels, ARRAY_SIZE(epdc_axi_sels)); + hws[IMX6SL_CLK_GPU2D_OVG_SEL] = imx_clk_hw_mux("gpu2d_ovg_sel", base + 0x18, 4, 2, gpu2d_ovg_sels, ARRAY_SIZE(gpu2d_ovg_sels)); + hws[IMX6SL_CLK_GPU2D_SEL] = imx_clk_hw_mux("gpu2d_sel", base + 0x18, 8, 2, gpu2d_sels, ARRAY_SIZE(gpu2d_sels)); + hws[IMX6SL_CLK_LCDIF_PIX_SEL] = imx_clk_hw_mux("lcdif_pix_sel", base + 0x38, 6, 3, lcdif_pix_sels, ARRAY_SIZE(lcdif_pix_sels)); + hws[IMX6SL_CLK_EPDC_PIX_SEL] = imx_clk_hw_mux("epdc_pix_sel", base + 0x38, 15, 3, epdc_pix_sels, ARRAY_SIZE(epdc_pix_sels)); + hws[IMX6SL_CLK_SPDIF0_SEL] = imx_clk_hw_mux("spdif0_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6SL_CLK_SPDIF1_SEL] = imx_clk_hw_mux("spdif1_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6SL_CLK_EXTERN_AUDIO_SEL] = imx_clk_hw_mux("extern_audio_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6SL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); + hws[IMX6SL_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); /* name reg shift width busy: reg, shift parent_names num_parents */ - clks[IMX6SL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); - clks[IMX6SL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); + hws[IMX6SL_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); + hws[IMX6SL_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width */ - clks[IMX6SL_CLK_OCRAM_PODF] = imx_clk_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0); - clks[IMX6SL_CLK_PERIPH_CLK2_PODF] = imx_clk_divider("periph_clk2_podf", "periph_clk2_sel", base + 0x14, 27, 3); - clks[IMX6SL_CLK_PERIPH2_CLK2_PODF] = imx_clk_divider("periph2_clk2_podf", "periph2_clk2_sel", base + 0x14, 0, 3); - clks[IMX6SL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); - clks[IMX6SL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); - clks[IMX6SL_CLK_LCDIF_AXI_PODF] = imx_clk_divider("lcdif_axi_podf", "lcdif_axi_sel", base + 0x3c, 16, 3); - clks[IMX6SL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); - clks[IMX6SL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); - clks[IMX6SL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); - clks[IMX6SL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); - clks[IMX6SL_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); - clks[IMX6SL_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); - clks[IMX6SL_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); - clks[IMX6SL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); - clks[IMX6SL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); - clks[IMX6SL_CLK_SSI3_PODF] = 
imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); - clks[IMX6SL_CLK_PERCLK] = imx_clk_fixup_divider("perclk", "perclk_sel", base + 0x1c, 0, 6, imx_cscmr1_fixup); - clks[IMX6SL_CLK_PXP_AXI_PODF] = imx_clk_divider("pxp_axi_podf", "pxp_axi_sel", base + 0x34, 3, 3); - clks[IMX6SL_CLK_EPDC_AXI_PODF] = imx_clk_divider("epdc_axi_podf", "epdc_axi_sel", base + 0x34, 12, 3); - clks[IMX6SL_CLK_GPU2D_OVG_PODF] = imx_clk_divider("gpu2d_ovg_podf", "gpu2d_ovg_sel", base + 0x18, 26, 3); - clks[IMX6SL_CLK_GPU2D_PODF] = imx_clk_divider("gpu2d_podf", "gpu2d_sel", base + 0x18, 29, 3); - clks[IMX6SL_CLK_LCDIF_PIX_PRED] = imx_clk_divider("lcdif_pix_pred", "lcdif_pix_sel", base + 0x38, 3, 3); - clks[IMX6SL_CLK_EPDC_PIX_PRED] = imx_clk_divider("epdc_pix_pred", "epdc_pix_sel", base + 0x38, 12, 3); - clks[IMX6SL_CLK_LCDIF_PIX_PODF] = imx_clk_fixup_divider("lcdif_pix_podf", "lcdif_pix_pred", base + 0x1c, 20, 3, imx_cscmr1_fixup); - clks[IMX6SL_CLK_EPDC_PIX_PODF] = imx_clk_divider("epdc_pix_podf", "epdc_pix_pred", base + 0x18, 23, 3); - clks[IMX6SL_CLK_SPDIF0_PRED] = imx_clk_divider("spdif0_pred", "spdif0_sel", base + 0x30, 25, 3); - clks[IMX6SL_CLK_SPDIF0_PODF] = imx_clk_divider("spdif0_podf", "spdif0_pred", base + 0x30, 22, 3); - clks[IMX6SL_CLK_SPDIF1_PRED] = imx_clk_divider("spdif1_pred", "spdif1_sel", base + 0x30, 12, 3); - clks[IMX6SL_CLK_SPDIF1_PODF] = imx_clk_divider("spdif1_podf", "spdif1_pred", base + 0x30, 9, 3); - clks[IMX6SL_CLK_EXTERN_AUDIO_PRED] = imx_clk_divider("extern_audio_pred", "extern_audio_sel", base + 0x28, 9, 3); - clks[IMX6SL_CLK_EXTERN_AUDIO_PODF] = imx_clk_divider("extern_audio_podf", "extern_audio_pred", base + 0x28, 25, 3); - clks[IMX6SL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6); - clks[IMX6SL_CLK_UART_ROOT] = imx_clk_divider("uart_root", "uart_sel", base + 0x24, 0, 6); + hws[IMX6SL_CLK_OCRAM_PODF] = imx_clk_hw_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0); + hws[IMX6SL_CLK_PERIPH_CLK2_PODF] = imx_clk_hw_divider("periph_clk2_podf", "periph_clk2_sel", base + 0x14, 27, 3); + hws[IMX6SL_CLK_PERIPH2_CLK2_PODF] = imx_clk_hw_divider("periph2_clk2_podf", "periph2_clk2_sel", base + 0x14, 0, 3); + hws[IMX6SL_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2); + hws[IMX6SL_CLK_CSI_PODF] = imx_clk_hw_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); + hws[IMX6SL_CLK_LCDIF_AXI_PODF] = imx_clk_hw_divider("lcdif_axi_podf", "lcdif_axi_sel", base + 0x3c, 16, 3); + hws[IMX6SL_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); + hws[IMX6SL_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); + hws[IMX6SL_CLK_USDHC3_PODF] = imx_clk_hw_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); + hws[IMX6SL_CLK_USDHC4_PODF] = imx_clk_hw_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); + hws[IMX6SL_CLK_SSI1_PRED] = imx_clk_hw_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); + hws[IMX6SL_CLK_SSI1_PODF] = imx_clk_hw_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); + hws[IMX6SL_CLK_SSI2_PRED] = imx_clk_hw_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); + hws[IMX6SL_CLK_SSI2_PODF] = imx_clk_hw_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); + hws[IMX6SL_CLK_SSI3_PRED] = imx_clk_hw_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); + hws[IMX6SL_CLK_SSI3_PODF] = imx_clk_hw_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); + hws[IMX6SL_CLK_PERCLK] = imx_clk_hw_fixup_divider("perclk", "perclk_sel", base + 0x1c, 0, 6, 
imx_cscmr1_fixup); + hws[IMX6SL_CLK_PXP_AXI_PODF] = imx_clk_hw_divider("pxp_axi_podf", "pxp_axi_sel", base + 0x34, 3, 3); + hws[IMX6SL_CLK_EPDC_AXI_PODF] = imx_clk_hw_divider("epdc_axi_podf", "epdc_axi_sel", base + 0x34, 12, 3); + hws[IMX6SL_CLK_GPU2D_OVG_PODF] = imx_clk_hw_divider("gpu2d_ovg_podf", "gpu2d_ovg_sel", base + 0x18, 26, 3); + hws[IMX6SL_CLK_GPU2D_PODF] = imx_clk_hw_divider("gpu2d_podf", "gpu2d_sel", base + 0x18, 29, 3); + hws[IMX6SL_CLK_LCDIF_PIX_PRED] = imx_clk_hw_divider("lcdif_pix_pred", "lcdif_pix_sel", base + 0x38, 3, 3); + hws[IMX6SL_CLK_EPDC_PIX_PRED] = imx_clk_hw_divider("epdc_pix_pred", "epdc_pix_sel", base + 0x38, 12, 3); + hws[IMX6SL_CLK_LCDIF_PIX_PODF] = imx_clk_hw_fixup_divider("lcdif_pix_podf", "lcdif_pix_pred", base + 0x1c, 20, 3, imx_cscmr1_fixup); + hws[IMX6SL_CLK_EPDC_PIX_PODF] = imx_clk_hw_divider("epdc_pix_podf", "epdc_pix_pred", base + 0x18, 23, 3); + hws[IMX6SL_CLK_SPDIF0_PRED] = imx_clk_hw_divider("spdif0_pred", "spdif0_sel", base + 0x30, 25, 3); + hws[IMX6SL_CLK_SPDIF0_PODF] = imx_clk_hw_divider("spdif0_podf", "spdif0_pred", base + 0x30, 22, 3); + hws[IMX6SL_CLK_SPDIF1_PRED] = imx_clk_hw_divider("spdif1_pred", "spdif1_sel", base + 0x30, 12, 3); + hws[IMX6SL_CLK_SPDIF1_PODF] = imx_clk_hw_divider("spdif1_podf", "spdif1_pred", base + 0x30, 9, 3); + hws[IMX6SL_CLK_EXTERN_AUDIO_PRED] = imx_clk_hw_divider("extern_audio_pred", "extern_audio_sel", base + 0x28, 9, 3); + hws[IMX6SL_CLK_EXTERN_AUDIO_PODF] = imx_clk_hw_divider("extern_audio_podf", "extern_audio_pred", base + 0x28, 25, 3); + hws[IMX6SL_CLK_ECSPI_ROOT] = imx_clk_hw_divider("ecspi_root", "ecspi_sel", base + 0x38, 19, 6); + hws[IMX6SL_CLK_UART_ROOT] = imx_clk_hw_divider("uart_root", "uart_sel", base + 0x24, 0, 6); /* name parent_name reg shift width busy: reg, shift */ - clks[IMX6SL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); - clks[IMX6SL_CLK_MMDC_ROOT] = imx_clk_busy_divider("mmdc", "periph2", base + 0x14, 3, 3, base + 0x48, 2); - clks[IMX6SL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); + hws[IMX6SL_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); + hws[IMX6SL_CLK_MMDC_ROOT] = imx_clk_hw_busy_divider("mmdc", "periph2", base + 0x14, 3, 3, base + 0x48, 2); + hws[IMX6SL_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); /* name parent_name reg shift */ - clks[IMX6SL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); - clks[IMX6SL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); - clks[IMX6SL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); - clks[IMX6SL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); - clks[IMX6SL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10); - clks[IMX6SL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); - clks[IMX6SL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); - clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16); - clks[IMX6SL_CLK_GPT] = imx_clk_gate2("gpt", "perclk", base + 0x6c, 20); - clks[IMX6SL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22); - clks[IMX6SL_CLK_GPU2D_OVG] = imx_clk_gate2("gpu2d_ovg", "gpu2d_ovg_podf", base + 0x6c, 26); - clks[IMX6SL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); - clks[IMX6SL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); - clks[IMX6SL_CLK_I2C3] = 
imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); - clks[IMX6SL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); - clks[IMX6SL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x74, 0); - clks[IMX6SL_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "pxp_axi_podf", base + 0x74, 2); - clks[IMX6SL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_axi", "epdc_axi_podf", base + 0x74, 4); - clks[IMX6SL_CLK_LCDIF_AXI] = imx_clk_gate2("lcdif_axi", "lcdif_axi_podf", base + 0x74, 6); - clks[IMX6SL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_pix_podf", base + 0x74, 8); - clks[IMX6SL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_pix_podf", base + 0x74, 10); - clks[IMX6SL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); - clks[IMX6SL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); - clks[IMX6SL_CLK_OCRAM] = imx_clk_gate2("ocram", "ocram_podf", base + 0x74, 28); - clks[IMX6SL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); - clks[IMX6SL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); - clks[IMX6SL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); - clks[IMX6SL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); - clks[IMX6SL_CLK_SDMA] = imx_clk_gate2("sdma", "ipg", base + 0x7c, 6); - clks[IMX6SL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); - clks[IMX6SL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif0_podf", base + 0x7c, 14, &share_count_spdif); - clks[IMX6SL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif); - clks[IMX6SL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); - clks[IMX6SL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); - clks[IMX6SL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); - clks[IMX6SL_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); - clks[IMX6SL_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); - clks[IMX6SL_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); - clks[IMX6SL_CLK_UART] = imx_clk_gate2("uart", "ipg", base + 0x7c, 24); - clks[IMX6SL_CLK_UART_SERIAL] = imx_clk_gate2("uart_serial", "uart_root", base + 0x7c, 26); - clks[IMX6SL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); - clks[IMX6SL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); - clks[IMX6SL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); - clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); - clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); + hws[IMX6SL_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_root", base + 0x6c, 0); + hws[IMX6SL_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); + hws[IMX6SL_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); + hws[IMX6SL_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); + hws[IMX6SL_CLK_ENET] = imx_clk_hw_gate2("enet", "ipg", base + 0x6c, 10); + hws[IMX6SL_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "perclk", base + 0x6c, 12); + hws[IMX6SL_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "perclk", base + 0x6c, 14); + hws[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_hw_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16); + hws[IMX6SL_CLK_GPT] = imx_clk_hw_gate2("gpt", 
"perclk", base + 0x6c, 20); + hws[IMX6SL_CLK_GPT_SERIAL] = imx_clk_hw_gate2("gpt_serial", "perclk", base + 0x6c, 22); + hws[IMX6SL_CLK_GPU2D_OVG] = imx_clk_hw_gate2("gpu2d_ovg", "gpu2d_ovg_podf", base + 0x6c, 26); + hws[IMX6SL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6); + hws[IMX6SL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8); + hws[IMX6SL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10); + hws[IMX6SL_CLK_OCOTP] = imx_clk_hw_gate2("ocotp", "ipg", base + 0x70, 12); + hws[IMX6SL_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x74, 0); + hws[IMX6SL_CLK_PXP_AXI] = imx_clk_hw_gate2("pxp_axi", "pxp_axi_podf", base + 0x74, 2); + hws[IMX6SL_CLK_EPDC_AXI] = imx_clk_hw_gate2("epdc_axi", "epdc_axi_podf", base + 0x74, 4); + hws[IMX6SL_CLK_LCDIF_AXI] = imx_clk_hw_gate2("lcdif_axi", "lcdif_axi_podf", base + 0x74, 6); + hws[IMX6SL_CLK_LCDIF_PIX] = imx_clk_hw_gate2("lcdif_pix", "lcdif_pix_podf", base + 0x74, 8); + hws[IMX6SL_CLK_EPDC_PIX] = imx_clk_hw_gate2("epdc_pix", "epdc_pix_podf", base + 0x74, 10); + hws[IMX6SL_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + hws[IMX6SL_CLK_MMDC_P1_IPG] = imx_clk_hw_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); + hws[IMX6SL_CLK_OCRAM] = imx_clk_hw_gate2("ocram", "ocram_podf", base + 0x74, 28); + hws[IMX6SL_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "perclk", base + 0x78, 16); + hws[IMX6SL_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "perclk", base + 0x78, 18); + hws[IMX6SL_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "perclk", base + 0x78, 20); + hws[IMX6SL_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "perclk", base + 0x78, 22); + hws[IMX6SL_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ipg", base + 0x7c, 6); + hws[IMX6SL_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12); + hws[IMX6SL_CLK_SPDIF] = imx_clk_hw_gate2_shared("spdif", "spdif0_podf", base + 0x7c, 14, &share_count_spdif); + hws[IMX6SL_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif); + hws[IMX6SL_CLK_SSI1_IPG] = imx_clk_hw_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6SL_CLK_SSI2_IPG] = imx_clk_hw_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6SL_CLK_SSI3_IPG] = imx_clk_hw_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6SL_CLK_SSI1] = imx_clk_hw_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6SL_CLK_SSI2] = imx_clk_hw_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6SL_CLK_SSI3] = imx_clk_hw_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6SL_CLK_UART] = imx_clk_hw_gate2("uart", "ipg", base + 0x7c, 24); + hws[IMX6SL_CLK_UART_SERIAL] = imx_clk_hw_gate2("uart_serial", "uart_root", base + 0x7c, 26); + hws[IMX6SL_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0); + hws[IMX6SL_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); + hws[IMX6SL_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); + hws[IMX6SL_CLK_USDHC3] = imx_clk_hw_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); + hws[IMX6SL_CLK_USDHC4] = imx_clk_hw_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); /* Ensure the MMDC CH0 handshake is bypassed */ - writel_relaxed(readl_relaxed(base + CCDR) | - BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR); + imx_mmdc_mask_handshake(base, 0); - imx_check_clocks(clks, ARRAY_SIZE(clks)); + imx_check_clk_hws(hws, IMX6SL_CLK_END); - clk_data.clks = 
clks; - clk_data.clk_num = ARRAY_SIZE(clks); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); /* Ensure the AHB clk is at 132MHz. */ - ret = clk_set_rate(clks[IMX6SL_CLK_AHB], 132000000); + ret = clk_set_rate(hws[IMX6SL_CLK_AHB]->clk, 132000000); if (ret) pr_warn("%s: failed to set AHB clock rate %d!\n", __func__, ret); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { - clk_prepare_enable(clks[IMX6SL_CLK_USBPHY1_GATE]); - clk_prepare_enable(clks[IMX6SL_CLK_USBPHY2_GATE]); + clk_prepare_enable(hws[IMX6SL_CLK_USBPHY1_GATE]->clk); + clk_prepare_enable(hws[IMX6SL_CLK_USBPHY2_GATE]->clk); } /* Audio-related clocks configuration */ - clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]); + clk_set_parent(hws[IMX6SL_CLK_SPDIF0_SEL]->clk, hws[IMX6SL_CLK_PLL3_PFD3]->clk); /* set PLL5 video as lcdif pix parent clock */ - clk_set_parent(clks[IMX6SL_CLK_LCDIF_PIX_SEL], - clks[IMX6SL_CLK_PLL5_VIDEO_DIV]); + clk_set_parent(hws[IMX6SL_CLK_LCDIF_PIX_SEL]->clk, + hws[IMX6SL_CLK_PLL5_VIDEO_DIV]->clk); - clk_set_parent(clks[IMX6SL_CLK_LCDIF_AXI_SEL], - clks[IMX6SL_CLK_PLL2_PFD2]); + clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk, + hws[IMX6SL_CLK_PLL2_PFD2]->clk); + + for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) { + int index = uart_clk_ids[i]; + + uart_clks[i] = &hws[index]->clk; + } imx_register_uart_clocks(uart_clks); } diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c index 7eea448cb9a9..5f3e92c09a5e 100644 --- a/drivers/clk/imx/clk-imx6sll.c +++ b/drivers/clk/imx/clk-imx6sll.c @@ -7,6 +7,7 @@ #include <dt-bindings/clock/imx6sll-clock.h> #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -16,7 +17,6 @@ #include "clk.h" #define CCM_ANALOG_PLL_BYPASS (0x1 << 16) -#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16) #define xPLL_CLR(offset) (offset + 0x8) static const char *pll_bypass_src_sels[] = { "osc", "dummy", }; @@ -53,8 +53,8 @@ static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0 static const char *epdc_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", }; static const char *epdc_sels[] = { "epdc_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", }; -static struct clk *clks[IMX6SLL_CLK_END]; -static struct clk_onecell_data clk_data; +static struct clk_hw **hws; +static struct clk_hw_onecell_data *clk_hw_data; static const struct clk_div_table post_div_table[] = { { .val = 2, .div = 1, }, @@ -76,33 +76,43 @@ static u32 share_count_ssi1; static u32 share_count_ssi2; static u32 share_count_ssi3; -static struct clk ** const uart_clks[] __initconst = { - &clks[IMX6SLL_CLK_UART1_IPG], - &clks[IMX6SLL_CLK_UART1_SERIAL], - &clks[IMX6SLL_CLK_UART2_IPG], - &clks[IMX6SLL_CLK_UART2_SERIAL], - &clks[IMX6SLL_CLK_UART3_IPG], - &clks[IMX6SLL_CLK_UART3_SERIAL], - &clks[IMX6SLL_CLK_UART4_IPG], - &clks[IMX6SLL_CLK_UART4_SERIAL], - &clks[IMX6SLL_CLK_UART5_IPG], - &clks[IMX6SLL_CLK_UART5_SERIAL], - NULL +static const int uart_clk_ids[] __initconst = { + IMX6SLL_CLK_UART1_IPG, + IMX6SLL_CLK_UART1_SERIAL, + IMX6SLL_CLK_UART2_IPG, + IMX6SLL_CLK_UART2_SERIAL, + IMX6SLL_CLK_UART3_IPG, + IMX6SLL_CLK_UART3_SERIAL, + IMX6SLL_CLK_UART4_IPG, + IMX6SLL_CLK_UART4_SERIAL, + IMX6SLL_CLK_UART5_IPG, + IMX6SLL_CLK_UART5_SERIAL, }; +static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata; + static void __init 
imx6sll_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; + int i; + + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, + IMX6SLL_CLK_END), GFP_KERNEL); + if (WARN_ON(!clk_hw_data)) + return; + + clk_hw_data->num = IMX6SLL_CLK_END; + hws = clk_hw_data->hws; - clks[IMX6SLL_CLK_DUMMY] = imx_clk_fixed("dummy", 0); + hws[IMX6SLL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0); - clks[IMX6SLL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); - clks[IMX6SLL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc"); + hws[IMX6SLL_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil")); + hws[IMX6SLL_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc")); /* ipp_di clock is external input */ - clks[IMX6SLL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0"); - clks[IMX6SLL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1"); + hws[IMX6SLL_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0")); + hws[IMX6SLL_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1")); np = of_find_compatible_node(NULL, NULL, "fsl,imx6sll-anatop"); base = of_iomap(np, 0); @@ -118,37 +128,37 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node) writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xa0)); writel_relaxed(CCM_ANALOG_PLL_BYPASS, base + xPLL_CLR(0xe0)); - clks[IMX6SLL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SLL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SLL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SLL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SLL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SLL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SLL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - - clks[IMX6SLL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); - clks[IMX6SLL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); - clks[IMX6SLL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); - clks[IMX6SLL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); - clks[IMX6SLL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); - clks[IMX6SLL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); - clks[IMX6SLL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); - - clks[IMX6SLL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SLL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SLL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SLL_PLL4_BYPASS] = 
imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SLL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SLL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SLL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); - - clks[IMX6SLL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1); - clks[IMX6SLL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); - clks[IMX6SLL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); - clks[IMX6SLL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); - clks[IMX6SLL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); - clks[IMX6SLL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); - clks[IMX6SLL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); + hws[IMX6SLL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SLL_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SLL_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SLL_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SLL_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SLL_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SLL_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + + hws[IMX6SLL_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "pll1_bypass_src", base + 0x00, 0x7f); + hws[IMX6SLL_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1); + hws[IMX6SLL_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", base + 0x10, 0x3); + hws[IMX6SLL_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", base + 0x70, 0x7f); + hws[IMX6SLL_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "pll5_bypass_src", base + 0xa0, 0x7f); + hws[IMX6SLL_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "pll6_bypass_src", base + 0xe0, 0x3); + hws[IMX6SLL_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", base + 0x20, 0x3); + + hws[IMX6SLL_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SLL_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SLL_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SLL_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, 
ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SLL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SLL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SLL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + + hws[IMX6SLL_CLK_PLL1_SYS] = imx_clk_hw_fixed_factor("pll1_sys", "pll1_bypass", 1, 1); + hws[IMX6SLL_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); + hws[IMX6SLL_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); + hws[IMX6SLL_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); + hws[IMX6SLL_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); + hws[IMX6SLL_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); + hws[IMX6SLL_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); /* * Bit 20 is the reserved and read-only bit, we do this only for: @@ -156,209 +166,213 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node) * - Keep refcount when do usbphy clk_enable/disable, in that case, * the clk framework may need to enable/disable usbphy's parent */ - clks[IMX6SLL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); - clks[IMX6SLL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); + hws[IMX6SLL_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); + hws[IMX6SLL_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore.
*/ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { - clks[IMX6SLL_CLK_USBPHY1_GATE] = imx_clk_gate_flags("usbphy1_gate", "dummy", base + 0x10, 6, CLK_IS_CRITICAL); - clks[IMX6SLL_CLK_USBPHY2_GATE] = imx_clk_gate_flags("usbphy2_gate", "dummy", base + 0x20, 6, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_USBPHY1_GATE] = imx_clk_hw_gate_flags("usbphy1_gate", "dummy", base + 0x10, 6, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_USBPHY2_GATE] = imx_clk_hw_gate_flags("usbphy2_gate", "dummy", base + 0x20, 6, CLK_IS_CRITICAL); } /* name parent_name reg idx */ - clks[IMX6SLL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); - clks[IMX6SLL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); - clks[IMX6SLL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); - clks[IMX6SLL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); - clks[IMX6SLL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); - clks[IMX6SLL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); - clks[IMX6SLL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); - clks[IMX6SLL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); - - clks[IMX6SLL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", + hws[IMX6SLL_CLK_PLL2_PFD0] = imx_clk_hw_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); + hws[IMX6SLL_CLK_PLL2_PFD1] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); + hws[IMX6SLL_CLK_PLL2_PFD2] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); + hws[IMX6SLL_CLK_PLL2_PFD3] = imx_clk_hw_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); + hws[IMX6SLL_CLK_PLL3_PFD0] = imx_clk_hw_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); + hws[IMX6SLL_CLK_PLL3_PFD1] = imx_clk_hw_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); + hws[IMX6SLL_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); + hws[IMX6SLL_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); + + hws[IMX6SLL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6SLL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", + hws[IMX6SLL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock); - clks[IMX6SLL_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", + hws[IMX6SLL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6SLL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", + hws[IMX6SLL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); /* name parent_name mult div */ - clks[IMX6SLL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); - clks[IMX6SLL_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); - clks[IMX6SLL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); - 
clks[IMX6SLL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); + hws[IMX6SLL_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); + hws[IMX6SLL_CLK_PLL3_120M] = imx_clk_hw_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); + hws[IMX6SLL_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); + hws[IMX6SLL_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); - clks[IMX6SLL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels)); - clks[IMX6SLL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0); - clks[IMX6SLL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels)); - clks[IMX6SLL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0); - clks[IMX6SLL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); - clks[IMX6SLL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); - clks[IMX6SLL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); - clks[IMX6SLL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); - clks[IMX6SLL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SLL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SLL_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SLL_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clks[IMX6SLL_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clks[IMX6SLL_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clks[IMX6SLL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); - clks[IMX6SLL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - clks[IMX6SLL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); - clks[IMX6SLL_CLK_EXTERN_AUDIO_SEL] = imx_clk_mux("extern_audio_sel", base + 0x30, 7, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); - clks[IMX6SLL_CLK_EPDC_PRE_SEL] = imx_clk_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels)); - clks[IMX6SLL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels)); - clks[IMX6SLL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); - clks[IMX6SLL_CLK_LCDIF_PRE_SEL] = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels)); - clks[IMX6SLL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels)); - - clks[IMX6SLL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); - clks[IMX6SLL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); - - clks[IMX6SLL_CLK_PERIPH_CLK2] = 
imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); - clks[IMX6SLL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); - clks[IMX6SLL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); - clks[IMX6SLL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3); - clks[IMX6SLL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6); - clks[IMX6SLL_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); - clks[IMX6SLL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); - clks[IMX6SLL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); - clks[IMX6SLL_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); - clks[IMX6SLL_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); - clks[IMX6SLL_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); - clks[IMX6SLL_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); - clks[IMX6SLL_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); - clks[IMX6SLL_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); - clks[IMX6SLL_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); - clks[IMX6SLL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); - clks[IMX6SLL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); - clks[IMX6SLL_CLK_EXTERN_AUDIO_PRED] = imx_clk_divider("extern_audio_pred", "extern_audio_sel", base + 0x30, 12, 3); - clks[IMX6SLL_CLK_EXTERN_AUDIO_PODF] = imx_clk_divider("extern_audio_podf", "extern_audio_pred", base + 0x30, 9, 3); - clks[IMX6SLL_CLK_EPDC_PODF] = imx_clk_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3); - clks[IMX6SLL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); - clks[IMX6SLL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3); - - clks[IMX6SLL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); - clks[IMX6SLL_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); - clks[IMX6SLL_CLK_AXI_PODF] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); - clks[IMX6SLL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); - - clks[IMX6SLL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); - clks[IMX6SLL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); - clks[IMX6SLL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); - clks[IMX6SLL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7); - - clks[IMX6SLL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels)); - clks[IMX6SLL_CLK_LDB_DI1_SEL] = imx_clk_mux("ldb_di1_sel", base + 0x1c, 7, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels)); - clks[IMX6SLL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels)); - clks[IMX6SLL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1_div_sel", base + 0x20, 10, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels)); + hws[IMX6SLL_CLK_STEP] = imx_clk_hw_mux("step", base + 0x0c, 8, 1, step_sels, 
ARRAY_SIZE(step_sels)); + hws[IMX6SLL_CLK_PLL1_SW] = imx_clk_hw_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0); + hws[IMX6SLL_CLK_AXI_ALT_SEL] = imx_clk_hw_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels)); + hws[IMX6SLL_CLK_AXI_SEL] = imx_clk_hw_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0); + hws[IMX6SLL_CLK_PERIPH_PRE] = imx_clk_hw_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); + hws[IMX6SLL_CLK_PERIPH2_PRE] = imx_clk_hw_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); + hws[IMX6SLL_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + hws[IMX6SLL_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + hws[IMX6SLL_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SLL_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SLL_CLK_USDHC3_SEL] = imx_clk_hw_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SLL_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6SLL_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6SLL_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6SLL_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); + hws[IMX6SLL_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); + hws[IMX6SLL_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); + hws[IMX6SLL_CLK_EXTERN_AUDIO_SEL] = imx_clk_hw_mux("extern_audio_sel", base + 0x30, 7, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); + hws[IMX6SLL_CLK_EPDC_PRE_SEL] = imx_clk_hw_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels)); + hws[IMX6SLL_CLK_EPDC_SEL] = imx_clk_hw_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels)); + hws[IMX6SLL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); + hws[IMX6SLL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels)); + hws[IMX6SLL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels)); + + hws[IMX6SLL_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); + hws[IMX6SLL_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); + + hws[IMX6SLL_CLK_PERIPH_CLK2] = imx_clk_hw_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); + hws[IMX6SLL_CLK_PERIPH2_CLK2] = imx_clk_hw_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); + hws[IMX6SLL_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2); + hws[IMX6SLL_CLK_LCDIF_PODF] = imx_clk_hw_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3); + hws[IMX6SLL_CLK_PERCLK] = imx_clk_hw_divider("perclk", "perclk_sel", base + 0x1c, 0, 6); + hws[IMX6SLL_CLK_USDHC3_PODF] = imx_clk_hw_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); + 
hws[IMX6SLL_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); + hws[IMX6SLL_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); + hws[IMX6SLL_CLK_UART_PODF] = imx_clk_hw_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); + hws[IMX6SLL_CLK_SSI3_PRED] = imx_clk_hw_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); + hws[IMX6SLL_CLK_SSI3_PODF] = imx_clk_hw_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); + hws[IMX6SLL_CLK_SSI1_PRED] = imx_clk_hw_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); + hws[IMX6SLL_CLK_SSI1_PODF] = imx_clk_hw_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); + hws[IMX6SLL_CLK_SSI2_PRED] = imx_clk_hw_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); + hws[IMX6SLL_CLK_SSI2_PODF] = imx_clk_hw_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); + hws[IMX6SLL_CLK_SPDIF_PRED] = imx_clk_hw_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); + hws[IMX6SLL_CLK_SPDIF_PODF] = imx_clk_hw_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); + hws[IMX6SLL_CLK_EXTERN_AUDIO_PRED] = imx_clk_hw_divider("extern_audio_pred", "extern_audio_sel", base + 0x30, 12, 3); + hws[IMX6SLL_CLK_EXTERN_AUDIO_PODF] = imx_clk_hw_divider("extern_audio_podf", "extern_audio_pred", base + 0x30, 9, 3); + hws[IMX6SLL_CLK_EPDC_PODF] = imx_clk_hw_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3); + hws[IMX6SLL_CLK_ECSPI_PODF] = imx_clk_hw_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); + hws[IMX6SLL_CLK_LCDIF_PRED] = imx_clk_hw_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3); + + hws[IMX6SLL_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); + hws[IMX6SLL_CLK_MMDC_PODF] = imx_clk_hw_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); + hws[IMX6SLL_CLK_AXI_PODF] = imx_clk_hw_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); + hws[IMX6SLL_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); + + hws[IMX6SLL_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + hws[IMX6SLL_CLK_LDB_DI0_DIV_7] = imx_clk_hw_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); + hws[IMX6SLL_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); + hws[IMX6SLL_CLK_LDB_DI1_DIV_7] = imx_clk_hw_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7); + + hws[IMX6SLL_CLK_LDB_DI0_SEL] = imx_clk_hw_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels)); + hws[IMX6SLL_CLK_LDB_DI1_SEL] = imx_clk_hw_mux("ldb_di1_sel", base + 0x1c, 7, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels)); + hws[IMX6SLL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels)); + hws[IMX6SLL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1_div_sel", base + 0x20, 10, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels)); /* CCGR0 */ - clks[IMX6SLL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL); - clks[IMX6SLL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL); - clks[IMX6SLL_CLK_DCP] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10); - clks[IMX6SLL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28); - clks[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28); - clks[IMX6SLL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30); + hws[IMX6SLL_CLK_AIPSTZ1] = 
imx_clk_hw_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_AIPSTZ2] = imx_clk_hw_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_DCP] = imx_clk_hw_gate2("dcp", "ahb", base + 0x68, 10); + hws[IMX6SLL_CLK_UART2_IPG] = imx_clk_hw_gate2("uart2_ipg", "ipg", base + 0x68, 28); + hws[IMX6SLL_CLK_UART2_SERIAL] = imx_clk_hw_gate2("uart2_serial", "uart_podf", base + 0x68, 28); + hws[IMX6SLL_CLK_GPIO2] = imx_clk_hw_gate2("gpio2", "ipg", base + 0x68, 30); /* CCGR1 */ - clks[IMX6SLL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); - clks[IMX6SLL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); - clks[IMX6SLL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); - clks[IMX6SLL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); - clks[IMX6SLL_CLK_UART3_IPG] = imx_clk_gate2("uart3_ipg", "ipg", base + 0x6c, 10); - clks[IMX6SLL_CLK_UART3_SERIAL] = imx_clk_gate2("uart3_serial", "uart_podf", base + 0x6c, 10); - clks[IMX6SLL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); - clks[IMX6SLL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); - clks[IMX6SLL_CLK_GPT_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20); - clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22); - clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24); - clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24); - clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26); - clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30); + hws[IMX6SLL_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); + hws[IMX6SLL_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); + hws[IMX6SLL_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); + hws[IMX6SLL_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); + hws[IMX6SLL_CLK_UART3_IPG] = imx_clk_hw_gate2("uart3_ipg", "ipg", base + 0x6c, 10); + hws[IMX6SLL_CLK_UART3_SERIAL] = imx_clk_hw_gate2("uart3_serial", "uart_podf", base + 0x6c, 10); + hws[IMX6SLL_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "perclk", base + 0x6c, 12); + hws[IMX6SLL_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "perclk", base + 0x6c, 14); + hws[IMX6SLL_CLK_GPT_BUS] = imx_clk_hw_gate2("gpt1_bus", "perclk", base + 0x6c, 20); + hws[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_hw_gate2("gpt1_serial", "perclk", base + 0x6c, 22); + hws[IMX6SLL_CLK_UART4_IPG] = imx_clk_hw_gate2("uart4_ipg", "ipg", base + 0x6c, 24); + hws[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_hw_gate2("uart4_serial", "uart_podf", base + 0x6c, 24); + hws[IMX6SLL_CLK_GPIO1] = imx_clk_hw_gate2("gpio1", "ipg", base + 0x6c, 26); + hws[IMX6SLL_CLK_GPIO5] = imx_clk_hw_gate2("gpio5", "ipg", base + 0x6c, 30); /* CCGR2 */ - clks[IMX6SLL_CLK_GPIO6] = imx_clk_gate2("gpio6", "ipg", base + 0x70, 0); - clks[IMX6SLL_CLK_CSI] = imx_clk_gate2("csi", "axi", base + 0x70, 2); - clks[IMX6SLL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); - clks[IMX6SLL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); - clks[IMX6SLL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); - clks[IMX6SLL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); - clks[IMX6SLL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26); - clks[IMX6SLL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28); - 
clks[IMX6SLL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30); + hws[IMX6SLL_CLK_GPIO6] = imx_clk_hw_gate2("gpio6", "ipg", base + 0x70, 0); + hws[IMX6SLL_CLK_CSI] = imx_clk_hw_gate2("csi", "axi", base + 0x70, 2); + hws[IMX6SLL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6); + hws[IMX6SLL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8); + hws[IMX6SLL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10); + hws[IMX6SLL_CLK_OCOTP] = imx_clk_hw_gate2("ocotp", "ipg", base + 0x70, 12); + hws[IMX6SLL_CLK_GPIO3] = imx_clk_hw_gate2("gpio3", "ipg", base + 0x70, 26); + hws[IMX6SLL_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif_apb", "axi", base + 0x70, 28); + hws[IMX6SLL_CLK_PXP] = imx_clk_hw_gate2("pxp", "axi", base + 0x70, 30); /* CCGR3 */ - clks[IMX6SLL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2); - clks[IMX6SLL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2); - clks[IMX6SLL_CLK_EPDC_AXI] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4); - clks[IMX6SLL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4); - clks[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10); - clks[IMX6SLL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12); - clks[IMX6SLL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16); - clks[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); - clks[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); - clks[IMX6SLL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); - clks[IMX6SLL_CLK_OCRAM] = imx_clk_gate_flags("ocram","ahb", base + 0x74, 28, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_UART5_IPG] = imx_clk_hw_gate2("uart5_ipg", "ipg", base + 0x74, 2); + hws[IMX6SLL_CLK_UART5_SERIAL] = imx_clk_hw_gate2("uart5_serial", "uart_podf", base + 0x74, 2); + hws[IMX6SLL_CLK_EPDC_AXI] = imx_clk_hw_gate2("epdc_aclk", "axi", base + 0x74, 4); + hws[IMX6SLL_CLK_EPDC_PIX] = imx_clk_hw_gate2("epdc_pix", "epdc_podf", base + 0x74, 4); + hws[IMX6SLL_CLK_LCDIF_PIX] = imx_clk_hw_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10); + hws[IMX6SLL_CLK_GPIO4] = imx_clk_hw_gate2("gpio4", "ipg", base + 0x74, 12); + hws[IMX6SLL_CLK_WDOG1] = imx_clk_hw_gate2("wdog1", "ipg", base + 0x74, 16); + hws[IMX6SLL_CLK_MMDC_P0_FAST] = imx_clk_hw_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_MMDC_P1_IPG] = imx_clk_hw_gate2_flags("mmdc_p1_ipg", "ipg", base + 0x74, 26, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_OCRAM] = imx_clk_hw_gate_flags("ocram", "ahb", base + 0x74, 28, CLK_IS_CRITICAL); /* CCGR4 */ - clks[IMX6SLL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); - clks[IMX6SLL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); - clks[IMX6SLL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); - clks[IMX6SLL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); + hws[IMX6SLL_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "perclk", base + 0x78, 16); + hws[IMX6SLL_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "perclk", base + 0x78, 18); + hws[IMX6SLL_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "perclk", base + 0x78, 20); + hws[IMX6SLL_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "perclk", base + 0x78, 22); /* CCGR5 */ - clks[IMX6SLL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 
0x7c, 0, CLK_IS_CRITICAL); - clks[IMX6SLL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); - clks[IMX6SLL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10); - clks[IMX6SLL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); - clks[IMX6SLL_CLK_EXTERN_AUDIO] = imx_clk_gate2_shared("extern_audio", "extern_audio_podf", base + 0x7c, 14, &share_count_audio); - clks[IMX6SLL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio); - clks[IMX6SLL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); - clks[IMX6SLL_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); - clks[IMX6SLL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); - clks[IMX6SLL_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); - clks[IMX6SLL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); - clks[IMX6SLL_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); - clks[IMX6SLL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); - clks[IMX6SLL_CLK_UART1_IPG] = imx_clk_gate2("uart1_ipg", "ipg", base + 0x7c, 24); - clks[IMX6SLL_CLK_UART1_SERIAL] = imx_clk_gate2("uart1_serial", "uart_podf", base + 0x7c, 24); + hws[IMX6SLL_CLK_ROM] = imx_clk_hw_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); + hws[IMX6SLL_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ahb", base + 0x7c, 6); + hws[IMX6SLL_CLK_WDOG2] = imx_clk_hw_gate2("wdog2", "ipg", base + 0x7c, 10); + hws[IMX6SLL_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12); + hws[IMX6SLL_CLK_EXTERN_AUDIO] = imx_clk_hw_gate2_shared("extern_audio", "extern_audio_podf", base + 0x7c, 14, &share_count_audio); + hws[IMX6SLL_CLK_SPDIF] = imx_clk_hw_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio); + hws[IMX6SLL_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); + hws[IMX6SLL_CLK_SSI1] = imx_clk_hw_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6SLL_CLK_SSI1_IPG] = imx_clk_hw_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6SLL_CLK_SSI2] = imx_clk_hw_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6SLL_CLK_SSI2_IPG] = imx_clk_hw_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6SLL_CLK_SSI3] = imx_clk_hw_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6SLL_CLK_SSI3_IPG] = imx_clk_hw_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6SLL_CLK_UART1_IPG] = imx_clk_hw_gate2("uart1_ipg", "ipg", base + 0x7c, 24); + hws[IMX6SLL_CLK_UART1_SERIAL] = imx_clk_hw_gate2("uart1_serial", "uart_podf", base + 0x7c, 24); /* CCGR6 */ - clks[IMX6SLL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); - clks[IMX6SLL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); - clks[IMX6SLL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); - clks[IMX6SLL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); + hws[IMX6SLL_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0); + hws[IMX6SLL_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); + hws[IMX6SLL_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); + 
hws[IMX6SLL_CLK_USDHC3] = imx_clk_hw_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); /* mask handshake of mmdc */ - writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + 0x4); + imx_mmdc_mask_handshake(base, 0); - imx_check_clocks(clks, ARRAY_SIZE(clks)); + imx_check_clk_hws(hws, IMX6SLL_CLK_END); - clk_data.clks = clks; - clk_data.clk_num = ARRAY_SIZE(clks); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); + + for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) { + int index = uart_clk_ids[i]; + + uart_clks[i] = &hws[index]->clk; + } imx_register_uart_clocks(uart_clks); /* Lower the AHB clock rate before changing the clock source. */ - clk_set_rate(clks[IMX6SLL_CLK_AHB], 99000000); + clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000); /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ - clk_set_parent(clks[IMX6SLL_CLK_PERIPH_CLK2_SEL], clks[IMX6SLL_CLK_PLL3_USB_OTG]); - clk_set_parent(clks[IMX6SLL_CLK_PERIPH], clks[IMX6SLL_CLK_PERIPH_CLK2]); - clk_set_parent(clks[IMX6SLL_CLK_PERIPH_PRE], clks[IMX6SLL_CLK_PLL2_BUS]); - clk_set_parent(clks[IMX6SLL_CLK_PERIPH], clks[IMX6SLL_CLK_PERIPH_PRE]); + clk_set_parent(hws[IMX6SLL_CLK_PERIPH_CLK2_SEL]->clk, hws[IMX6SLL_CLK_PLL3_USB_OTG]->clk); + clk_set_parent(hws[IMX6SLL_CLK_PERIPH]->clk, hws[IMX6SLL_CLK_PERIPH_CLK2]->clk); + clk_set_parent(hws[IMX6SLL_CLK_PERIPH_PRE]->clk, hws[IMX6SLL_CLK_PLL2_BUS]->clk); + clk_set_parent(hws[IMX6SLL_CLK_PERIPH]->clk, hws[IMX6SLL_CLK_PERIPH_PRE]->clk); - clk_set_rate(clks[IMX6SLL_CLK_AHB], 132000000); + clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 132000000); } CLK_OF_DECLARE_DRIVER(imx6sll, "fsl,imx6sll-ccm", imx6sll_clocks_init); diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index d243e3483e24..c4685c01929a 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c @@ -6,6 +6,7 @@ #include <dt-bindings/clock/imx6sx-clock.h> #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -16,9 +17,6 @@ #include "clk.h" -#define CCDR 0x4 -#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16) - static const char *step_sels[] = { "osc", "pll2_pfd2_396m", }; static const char *pll1_sw_sels[] = { "pll1_sys", "step", }; static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", }; @@ -83,8 +81,8 @@ static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", }; static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", }; static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", }; -static struct clk *clks[IMX6SX_CLK_CLK_END]; -static struct clk_onecell_data clk_data; +static struct clk_hw **hws; +static struct clk_hw_onecell_data *clk_hw_data; static const struct clk_div_table clk_enet_ref_table[] = { { .val = 0, .div = 20, }, @@ -118,76 +116,86 @@ static u32 share_count_ssi3; static u32 share_count_sai1; static u32 share_count_sai2; -static struct clk ** const uart_clks[] __initconst = { - &clks[IMX6SX_CLK_UART_IPG], - &clks[IMX6SX_CLK_UART_SERIAL], - NULL +static const int uart_clk_ids[] __initconst = { + IMX6SX_CLK_UART_IPG, + IMX6SX_CLK_UART_SERIAL, }; +static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata; + static void __init imx6sx_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; + int i; + + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, + IMX6SX_CLK_CLK_END), GFP_KERNEL); + 
if (WARN_ON(!clk_hw_data)) + return; + + clk_hw_data->num = IMX6SX_CLK_CLK_END; + hws = clk_hw_data->hws; - clks[IMX6SX_CLK_DUMMY] = imx_clk_fixed("dummy", 0); + hws[IMX6SX_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0); - clks[IMX6SX_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); - clks[IMX6SX_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc"); + hws[IMX6SX_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil")); + hws[IMX6SX_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc")); /* ipp_di clock is external input */ - clks[IMX6SX_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0"); - clks[IMX6SX_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1"); + hws[IMX6SX_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0")); + hws[IMX6SX_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1")); /* Clock source from external clock via CLK1/2 PAD */ - clks[IMX6SX_CLK_ANACLK1] = of_clk_get_by_name(ccm_node, "anaclk1"); - clks[IMX6SX_CLK_ANACLK2] = of_clk_get_by_name(ccm_node, "anaclk2"); + hws[IMX6SX_CLK_ANACLK1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "anaclk1")); + hws[IMX6SX_CLK_ANACLK2] = __clk_get_hw(of_clk_get_by_name(ccm_node, "anaclk2")); np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop"); base = of_iomap(np, 0); WARN_ON(!base); of_node_put(np); - clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SX_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SX_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SX_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SX_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6SX_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6SX_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); /* type name parent_name base div_mask */ - clks[IMX6SX_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); - clks[IMX6SX_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); - clks[IMX6SX_CLK_PLL3] = 
imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); - clks[IMX6SX_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); - clks[IMX6SX_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); - clks[IMX6SX_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); - clks[IMX6SX_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); - - clks[IMX6SX_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + hws[IMX6SX_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + hws[IMX6SX_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + hws[IMX6SX_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + hws[IMX6SX_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + hws[IMX6SX_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + hws[IMX6SX_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); + + hws[IMX6SX_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); /* Do not bypass PLLs initially */ - clk_set_parent(clks[IMX6SX_PLL1_BYPASS], clks[IMX6SX_CLK_PLL1]); - clk_set_parent(clks[IMX6SX_PLL2_BYPASS], clks[IMX6SX_CLK_PLL2]); - clk_set_parent(clks[IMX6SX_PLL3_BYPASS], clks[IMX6SX_CLK_PLL3]); - clk_set_parent(clks[IMX6SX_PLL4_BYPASS], clks[IMX6SX_CLK_PLL4]); - clk_set_parent(clks[IMX6SX_PLL5_BYPASS], 
clks[IMX6SX_CLK_PLL5]); - clk_set_parent(clks[IMX6SX_PLL6_BYPASS], clks[IMX6SX_CLK_PLL6]); - clk_set_parent(clks[IMX6SX_PLL7_BYPASS], clks[IMX6SX_CLK_PLL7]); - - clks[IMX6SX_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); - clks[IMX6SX_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); - clks[IMX6SX_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); - clks[IMX6SX_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); - clks[IMX6SX_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); - clks[IMX6SX_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); - clks[IMX6SX_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); + clk_set_parent(hws[IMX6SX_PLL1_BYPASS]->clk, hws[IMX6SX_CLK_PLL1]->clk); + clk_set_parent(hws[IMX6SX_PLL2_BYPASS]->clk, hws[IMX6SX_CLK_PLL2]->clk); + clk_set_parent(hws[IMX6SX_PLL3_BYPASS]->clk, hws[IMX6SX_CLK_PLL3]->clk); + clk_set_parent(hws[IMX6SX_PLL4_BYPASS]->clk, hws[IMX6SX_CLK_PLL4]->clk); + clk_set_parent(hws[IMX6SX_PLL5_BYPASS]->clk, hws[IMX6SX_CLK_PLL5]->clk); + clk_set_parent(hws[IMX6SX_PLL6_BYPASS]->clk, hws[IMX6SX_CLK_PLL6]->clk); + clk_set_parent(hws[IMX6SX_PLL7_BYPASS]->clk, hws[IMX6SX_CLK_PLL7]->clk); + + hws[IMX6SX_CLK_PLL1_SYS] = imx_clk_hw_gate("pll1_sys", "pll1_bypass", base + 0x00, 13); + hws[IMX6SX_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); + hws[IMX6SX_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); + hws[IMX6SX_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); + hws[IMX6SX_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); + hws[IMX6SX_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); + hws[IMX6SX_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); /* * Bit 20 is the reserved and read-only bit, we do this only for: @@ -195,361 +203,363 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) * - Keep refcount when do usbphy clk_enable/disable, in that case, * the clk framework may need to enable/disable usbphy's parent */ - clks[IMX6SX_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); - clks[IMX6SX_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); + hws[IMX6SX_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); + hws[IMX6SX_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore. 
*/ - clks[IMX6SX_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); - clks[IMX6SX_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); + hws[IMX6SX_CLK_USBPHY1_GATE] = imx_clk_hw_gate("usbphy1_gate", "dummy", base + 0x10, 6); + hws[IMX6SX_CLK_USBPHY2_GATE] = imx_clk_hw_gate("usbphy2_gate", "dummy", base + 0x20, 6); /* FIXME 100MHz is used for pcie ref for all imx6 pcie, excepted imx6q */ - clks[IMX6SX_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 5); - clks[IMX6SX_CLK_PCIE_REF_125M] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); + hws[IMX6SX_CLK_PCIE_REF] = imx_clk_hw_fixed_factor("pcie_ref", "pll6_enet", 1, 5); + hws[IMX6SX_CLK_PCIE_REF_125M] = imx_clk_hw_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19); - clks[IMX6SX_CLK_LVDS1_OUT] = imx_clk_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12)); - clks[IMX6SX_CLK_LVDS2_OUT] = imx_clk_gate_exclusive("lvds2_out", "lvds2_sel", base + 0x160, 11, BIT(13)); - clks[IMX6SX_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); - clks[IMX6SX_CLK_LVDS2_IN] = imx_clk_gate_exclusive("lvds2_in", "anaclk2", base + 0x160, 13, BIT(11)); + hws[IMX6SX_CLK_LVDS1_OUT] = imx_clk_hw_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x160, 10, BIT(12)); + hws[IMX6SX_CLK_LVDS2_OUT] = imx_clk_hw_gate_exclusive("lvds2_out", "lvds2_sel", base + 0x160, 11, BIT(13)); + hws[IMX6SX_CLK_LVDS1_IN] = imx_clk_hw_gate_exclusive("lvds1_in", "anaclk1", base + 0x160, 12, BIT(10)); + hws[IMX6SX_CLK_LVDS2_IN] = imx_clk_hw_gate_exclusive("lvds2_in", "anaclk2", base + 0x160, 13, BIT(11)); - clks[IMX6SX_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, + hws[IMX6SX_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); - clks[IMX6SX_CLK_ENET2_REF] = clk_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0, + hws[IMX6SX_CLK_ENET2_REF] = clk_hw_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0, base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock); - clks[IMX6SX_CLK_ENET2_REF_125M] = imx_clk_gate("enet2_ref_125m", "enet2_ref", base + 0xe0, 20); + hws[IMX6SX_CLK_ENET2_REF_125M] = imx_clk_hw_gate("enet2_ref_125m", "enet2_ref", base + 0xe0, 20); - clks[IMX6SX_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20); - clks[IMX6SX_CLK_ENET_PTP] = imx_clk_gate("enet_ptp_25m", "enet_ptp_ref", base + 0xe0, 21); + hws[IMX6SX_CLK_ENET_PTP_REF] = imx_clk_hw_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20); + hws[IMX6SX_CLK_ENET_PTP] = imx_clk_hw_gate("enet_ptp_25m", "enet_ptp_ref", base + 0xe0, 21); /* name parent_name reg idx */ - clks[IMX6SX_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); - clks[IMX6SX_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); - clks[IMX6SX_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); - clks[IMX6SX_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); - clks[IMX6SX_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); - clks[IMX6SX_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); - clks[IMX6SX_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); - clks[IMX6SX_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); + hws[IMX6SX_CLK_PLL2_PFD0] = imx_clk_hw_pfd("pll2_pfd0_352m", 
"pll2_bus", base + 0x100, 0); + hws[IMX6SX_CLK_PLL2_PFD1] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); + hws[IMX6SX_CLK_PLL2_PFD2] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); + hws[IMX6SX_CLK_PLL2_PFD3] = imx_clk_hw_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); + hws[IMX6SX_CLK_PLL3_PFD0] = imx_clk_hw_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); + hws[IMX6SX_CLK_PLL3_PFD1] = imx_clk_hw_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); + hws[IMX6SX_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); + hws[IMX6SX_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); /* name parent_name mult div */ - clks[IMX6SX_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); - clks[IMX6SX_CLK_PLL3_120M] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); - clks[IMX6SX_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); - clks[IMX6SX_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); - clks[IMX6SX_CLK_TWD] = imx_clk_fixed_factor("twd", "arm", 1, 2); - clks[IMX6SX_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); - - clks[IMX6SX_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", + hws[IMX6SX_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); + hws[IMX6SX_CLK_PLL3_120M] = imx_clk_hw_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4); + hws[IMX6SX_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); + hws[IMX6SX_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); + hws[IMX6SX_CLK_TWD] = imx_clk_hw_fixed_factor("twd", "arm", 1, 2); + hws[IMX6SX_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc", 1, 8); + + hws[IMX6SX_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6SX_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", + hws[IMX6SX_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT, base + 0x170, 15, 1, 0, &imx_ccm_lock); - clks[IMX6SX_CLK_PLL5_POST_DIV] = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", + hws[IMX6SX_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6SX_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", + hws[IMX6SX_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); /* name reg shift width parent_names num_parents */ - clks[IMX6SX_CLK_LVDS1_SEL] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); - clks[IMX6SX_CLK_LVDS2_SEL] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); + hws[IMX6SX_CLK_LVDS1_SEL] = imx_clk_hw_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); + hws[IMX6SX_CLK_LVDS2_SEL] = imx_clk_hw_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels)); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); /* name reg shift width parent_names num_parents */ - clks[IMX6SX_CLK_STEP] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); - clks[IMX6SX_CLK_PLL1_SW] = 
imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); - clks[IMX6SX_CLK_OCRAM_SEL] = imx_clk_mux("ocram_sel", base + 0x14, 6, 2, ocram_sels, ARRAY_SIZE(ocram_sels)); - clks[IMX6SX_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); - clks[IMX6SX_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); - clks[IMX6SX_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); - clks[IMX6SX_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); - clks[IMX6SX_CLK_PCIE_AXI_SEL] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); - clks[IMX6SX_CLK_GPU_AXI_SEL] = imx_clk_mux("gpu_axi_sel", base + 0x18, 8, 2, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); - clks[IMX6SX_CLK_GPU_CORE_SEL] = imx_clk_mux("gpu_core_sel", base + 0x18, 4, 2, gpu_core_sels, ARRAY_SIZE(gpu_core_sels)); - clks[IMX6SX_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); - clks[IMX6SX_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SX_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SX_CLK_USDHC3_SEL] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SX_CLK_USDHC4_SEL] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6SX_CLK_SSI3_SEL] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clks[IMX6SX_CLK_SSI2_SEL] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clks[IMX6SX_CLK_SSI1_SEL] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); - clks[IMX6SX_CLK_QSPI1_SEL] = imx_clk_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); - clks[IMX6SX_CLK_VID_SEL] = imx_clk_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); - clks[IMX6SX_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clks[IMX6SX_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); - clks[IMX6SX_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - clks[IMX6SX_CLK_QSPI2_SEL] = imx_clk_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clks[IMX6SX_CLK_AUDIO_SEL] = imx_clk_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); - clks[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); - clks[IMX6SX_CLK_ENET_SEL] = imx_clk_mux("enet_sel", base + 0x34, 9, 3, enet_sels, ARRAY_SIZE(enet_sels)); - clks[IMX6SX_CLK_M4_PRE_SEL] = imx_clk_mux("m4_pre_sel", base + 0x34, 6, 3, m4_pre_sels, ARRAY_SIZE(m4_pre_sels)); - clks[IMX6SX_CLK_M4_SEL] = imx_clk_mux("m4_sel", base + 0x34, 0, 3, m4_sels, ARRAY_SIZE(m4_sels)); - clks[IMX6SX_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", 
base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); - clks[IMX6SX_CLK_LCDIF2_PRE_SEL] = imx_clk_mux("lcdif2_pre_sel", base + 0x38, 6, 3, lcdif2_pre_sels, ARRAY_SIZE(lcdif2_pre_sels)); - clks[IMX6SX_CLK_LCDIF2_SEL] = imx_clk_mux("lcdif2_sel", base + 0x38, 0, 3, lcdif2_sels, ARRAY_SIZE(lcdif2_sels)); - clks[IMX6SX_CLK_DISPLAY_SEL] = imx_clk_mux("display_sel", base + 0x3c, 14, 2, display_sels, ARRAY_SIZE(display_sels)); - clks[IMX6SX_CLK_CSI_SEL] = imx_clk_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels)); - clks[IMX6SX_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); - clks[IMX6SX_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); - clks[IMX6SX_CLK_CKO] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); - - clks[IMX6SX_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_LCDIF1_PRE_SEL] = imx_clk_mux_flags("lcdif1_pre_sel", base + 0x38, 15, 3, lcdif1_pre_sels, ARRAY_SIZE(lcdif1_pre_sels), CLK_SET_RATE_PARENT); - clks[IMX6SX_CLK_LCDIF1_SEL] = imx_clk_mux_flags("lcdif1_sel", base + 0x38, 9, 3, lcdif1_sels, ARRAY_SIZE(lcdif1_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_STEP] = imx_clk_hw_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels)); + hws[IMX6SX_CLK_PLL1_SW] = imx_clk_hw_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels)); + hws[IMX6SX_CLK_OCRAM_SEL] = imx_clk_hw_mux("ocram_sel", base + 0x14, 6, 2, ocram_sels, ARRAY_SIZE(ocram_sels)); + hws[IMX6SX_CLK_PERIPH_PRE] = imx_clk_hw_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); + hws[IMX6SX_CLK_PERIPH2_PRE] = imx_clk_hw_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); + hws[IMX6SX_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + hws[IMX6SX_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + hws[IMX6SX_CLK_PCIE_AXI_SEL] = imx_clk_hw_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels)); + hws[IMX6SX_CLK_GPU_AXI_SEL] = imx_clk_hw_mux("gpu_axi_sel", base + 0x18, 8, 2, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels)); + hws[IMX6SX_CLK_GPU_CORE_SEL] = imx_clk_hw_mux("gpu_core_sel", base + 0x18, 4, 2, gpu_core_sels, ARRAY_SIZE(gpu_core_sels)); + hws[IMX6SX_CLK_EIM_SLOW_SEL] = imx_clk_hw_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); + hws[IMX6SX_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SX_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SX_CLK_USDHC3_SEL] = imx_clk_hw_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SX_CLK_USDHC4_SEL] = imx_clk_hw_mux("usdhc4_sel", 
base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6SX_CLK_SSI3_SEL] = imx_clk_hw_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6SX_CLK_SSI2_SEL] = imx_clk_hw_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6SX_CLK_SSI1_SEL] = imx_clk_hw_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels)); + hws[IMX6SX_CLK_QSPI1_SEL] = imx_clk_hw_mux_flags("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); + hws[IMX6SX_CLK_VID_SEL] = imx_clk_hw_mux("vid_sel", base + 0x20, 21, 3, vid_sels, ARRAY_SIZE(vid_sels)); + hws[IMX6SX_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6SX_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); + hws[IMX6SX_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); + hws[IMX6SX_CLK_QSPI2_SEL] = imx_clk_hw_mux_flags("qspi2_sel", base + 0x2c, 15, 3, qspi2_sels, ARRAY_SIZE(qspi2_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6SX_CLK_AUDIO_SEL] = imx_clk_hw_mux("audio_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels)); + hws[IMX6SX_CLK_ENET_PRE_SEL] = imx_clk_hw_mux("enet_pre_sel", base + 0x34, 15, 3, enet_pre_sels, ARRAY_SIZE(enet_pre_sels)); + hws[IMX6SX_CLK_ENET_SEL] = imx_clk_hw_mux("enet_sel", base + 0x34, 9, 3, enet_sels, ARRAY_SIZE(enet_sels)); + hws[IMX6SX_CLK_M4_PRE_SEL] = imx_clk_hw_mux("m4_pre_sel", base + 0x34, 6, 3, m4_pre_sels, ARRAY_SIZE(m4_pre_sels)); + hws[IMX6SX_CLK_M4_SEL] = imx_clk_hw_mux("m4_sel", base + 0x34, 0, 3, m4_sels, ARRAY_SIZE(m4_sels)); + hws[IMX6SX_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); + hws[IMX6SX_CLK_LCDIF2_PRE_SEL] = imx_clk_hw_mux("lcdif2_pre_sel", base + 0x38, 6, 3, lcdif2_pre_sels, ARRAY_SIZE(lcdif2_pre_sels)); + hws[IMX6SX_CLK_LCDIF2_SEL] = imx_clk_hw_mux("lcdif2_sel", base + 0x38, 0, 3, lcdif2_sels, ARRAY_SIZE(lcdif2_sels)); + hws[IMX6SX_CLK_DISPLAY_SEL] = imx_clk_hw_mux("display_sel", base + 0x3c, 14, 2, display_sels, ARRAY_SIZE(display_sels)); + hws[IMX6SX_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels)); + hws[IMX6SX_CLK_CKO1_SEL] = imx_clk_hw_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); + hws[IMX6SX_CLK_CKO2_SEL] = imx_clk_hw_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); + hws[IMX6SX_CLK_CKO] = imx_clk_hw_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); + + hws[IMX6SX_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_LDB_DI1_SEL] = imx_clk_hw_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di1_sels, ARRAY_SIZE(ldb_di1_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_LDB_DI0_SEL] = imx_clk_hw_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_LCDIF1_PRE_SEL] = imx_clk_hw_mux_flags("lcdif1_pre_sel", base + 0x38, 15, 3, 
lcdif1_pre_sels, ARRAY_SIZE(lcdif1_pre_sels), CLK_SET_RATE_PARENT); + hws[IMX6SX_CLK_LCDIF1_SEL] = imx_clk_hw_mux_flags("lcdif1_sel", base + 0x38, 9, 3, lcdif1_sels, ARRAY_SIZE(lcdif1_sels), CLK_SET_RATE_PARENT); /* name parent_name reg shift width */ - clks[IMX6SX_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); - clks[IMX6SX_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); - clks[IMX6SX_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); - clks[IMX6SX_CLK_GPU_CORE_PODF] = imx_clk_divider("gpu_core_podf", "gpu_core_sel", base + 0x18, 29, 3); - clks[IMX6SX_CLK_GPU_AXI_PODF] = imx_clk_divider("gpu_axi_podf", "gpu_axi_sel", base + 0x18, 26, 3); - clks[IMX6SX_CLK_LCDIF1_PODF] = imx_clk_divider("lcdif1_podf", "lcdif1_pred", base + 0x18, 23, 3); - clks[IMX6SX_CLK_QSPI1_PODF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3); - clks[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); - clks[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3); - clks[IMX6SX_CLK_PERCLK] = imx_clk_divider_flags("perclk", "perclk_sel", base + 0x1c, 0, 6, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_VID_PODF] = imx_clk_divider("vid_podf", "vid_sel", base + 0x20, 24, 2); - clks[IMX6SX_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6); - clks[IMX6SX_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); - clks[IMX6SX_CLK_USDHC3_PODF] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); - clks[IMX6SX_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); - clks[IMX6SX_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); - clks[IMX6SX_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); - clks[IMX6SX_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); - clks[IMX6SX_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); - clks[IMX6SX_CLK_SSI3_PRED] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); - clks[IMX6SX_CLK_SSI3_PODF] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); - clks[IMX6SX_CLK_SSI1_PRED] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); - clks[IMX6SX_CLK_SSI1_PODF] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); - clks[IMX6SX_CLK_QSPI2_PRED] = imx_clk_divider("qspi2_pred", "qspi2_sel", base + 0x2c, 18, 3); - clks[IMX6SX_CLK_QSPI2_PODF] = imx_clk_divider("qspi2_podf", "qspi2_pred", base + 0x2c, 21, 6); - clks[IMX6SX_CLK_SSI2_PRED] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); - clks[IMX6SX_CLK_SSI2_PODF] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); - clks[IMX6SX_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); - clks[IMX6SX_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); - clks[IMX6SX_CLK_AUDIO_PRED] = imx_clk_divider("audio_pred", "audio_sel", base + 0x30, 12, 3); - clks[IMX6SX_CLK_AUDIO_PODF] = imx_clk_divider("audio_podf", "audio_pred", base + 0x30, 9, 3); - clks[IMX6SX_CLK_ENET_PODF] = imx_clk_divider("enet_podf", "enet_pre_sel", base + 0x34, 12, 3); - clks[IMX6SX_CLK_M4_PODF] = imx_clk_divider("m4_podf", "m4_sel", base + 0x34, 3, 3); - clks[IMX6SX_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); - clks[IMX6SX_CLK_LCDIF1_PRED] 
= imx_clk_divider("lcdif1_pred", "lcdif1_pre_sel", base + 0x38, 12, 3); - clks[IMX6SX_CLK_LCDIF2_PRED] = imx_clk_divider("lcdif2_pred", "lcdif2_pre_sel", base + 0x38, 3, 3); - clks[IMX6SX_CLK_DISPLAY_PODF] = imx_clk_divider("display_podf", "display_sel", base + 0x3c, 16, 3); - clks[IMX6SX_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); - clks[IMX6SX_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); - clks[IMX6SX_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); - - clks[IMX6SX_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); - clks[IMX6SX_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); - clks[IMX6SX_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); - clks[IMX6SX_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7); + hws[IMX6SX_CLK_PERIPH_CLK2] = imx_clk_hw_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); + hws[IMX6SX_CLK_PERIPH2_CLK2] = imx_clk_hw_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); + hws[IMX6SX_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2); + hws[IMX6SX_CLK_GPU_CORE_PODF] = imx_clk_hw_divider("gpu_core_podf", "gpu_core_sel", base + 0x18, 29, 3); + hws[IMX6SX_CLK_GPU_AXI_PODF] = imx_clk_hw_divider("gpu_axi_podf", "gpu_axi_sel", base + 0x18, 26, 3); + hws[IMX6SX_CLK_LCDIF1_PODF] = imx_clk_hw_divider("lcdif1_podf", "lcdif1_pred", base + 0x18, 23, 3); + hws[IMX6SX_CLK_QSPI1_PODF] = imx_clk_hw_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3); + hws[IMX6SX_CLK_EIM_SLOW_PODF] = imx_clk_hw_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); + hws[IMX6SX_CLK_LCDIF2_PODF] = imx_clk_hw_divider("lcdif2_podf", "lcdif2_pred", base + 0x1c, 20, 3); + hws[IMX6SX_CLK_PERCLK] = imx_clk_hw_divider_flags("perclk", "perclk_sel", base + 0x1c, 0, 6, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_VID_PODF] = imx_clk_hw_divider("vid_podf", "vid_sel", base + 0x20, 24, 2); + hws[IMX6SX_CLK_CAN_PODF] = imx_clk_hw_divider("can_podf", "can_sel", base + 0x20, 2, 6); + hws[IMX6SX_CLK_USDHC4_PODF] = imx_clk_hw_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); + hws[IMX6SX_CLK_USDHC3_PODF] = imx_clk_hw_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3); + hws[IMX6SX_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); + hws[IMX6SX_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); + hws[IMX6SX_CLK_UART_PODF] = imx_clk_hw_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); + hws[IMX6SX_CLK_ESAI_PRED] = imx_clk_hw_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); + hws[IMX6SX_CLK_ESAI_PODF] = imx_clk_hw_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); + hws[IMX6SX_CLK_SSI3_PRED] = imx_clk_hw_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3); + hws[IMX6SX_CLK_SSI3_PODF] = imx_clk_hw_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6); + hws[IMX6SX_CLK_SSI1_PRED] = imx_clk_hw_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3); + hws[IMX6SX_CLK_SSI1_PODF] = imx_clk_hw_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6); + hws[IMX6SX_CLK_QSPI2_PRED] = imx_clk_hw_divider("qspi2_pred", "qspi2_sel", base + 0x2c, 18, 3); + hws[IMX6SX_CLK_QSPI2_PODF] = imx_clk_hw_divider("qspi2_podf", "qspi2_pred", base + 0x2c, 21, 6); + hws[IMX6SX_CLK_SSI2_PRED] = imx_clk_hw_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3); + hws[IMX6SX_CLK_SSI2_PODF] = 
imx_clk_hw_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6); + hws[IMX6SX_CLK_SPDIF_PRED] = imx_clk_hw_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); + hws[IMX6SX_CLK_SPDIF_PODF] = imx_clk_hw_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); + hws[IMX6SX_CLK_AUDIO_PRED] = imx_clk_hw_divider("audio_pred", "audio_sel", base + 0x30, 12, 3); + hws[IMX6SX_CLK_AUDIO_PODF] = imx_clk_hw_divider("audio_podf", "audio_pred", base + 0x30, 9, 3); + hws[IMX6SX_CLK_ENET_PODF] = imx_clk_hw_divider("enet_podf", "enet_pre_sel", base + 0x34, 12, 3); + hws[IMX6SX_CLK_M4_PODF] = imx_clk_hw_divider("m4_podf", "m4_sel", base + 0x34, 3, 3); + hws[IMX6SX_CLK_ECSPI_PODF] = imx_clk_hw_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); + hws[IMX6SX_CLK_LCDIF1_PRED] = imx_clk_hw_divider("lcdif1_pred", "lcdif1_pre_sel", base + 0x38, 12, 3); + hws[IMX6SX_CLK_LCDIF2_PRED] = imx_clk_hw_divider("lcdif2_pred", "lcdif2_pre_sel", base + 0x38, 3, 3); + hws[IMX6SX_CLK_DISPLAY_PODF] = imx_clk_hw_divider("display_podf", "display_sel", base + 0x3c, 16, 3); + hws[IMX6SX_CLK_CSI_PODF] = imx_clk_hw_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); + hws[IMX6SX_CLK_CKO1_PODF] = imx_clk_hw_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); + hws[IMX6SX_CLK_CKO2_PODF] = imx_clk_hw_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); + + hws[IMX6SX_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + hws[IMX6SX_CLK_LDB_DI0_DIV_7] = imx_clk_hw_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); + hws[IMX6SX_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); + hws[IMX6SX_CLK_LDB_DI1_DIV_7] = imx_clk_hw_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7); /* name reg shift width busy: reg, shift parent_names num_parents */ - clks[IMX6SX_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); - clks[IMX6SX_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); + hws[IMX6SX_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); + hws[IMX6SX_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); /* name parent_name reg shift width busy: reg, shift */ - clks[IMX6SX_CLK_OCRAM_PODF] = imx_clk_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0); - clks[IMX6SX_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); - clks[IMX6SX_CLK_MMDC_PODF] = imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); - clks[IMX6SX_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); + hws[IMX6SX_CLK_OCRAM_PODF] = imx_clk_hw_busy_divider("ocram_podf", "ocram_sel", base + 0x14, 16, 3, base + 0x48, 0); + hws[IMX6SX_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); + hws[IMX6SX_CLK_MMDC_PODF] = imx_clk_hw_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); + hws[IMX6SX_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); /* name parent_name reg shift */ /* CCGR0 */ - clks[IMX6SX_CLK_AIPS_TZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_AIPS_TZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_APBH_DMA] = 
imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4); - clks[IMX6SX_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); - clks[IMX6SX_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); - clks[IMX6SX_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); - clks[IMX6SX_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10); - clks[IMX6SX_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); - clks[IMX6SX_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); - clks[IMX6SX_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16); - clks[IMX6SX_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); - clks[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20); - clks[IMX6SX_CLK_DCIC1] = imx_clk_gate2("dcic1", "display_podf", base + 0x68, 24); - clks[IMX6SX_CLK_DCIC2] = imx_clk_gate2("dcic2", "display_podf", base + 0x68, 26); - clks[IMX6SX_CLK_AIPS_TZ3] = imx_clk_gate2_flags("aips_tz3", "ahb", base + 0x68, 30, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_AIPS_TZ1] = imx_clk_hw_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_AIPS_TZ2] = imx_clk_hw_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_APBH_DMA] = imx_clk_hw_gate2("apbh_dma", "usdhc3", base + 0x68, 4); + hws[IMX6SX_CLK_ASRC_MEM] = imx_clk_hw_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); + hws[IMX6SX_CLK_ASRC_IPG] = imx_clk_hw_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); + hws[IMX6SX_CLK_CAAM_MEM] = imx_clk_hw_gate2("caam_mem", "ahb", base + 0x68, 8); + hws[IMX6SX_CLK_CAAM_ACLK] = imx_clk_hw_gate2("caam_aclk", "ahb", base + 0x68, 10); + hws[IMX6SX_CLK_CAAM_IPG] = imx_clk_hw_gate2("caam_ipg", "ipg", base + 0x68, 12); + hws[IMX6SX_CLK_CAN1_IPG] = imx_clk_hw_gate2("can1_ipg", "ipg", base + 0x68, 14); + hws[IMX6SX_CLK_CAN1_SERIAL] = imx_clk_hw_gate2("can1_serial", "can_podf", base + 0x68, 16); + hws[IMX6SX_CLK_CAN2_IPG] = imx_clk_hw_gate2("can2_ipg", "ipg", base + 0x68, 18); + hws[IMX6SX_CLK_CAN2_SERIAL] = imx_clk_hw_gate2("can2_serial", "can_podf", base + 0x68, 20); + hws[IMX6SX_CLK_DCIC1] = imx_clk_hw_gate2("dcic1", "display_podf", base + 0x68, 24); + hws[IMX6SX_CLK_DCIC2] = imx_clk_hw_gate2("dcic2", "display_podf", base + 0x68, 26); + hws[IMX6SX_CLK_AIPS_TZ3] = imx_clk_hw_gate2_flags("aips_tz3", "ahb", base + 0x68, 30, CLK_IS_CRITICAL); /* CCGR1 */ - clks[IMX6SX_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); - clks[IMX6SX_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); - clks[IMX6SX_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); - clks[IMX6SX_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); - clks[IMX6SX_CLK_ECSPI5] = imx_clk_gate2("ecspi5", "ecspi_podf", base + 0x6c, 8); - clks[IMX6SX_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); - clks[IMX6SX_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); - clks[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai); - clks[IMX6SX_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai); - clks[IMX6SX_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai); - clks[IMX6SX_CLK_WAKEUP] = imx_clk_gate2_flags("wakeup", "ipg", base + 0x6c, 18, CLK_IS_CRITICAL); - 
clks[IMX6SX_CLK_GPT_BUS] = imx_clk_gate2("gpt_bus", "perclk", base + 0x6c, 20); - clks[IMX6SX_CLK_GPT_SERIAL] = imx_clk_gate2("gpt_serial", "perclk", base + 0x6c, 22); - clks[IMX6SX_CLK_GPU] = imx_clk_gate2("gpu", "gpu_core_podf", base + 0x6c, 26); - clks[IMX6SX_CLK_OCRAM_S] = imx_clk_gate2("ocram_s", "ahb", base + 0x6c, 28); - clks[IMX6SX_CLK_CANFD] = imx_clk_gate2("canfd", "can_podf", base + 0x6c, 30); + hws[IMX6SX_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); + hws[IMX6SX_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); + hws[IMX6SX_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); + hws[IMX6SX_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); + hws[IMX6SX_CLK_ECSPI5] = imx_clk_hw_gate2("ecspi5", "ecspi_podf", base + 0x6c, 8); + hws[IMX6SX_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "perclk", base + 0x6c, 12); + hws[IMX6SX_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "perclk", base + 0x6c, 14); + hws[IMX6SX_CLK_ESAI_EXTAL] = imx_clk_hw_gate2_shared("esai_extal", "esai_podf", base + 0x6c, 16, &share_count_esai); + hws[IMX6SX_CLK_ESAI_IPG] = imx_clk_hw_gate2_shared("esai_ipg", "ahb", base + 0x6c, 16, &share_count_esai); + hws[IMX6SX_CLK_ESAI_MEM] = imx_clk_hw_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai); + hws[IMX6SX_CLK_WAKEUP] = imx_clk_hw_gate2_flags("wakeup", "ipg", base + 0x6c, 18, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_GPT_BUS] = imx_clk_hw_gate2("gpt_bus", "perclk", base + 0x6c, 20); + hws[IMX6SX_CLK_GPT_SERIAL] = imx_clk_hw_gate2("gpt_serial", "perclk", base + 0x6c, 22); + hws[IMX6SX_CLK_GPU] = imx_clk_hw_gate2("gpu", "gpu_core_podf", base + 0x6c, 26); + hws[IMX6SX_CLK_OCRAM_S] = imx_clk_hw_gate2("ocram_s", "ahb", base + 0x6c, 28); + hws[IMX6SX_CLK_CANFD] = imx_clk_hw_gate2("canfd", "can_podf", base + 0x6c, 30); /* CCGR2 */ - clks[IMX6SX_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2); - clks[IMX6SX_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); - clks[IMX6SX_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); - clks[IMX6SX_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); - clks[IMX6SX_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); - clks[IMX6SX_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14); - clks[IMX6SX_CLK_IPMUX1] = imx_clk_gate2_flags("ipmux1", "ahb", base + 0x70, 16, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_IPMUX2] = imx_clk_gate2_flags("ipmux2", "ahb", base + 0x70, 18, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_IPMUX3] = imx_clk_gate2_flags("ipmux3", "ahb", base + 0x70, 20, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_TZASC1] = imx_clk_gate2_flags("tzasc1", "mmdc_podf", base + 0x70, 22, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "display_podf", base + 0x70, 28); - clks[IMX6SX_CLK_PXP_AXI] = imx_clk_gate2("pxp_axi", "display_podf", base + 0x70, 30); + hws[IMX6SX_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x70, 2); + hws[IMX6SX_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6); + hws[IMX6SX_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8); + hws[IMX6SX_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10); + hws[IMX6SX_CLK_OCOTP] = imx_clk_hw_gate2("ocotp", "ipg", base + 0x70, 12); + hws[IMX6SX_CLK_IOMUXC] = imx_clk_hw_gate2("iomuxc", "lcdif1_podf", base + 0x70, 14); + hws[IMX6SX_CLK_IPMUX1] = imx_clk_hw_gate2_flags("ipmux1", "ahb", base + 0x70, 16, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_IPMUX2] = imx_clk_hw_gate2_flags("ipmux2", "ahb", 
base + 0x70, 18, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_IPMUX3] = imx_clk_hw_gate2_flags("ipmux3", "ahb", base + 0x70, 20, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_TZASC1] = imx_clk_hw_gate2_flags("tzasc1", "mmdc_podf", base + 0x70, 22, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif_apb", "display_podf", base + 0x70, 28); + hws[IMX6SX_CLK_PXP_AXI] = imx_clk_hw_gate2("pxp_axi", "display_podf", base + 0x70, 30); /* CCGR3 */ - clks[IMX6SX_CLK_M4] = imx_clk_gate2("m4", "m4_podf", base + 0x74, 2); - clks[IMX6SX_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4); - clks[IMX6SX_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "enet_sel", base + 0x74, 4); - clks[IMX6SX_CLK_DISPLAY_AXI] = imx_clk_gate2("display_axi", "display_podf", base + 0x74, 6); - clks[IMX6SX_CLK_LCDIF2_PIX] = imx_clk_gate2("lcdif2_pix", "lcdif2_sel", base + 0x74, 8); - clks[IMX6SX_CLK_LCDIF1_PIX] = imx_clk_gate2("lcdif1_pix", "lcdif1_sel", base + 0x74, 10); - clks[IMX6SX_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12); - clks[IMX6SX_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14); - clks[IMX6SX_CLK_MLB] = imx_clk_gate2("mlb", "ahb", base + 0x74, 18); - clks[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); - clks[IMX6SX_CLK_OCRAM] = imx_clk_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_M4] = imx_clk_hw_gate2("m4", "m4_podf", base + 0x74, 2); + hws[IMX6SX_CLK_ENET] = imx_clk_hw_gate2("enet", "ipg", base + 0x74, 4); + hws[IMX6SX_CLK_ENET_AHB] = imx_clk_hw_gate2("enet_ahb", "enet_sel", base + 0x74, 4); + hws[IMX6SX_CLK_DISPLAY_AXI] = imx_clk_hw_gate2("display_axi", "display_podf", base + 0x74, 6); + hws[IMX6SX_CLK_LCDIF2_PIX] = imx_clk_hw_gate2("lcdif2_pix", "lcdif2_sel", base + 0x74, 8); + hws[IMX6SX_CLK_LCDIF1_PIX] = imx_clk_hw_gate2("lcdif1_pix", "lcdif1_sel", base + 0x74, 10); + hws[IMX6SX_CLK_LDB_DI0] = imx_clk_hw_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12); + hws[IMX6SX_CLK_QSPI1] = imx_clk_hw_gate2("qspi1", "qspi1_podf", base + 0x74, 14); + hws[IMX6SX_CLK_MLB] = imx_clk_hw_gate2("mlb", "ahb", base + 0x74, 18); + hws[IMX6SX_CLK_MMDC_P0_FAST] = imx_clk_hw_gate2_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_MMDC_P1_IPG] = imx_clk_hw_gate2_flags("mmdc_p1_ipg", "ipg", base + 0x74, 26, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_OCRAM] = imx_clk_hw_gate2_flags("ocram", "ocram_podf", base + 0x74, 28, CLK_IS_CRITICAL); /* CCGR4 */ - clks[IMX6SX_CLK_PCIE_AXI] = imx_clk_gate2("pcie_axi", "display_podf", base + 0x78, 0); - clks[IMX6SX_CLK_QSPI2] = imx_clk_gate2("qspi2", "qspi2_podf", base + 0x78, 10); - clks[IMX6SX_CLK_PER1_BCH] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12); - clks[IMX6SX_CLK_PER2_MAIN] = imx_clk_gate2_flags("per2_main", "ahb", base + 0x78, 14, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); - clks[IMX6SX_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); - clks[IMX6SX_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); - clks[IMX6SX_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); - clks[IMX6SX_CLK_GPMI_BCH_APB] = 
imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); - clks[IMX6SX_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); - clks[IMX6SX_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "qspi2_podf", base + 0x78, 28); - clks[IMX6SX_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); + hws[IMX6SX_CLK_PCIE_AXI] = imx_clk_hw_gate2("pcie_axi", "display_podf", base + 0x78, 0); + hws[IMX6SX_CLK_QSPI2] = imx_clk_hw_gate2("qspi2", "qspi2_podf", base + 0x78, 10); + hws[IMX6SX_CLK_PER1_BCH] = imx_clk_hw_gate2("per1_bch", "usdhc3", base + 0x78, 12); + hws[IMX6SX_CLK_PER2_MAIN] = imx_clk_hw_gate2_flags("per2_main", "ahb", base + 0x78, 14, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "perclk", base + 0x78, 16); + hws[IMX6SX_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "perclk", base + 0x78, 18); + hws[IMX6SX_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "perclk", base + 0x78, 20); + hws[IMX6SX_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "perclk", base + 0x78, 22); + hws[IMX6SX_CLK_GPMI_BCH_APB] = imx_clk_hw_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24); + hws[IMX6SX_CLK_GPMI_BCH] = imx_clk_hw_gate2("gpmi_bch", "usdhc4", base + 0x78, 26); + hws[IMX6SX_CLK_GPMI_IO] = imx_clk_hw_gate2("gpmi_io", "qspi2_podf", base + 0x78, 28); + hws[IMX6SX_CLK_GPMI_APB] = imx_clk_hw_gate2("gpmi_apb", "usdhc3", base + 0x78, 30); /* CCGR5 */ - clks[IMX6SX_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); - clks[IMX6SX_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); - clks[IMX6SX_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); - clks[IMX6SX_CLK_AUDIO] = imx_clk_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio); - clks[IMX6SX_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio); - clks[IMX6SX_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); - clks[IMX6SX_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); - clks[IMX6SX_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); - clks[IMX6SX_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); - clks[IMX6SX_CLK_SSI1] = imx_clk_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); - clks[IMX6SX_CLK_SSI2] = imx_clk_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); - clks[IMX6SX_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); - clks[IMX6SX_CLK_UART_IPG] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24); - clks[IMX6SX_CLK_UART_SERIAL] = imx_clk_gate2("uart_serial", "uart_podf", base + 0x7c, 26); - clks[IMX6SX_CLK_SAI1_IPG] = imx_clk_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1); - clks[IMX6SX_CLK_SAI2_IPG] = imx_clk_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2); - clks[IMX6SX_CLK_SAI1] = imx_clk_gate2_shared("sai1", "ssi1_podf", base + 0x7c, 28, &share_count_sai1); - clks[IMX6SX_CLK_SAI2] = imx_clk_gate2_shared("sai2", "ssi2_podf", base + 0x7c, 30, &share_count_sai2); + hws[IMX6SX_CLK_ROM] = imx_clk_hw_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); + hws[IMX6SX_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ahb", base + 0x7c, 6); + hws[IMX6SX_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12); + hws[IMX6SX_CLK_AUDIO] = imx_clk_hw_gate2_shared("audio", "audio_podf", base + 0x7c, 14, &share_count_audio); + hws[IMX6SX_CLK_SPDIF] = 
imx_clk_hw_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio); + hws[IMX6SX_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); + hws[IMX6SX_CLK_SSI1_IPG] = imx_clk_hw_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6SX_CLK_SSI2_IPG] = imx_clk_hw_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6SX_CLK_SSI3_IPG] = imx_clk_hw_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6SX_CLK_SSI1] = imx_clk_hw_gate2_shared("ssi1", "ssi1_podf", base + 0x7c, 18, &share_count_ssi1); + hws[IMX6SX_CLK_SSI2] = imx_clk_hw_gate2_shared("ssi2", "ssi2_podf", base + 0x7c, 20, &share_count_ssi2); + hws[IMX6SX_CLK_SSI3] = imx_clk_hw_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3); + hws[IMX6SX_CLK_UART_IPG] = imx_clk_hw_gate2("uart_ipg", "ipg", base + 0x7c, 24); + hws[IMX6SX_CLK_UART_SERIAL] = imx_clk_hw_gate2("uart_serial", "uart_podf", base + 0x7c, 26); + hws[IMX6SX_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1); + hws[IMX6SX_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2); + hws[IMX6SX_CLK_SAI1] = imx_clk_hw_gate2_shared("sai1", "ssi1_podf", base + 0x7c, 28, &share_count_sai1); + hws[IMX6SX_CLK_SAI2] = imx_clk_hw_gate2_shared("sai2", "ssi2_podf", base + 0x7c, 30, &share_count_sai2); /* CCGR6 */ - clks[IMX6SX_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); - clks[IMX6SX_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); - clks[IMX6SX_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); - clks[IMX6SX_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); - clks[IMX6SX_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); - clks[IMX6SX_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10); - clks[IMX6SX_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16); - clks[IMX6SX_CLK_VADC] = imx_clk_gate2("vadc", "vid_podf", base + 0x80, 20); - clks[IMX6SX_CLK_GIS] = imx_clk_gate2("gis", "display_podf", base + 0x80, 22); - clks[IMX6SX_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24); - clks[IMX6SX_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26); - clks[IMX6SX_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28); - clks[IMX6SX_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 0x80, 30); - - clks[IMX6SX_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); - clks[IMX6SX_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24); + hws[IMX6SX_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0); + hws[IMX6SX_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); + hws[IMX6SX_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); + hws[IMX6SX_CLK_USDHC3] = imx_clk_hw_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); + hws[IMX6SX_CLK_USDHC4] = imx_clk_hw_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); + hws[IMX6SX_CLK_EIM_SLOW] = imx_clk_hw_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10); + hws[IMX6SX_CLK_PWM8] = imx_clk_hw_gate2("pwm8", "perclk", base + 0x80, 16); + hws[IMX6SX_CLK_VADC] = imx_clk_hw_gate2("vadc", "vid_podf", base + 0x80, 20); + hws[IMX6SX_CLK_GIS] = imx_clk_hw_gate2("gis", "display_podf", base + 0x80, 22); + hws[IMX6SX_CLK_I2C4] = imx_clk_hw_gate2("i2c4", "perclk", base + 0x80, 24); + hws[IMX6SX_CLK_PWM5] = imx_clk_hw_gate2("pwm5", 
"perclk", base + 0x80, 26); + hws[IMX6SX_CLK_PWM6] = imx_clk_hw_gate2("pwm6", "perclk", base + 0x80, 28); + hws[IMX6SX_CLK_PWM7] = imx_clk_hw_gate2("pwm7", "perclk", base + 0x80, 30); + + hws[IMX6SX_CLK_CKO1] = imx_clk_hw_gate("cko1", "cko1_podf", base + 0x60, 7); + hws[IMX6SX_CLK_CKO2] = imx_clk_hw_gate("cko2", "cko2_podf", base + 0x60, 24); /* mask handshake of mmdc */ - writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR); + imx_mmdc_mask_handshake(base, 0); - imx_check_clocks(clks, ARRAY_SIZE(clks)); + imx_check_clk_hws(hws, IMX6SX_CLK_CLK_END); - clk_data.clks = clks; - clk_data.clk_num = ARRAY_SIZE(clks); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { - clk_prepare_enable(clks[IMX6SX_CLK_USBPHY1_GATE]); - clk_prepare_enable(clks[IMX6SX_CLK_USBPHY2_GATE]); + clk_prepare_enable(hws[IMX6SX_CLK_USBPHY1_GATE]->clk); + clk_prepare_enable(hws[IMX6SX_CLK_USBPHY2_GATE]->clk); } /* Set the default 132MHz for EIM module */ - clk_set_parent(clks[IMX6SX_CLK_EIM_SLOW_SEL], clks[IMX6SX_CLK_PLL2_PFD2]); - clk_set_rate(clks[IMX6SX_CLK_EIM_SLOW], 132000000); + clk_set_parent(hws[IMX6SX_CLK_EIM_SLOW_SEL]->clk, hws[IMX6SX_CLK_PLL2_PFD2]->clk); + clk_set_rate(hws[IMX6SX_CLK_EIM_SLOW]->clk, 132000000); /* set parent clock for LCDIF1 pixel clock */ - clk_set_parent(clks[IMX6SX_CLK_LCDIF1_PRE_SEL], clks[IMX6SX_CLK_PLL5_VIDEO_DIV]); - clk_set_parent(clks[IMX6SX_CLK_LCDIF1_SEL], clks[IMX6SX_CLK_LCDIF1_PODF]); + clk_set_parent(hws[IMX6SX_CLK_LCDIF1_PRE_SEL]->clk, hws[IMX6SX_CLK_PLL5_VIDEO_DIV]->clk); + clk_set_parent(hws[IMX6SX_CLK_LCDIF1_SEL]->clk, hws[IMX6SX_CLK_LCDIF1_PODF]->clk); /* Set the parent clks of PCIe lvds1 and pcie_axi to be pcie ref, axi */ - if (clk_set_parent(clks[IMX6SX_CLK_LVDS1_SEL], clks[IMX6SX_CLK_PCIE_REF_125M])) + if (clk_set_parent(hws[IMX6SX_CLK_LVDS1_SEL]->clk, hws[IMX6SX_CLK_PCIE_REF_125M]->clk)) pr_err("Failed to set pcie bus parent clk.\n"); - if (clk_set_parent(clks[IMX6SX_CLK_PCIE_AXI_SEL], clks[IMX6SX_CLK_AXI])) - pr_err("Failed to set pcie parent clk.\n"); /* * Init enet system AHB clock, set to 200MHz * pll2_pfd2_396m-> ENET_PODF-> ENET_AHB */ - clk_set_parent(clks[IMX6SX_CLK_ENET_PRE_SEL], clks[IMX6SX_CLK_PLL2_PFD2]); - clk_set_parent(clks[IMX6SX_CLK_ENET_SEL], clks[IMX6SX_CLK_ENET_PODF]); - clk_set_rate(clks[IMX6SX_CLK_ENET_PODF], 200000000); - clk_set_rate(clks[IMX6SX_CLK_ENET_REF], 125000000); - clk_set_rate(clks[IMX6SX_CLK_ENET2_REF], 125000000); + clk_set_parent(hws[IMX6SX_CLK_ENET_PRE_SEL]->clk, hws[IMX6SX_CLK_PLL2_PFD2]->clk); + clk_set_parent(hws[IMX6SX_CLK_ENET_SEL]->clk, hws[IMX6SX_CLK_ENET_PODF]->clk); + clk_set_rate(hws[IMX6SX_CLK_ENET_PODF]->clk, 200000000); + clk_set_rate(hws[IMX6SX_CLK_ENET_REF]->clk, 125000000); + clk_set_rate(hws[IMX6SX_CLK_ENET2_REF]->clk, 125000000); /* Audio clocks */ - clk_set_rate(clks[IMX6SX_CLK_PLL4_AUDIO_DIV], 393216000); + clk_set_rate(hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk, 393216000); - clk_set_parent(clks[IMX6SX_CLK_SPDIF_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); - clk_set_rate(clks[IMX6SX_CLK_SPDIF_PODF], 98304000); + clk_set_parent(hws[IMX6SX_CLK_SPDIF_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk); + clk_set_rate(hws[IMX6SX_CLK_SPDIF_PODF]->clk, 98304000); - clk_set_parent(clks[IMX6SX_CLK_AUDIO_SEL], clks[IMX6SX_CLK_PLL3_USB_OTG]); - clk_set_rate(clks[IMX6SX_CLK_AUDIO_PODF], 24000000); + clk_set_parent(hws[IMX6SX_CLK_AUDIO_SEL]->clk, hws[IMX6SX_CLK_PLL3_USB_OTG]->clk); + 
clk_set_rate(hws[IMX6SX_CLK_AUDIO_PODF]->clk, 24000000); - clk_set_parent(clks[IMX6SX_CLK_SSI1_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); - clk_set_parent(clks[IMX6SX_CLK_SSI2_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); - clk_set_parent(clks[IMX6SX_CLK_SSI3_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); - clk_set_rate(clks[IMX6SX_CLK_SSI1_PODF], 24576000); - clk_set_rate(clks[IMX6SX_CLK_SSI2_PODF], 24576000); - clk_set_rate(clks[IMX6SX_CLK_SSI3_PODF], 24576000); + clk_set_parent(hws[IMX6SX_CLK_SSI1_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk); + clk_set_parent(hws[IMX6SX_CLK_SSI2_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk); + clk_set_parent(hws[IMX6SX_CLK_SSI3_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk); + clk_set_rate(hws[IMX6SX_CLK_SSI1_PODF]->clk, 24576000); + clk_set_rate(hws[IMX6SX_CLK_SSI2_PODF]->clk, 24576000); + clk_set_rate(hws[IMX6SX_CLK_SSI3_PODF]->clk, 24576000); - clk_set_parent(clks[IMX6SX_CLK_ESAI_SEL], clks[IMX6SX_CLK_PLL4_AUDIO_DIV]); - clk_set_rate(clks[IMX6SX_CLK_ESAI_PODF], 24576000); + clk_set_parent(hws[IMX6SX_CLK_ESAI_SEL]->clk, hws[IMX6SX_CLK_PLL4_AUDIO_DIV]->clk); + clk_set_rate(hws[IMX6SX_CLK_ESAI_PODF]->clk, 24576000); /* Set parent clock for vadc */ - clk_set_parent(clks[IMX6SX_CLK_VID_SEL], clks[IMX6SX_CLK_PLL3_USB_OTG]); + clk_set_parent(hws[IMX6SX_CLK_VID_SEL]->clk, hws[IMX6SX_CLK_PLL3_USB_OTG]->clk); /* default parent of can_sel clock is invalid, manually set it here */ - clk_set_parent(clks[IMX6SX_CLK_CAN_SEL], clks[IMX6SX_CLK_PLL3_60M]); + clk_set_parent(hws[IMX6SX_CLK_CAN_SEL]->clk, hws[IMX6SX_CLK_PLL3_60M]->clk); /* Update gpu clock from default 528M to 720M */ - clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); - clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]); + clk_set_parent(hws[IMX6SX_CLK_GPU_CORE_SEL]->clk, hws[IMX6SX_CLK_PLL3_PFD0]->clk); + clk_set_parent(hws[IMX6SX_CLK_GPU_AXI_SEL]->clk, hws[IMX6SX_CLK_PLL3_PFD0]->clk); - clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]); - clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]); + clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk); + clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk); + + for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) { + int index = uart_clk_ids[i]; + + uart_clks[i] = &hws[index]->clk; + } imx_register_uart_clocks(uart_clks); } diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c index 8fd52e103cc2..bc931988fe7b 100644 --- a/drivers/clk/imx/clk-imx6ul.c +++ b/drivers/clk/imx/clk-imx6ul.c @@ -6,6 +6,7 @@ #include <dt-bindings/clock/imx6ul-clock.h> #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -16,9 +17,6 @@ #include "clk.h" -#define BM_CCM_CCDR_MMDC_CH0_MASK (0x2 << 16) -#define CCDR 0x4 - static const char *pll_bypass_src_sels[] = { "osc", "dummy", }; static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", }; static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", }; @@ -70,8 +68,8 @@ static const char *cko2_sels[] = { "dummy", "dummy", "dummy", "usdhc1", "dummy", "dummy", "dummy", "dummy", "dummy", "uart_serial", "spdif", "dummy", "dummy", }; static const char *cko_sels[] = { "cko1", "cko2", }; -static struct clk *clks[IMX6UL_CLK_END]; -static struct clk_onecell_data clk_data; +static struct clk_hw **hws; +static struct clk_hw_onecell_data *clk_hw_data; static const struct clk_div_table 
clk_enet_ref_table[] = { { .val = 0, .div = 20, }, @@ -118,61 +116,69 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) struct device_node *np; void __iomem *base; - clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0); + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, + IMX6UL_CLK_END), GFP_KERNEL); + if (WARN_ON(!clk_hw_data)) + return; + + clk_hw_data->num = IMX6UL_CLK_END; + hws = clk_hw_data->hws; + + hws[IMX6UL_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0); - clks[IMX6UL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); - clks[IMX6UL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc"); + hws[IMX6UL_CLK_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil")); + hws[IMX6UL_CLK_OSC] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc")); /* ipp_di clock is external input */ - clks[IMX6UL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0"); - clks[IMX6UL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1"); + hws[IMX6UL_CLK_IPP_DI0] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di0")); + hws[IMX6UL_CLK_IPP_DI1] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ipp_di1")); np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop"); base = of_iomap(np, 0); of_node_put(np); WARN_ON(!base); - clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clks[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - - clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); - clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); - clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); - clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); - clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); - clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); - clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); - - clks[IMX6UL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, 
ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_CLK_CSI_SEL] = imx_clk_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6UL_PLL2_BYPASS_SRC] = imx_clk_hw_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6UL_PLL3_BYPASS_SRC] = imx_clk_hw_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6UL_PLL4_BYPASS_SRC] = imx_clk_hw_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6UL_PLL5_BYPASS_SRC] = imx_clk_hw_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_hw_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + hws[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_hw_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); + + hws[IMX6UL_CLK_PLL1] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll1", "osc", base + 0x00, 0x7f); + hws[IMX6UL_CLK_PLL2] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll2", "osc", base + 0x30, 0x1); + hws[IMX6UL_CLK_PLL3] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll3", "osc", base + 0x10, 0x3); + hws[IMX6UL_CLK_PLL4] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll4", "osc", base + 0x70, 0x7f); + hws[IMX6UL_CLK_PLL5] = imx_clk_hw_pllv3(IMX_PLLV3_AV, "pll5", "osc", base + 0xa0, 0x7f); + hws[IMX6UL_CLK_PLL6] = imx_clk_hw_pllv3(IMX_PLLV3_ENET, "pll6", "osc", base + 0xe0, 0x3); + hws[IMX6UL_CLK_PLL7] = imx_clk_hw_pllv3(IMX_PLLV3_USB, "pll7", "osc", base + 0x20, 0x3); + + hws[IMX6UL_PLL1_BYPASS] = imx_clk_hw_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL2_BYPASS] = imx_clk_hw_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL3_BYPASS] = imx_clk_hw_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL4_BYPASS] = imx_clk_hw_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT); /* Do not bypass PLLs initially */ - clk_set_parent(clks[IMX6UL_PLL1_BYPASS], clks[IMX6UL_CLK_PLL1]); - clk_set_parent(clks[IMX6UL_PLL2_BYPASS], clks[IMX6UL_CLK_PLL2]); - clk_set_parent(clks[IMX6UL_PLL3_BYPASS], 
clks[IMX6UL_CLK_PLL3]); - clk_set_parent(clks[IMX6UL_PLL4_BYPASS], clks[IMX6UL_CLK_PLL4]); - clk_set_parent(clks[IMX6UL_PLL5_BYPASS], clks[IMX6UL_CLK_PLL5]); - clk_set_parent(clks[IMX6UL_PLL6_BYPASS], clks[IMX6UL_CLK_PLL6]); - clk_set_parent(clks[IMX6UL_PLL7_BYPASS], clks[IMX6UL_CLK_PLL7]); - - clks[IMX6UL_CLK_PLL1_SYS] = imx_clk_fixed_factor("pll1_sys", "pll1_bypass", 1, 1); - clks[IMX6UL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); - clks[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); - clks[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); - clks[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); - clks[IMX6UL_CLK_PLL6_ENET] = imx_clk_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); - clks[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); + clk_set_parent(hws[IMX6UL_PLL1_BYPASS]->clk, hws[IMX6UL_CLK_PLL1]->clk); + clk_set_parent(hws[IMX6UL_PLL2_BYPASS]->clk, hws[IMX6UL_CLK_PLL2]->clk); + clk_set_parent(hws[IMX6UL_PLL3_BYPASS]->clk, hws[IMX6UL_CLK_PLL3]->clk); + clk_set_parent(hws[IMX6UL_PLL4_BYPASS]->clk, hws[IMX6UL_CLK_PLL4]->clk); + clk_set_parent(hws[IMX6UL_PLL5_BYPASS]->clk, hws[IMX6UL_CLK_PLL5]->clk); + clk_set_parent(hws[IMX6UL_PLL6_BYPASS]->clk, hws[IMX6UL_CLK_PLL6]->clk); + clk_set_parent(hws[IMX6UL_PLL7_BYPASS]->clk, hws[IMX6UL_CLK_PLL7]->clk); + + hws[IMX6UL_CLK_PLL1_SYS] = imx_clk_hw_fixed_factor("pll1_sys", "pll1_bypass", 1, 1); + hws[IMX6UL_CLK_PLL2_BUS] = imx_clk_hw_gate("pll2_bus", "pll2_bypass", base + 0x30, 13); + hws[IMX6UL_CLK_PLL3_USB_OTG] = imx_clk_hw_gate("pll3_usb_otg", "pll3_bypass", base + 0x10, 13); + hws[IMX6UL_CLK_PLL4_AUDIO] = imx_clk_hw_gate("pll4_audio", "pll4_bypass", base + 0x70, 13); + hws[IMX6UL_CLK_PLL5_VIDEO] = imx_clk_hw_gate("pll5_video", "pll5_bypass", base + 0xa0, 13); + hws[IMX6UL_CLK_PLL6_ENET] = imx_clk_hw_gate("pll6_enet", "pll6_bypass", base + 0xe0, 13); + hws[IMX6UL_CLK_PLL7_USB_HOST] = imx_clk_hw_gate("pll7_usb_host", "pll7_bypass", base + 0x20, 13); /* * Bit 20 is the reserved and read-only bit, we do this only for: @@ -180,291 +186,289 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) * - Keep refcount when do usbphy clk_enable/disable, in that case, * the clk framework many need to enable/disable usbphy's parent */ - clks[IMX6UL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); - clks[IMX6UL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); + hws[IMX6UL_CLK_USBPHY1] = imx_clk_hw_gate("usbphy1", "pll3_usb_otg", base + 0x10, 20); + hws[IMX6UL_CLK_USBPHY2] = imx_clk_hw_gate("usbphy2", "pll7_usb_host", base + 0x20, 20); /* * usbphy*_gate needs to be on after system boots up, and software * never needs to control it anymore. 
*/ - clks[IMX6UL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6); - clks[IMX6UL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6); + hws[IMX6UL_CLK_USBPHY1_GATE] = imx_clk_hw_gate("usbphy1_gate", "dummy", base + 0x10, 6); + hws[IMX6UL_CLK_USBPHY2_GATE] = imx_clk_hw_gate("usbphy2_gate", "dummy", base + 0x20, 6); /* name parent_name reg idx */ - clks[IMX6UL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); - clks[IMX6UL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); - clks[IMX6UL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); - clks[IMX6UL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); - clks[IMX6UL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); - clks[IMX6UL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); - clks[IMX6UL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); - clks[IMX6UL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); - - clks[IMX6UL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, + hws[IMX6UL_CLK_PLL2_PFD0] = imx_clk_hw_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0); + hws[IMX6UL_CLK_PLL2_PFD1] = imx_clk_hw_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1); + hws[IMX6UL_CLK_PLL2_PFD2] = imx_clk_hw_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2); + hws[IMX6UL_CLK_PLL2_PFD3] = imx_clk_hw_pfd("pll2_pfd3_594m", "pll2_bus", base + 0x100, 3); + hws[IMX6UL_CLK_PLL3_PFD0] = imx_clk_hw_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0); + hws[IMX6UL_CLK_PLL3_PFD1] = imx_clk_hw_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1); + hws[IMX6UL_CLK_PLL3_PFD2] = imx_clk_hw_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2); + hws[IMX6UL_CLK_PLL3_PFD3] = imx_clk_hw_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3); + + hws[IMX6UL_CLK_ENET_REF] = clk_hw_register_divider_table(NULL, "enet_ref", "pll6_enet", 0, base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock); - clks[IMX6UL_CLK_ENET2_REF] = clk_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0, + hws[IMX6UL_CLK_ENET2_REF] = clk_hw_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0, base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock); - clks[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20); - clks[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20); - clks[IMX6UL_CLK_ENET_PTP] = imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21); + hws[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_hw_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20); + hws[IMX6UL_CLK_ENET_PTP_REF] = imx_clk_hw_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20); + hws[IMX6UL_CLK_ENET_PTP] = imx_clk_hw_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21); - clks[IMX6UL_CLK_PLL4_POST_DIV] = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio", + hws[IMX6UL_CLK_PLL4_POST_DIV] = clk_hw_register_divider_table(NULL, "pll4_post_div", "pll4_audio", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6UL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div", + hws[IMX6UL_CLK_PLL4_AUDIO_DIV] = clk_hw_register_divider(NULL, "pll4_audio_div", "pll4_post_div", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock); - clks[IMX6UL_CLK_PLL5_POST_DIV] = 
clk_register_divider_table(NULL, "pll5_post_div", "pll5_video", + hws[IMX6UL_CLK_PLL5_POST_DIV] = clk_hw_register_divider_table(NULL, "pll5_post_div", "pll5_video", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX6UL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", + hws[IMX6UL_CLK_PLL5_VIDEO_DIV] = clk_hw_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock); /* name parent_name mult div */ - clks[IMX6UL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); - clks[IMX6UL_CLK_PLL3_80M] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); - clks[IMX6UL_CLK_PLL3_60M] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); - clks[IMX6UL_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); + hws[IMX6UL_CLK_PLL2_198M] = imx_clk_hw_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2); + hws[IMX6UL_CLK_PLL3_80M] = imx_clk_hw_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6); + hws[IMX6UL_CLK_PLL3_60M] = imx_clk_hw_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8); + hws[IMX6UL_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc", 1, 8); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); - clks[IMX6UL_CA7_SECONDARY_SEL] = imx_clk_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels)); - clks[IMX6UL_CLK_STEP] = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels)); - clks[IMX6UL_CLK_PLL1_SW] = imx_clk_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0); - clks[IMX6UL_CLK_AXI_ALT_SEL] = imx_clk_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels)); - clks[IMX6UL_CLK_AXI_SEL] = imx_clk_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0); - clks[IMX6UL_CLK_PERIPH_PRE] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); - clks[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); - clks[IMX6UL_CLK_PERIPH_CLK2_SEL] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); - clks[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); - clks[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); - clks[IMX6UL_CLK_GPMI_SEL] = imx_clk_mux("gpmi_sel", base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels)); - clks[IMX6UL_CLK_BCH_SEL] = imx_clk_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels)); - clks[IMX6UL_CLK_USDHC2_SEL] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6UL_CLK_USDHC1_SEL] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); - clks[IMX6UL_CLK_SAI3_SEL] = imx_clk_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels)); - clks[IMX6UL_CLK_SAI2_SEL] = imx_clk_mux("sai2_sel", base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels)); - clks[IMX6UL_CLK_SAI1_SEL] = imx_clk_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels)); - clks[IMX6UL_CLK_QSPI1_SEL] = imx_clk_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); - clks[IMX6UL_CLK_PERCLK_SEL] = imx_clk_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, 
ARRAY_SIZE(perclk_sels)); - clks[IMX6UL_CLK_CAN_SEL] = imx_clk_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); + hws[IMX6UL_CA7_SECONDARY_SEL] = imx_clk_hw_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels)); + hws[IMX6UL_CLK_STEP] = imx_clk_hw_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels)); + hws[IMX6UL_CLK_PLL1_SW] = imx_clk_hw_mux_flags("pll1_sw", base + 0x0c, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0); + hws[IMX6UL_CLK_AXI_ALT_SEL] = imx_clk_hw_mux("axi_alt_sel", base + 0x14, 7, 1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels)); + hws[IMX6UL_CLK_AXI_SEL] = imx_clk_hw_mux_flags("axi_sel", base + 0x14, 6, 1, axi_sels, ARRAY_SIZE(axi_sels), 0); + hws[IMX6UL_CLK_PERIPH_PRE] = imx_clk_hw_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels)); + hws[IMX6UL_CLK_PERIPH2_PRE] = imx_clk_hw_mux("periph2_pre", base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels)); + hws[IMX6UL_CLK_PERIPH_CLK2_SEL] = imx_clk_hw_mux("periph_clk2_sel", base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels)); + hws[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_hw_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels)); + hws[IMX6UL_CLK_EIM_SLOW_SEL] = imx_clk_hw_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels)); + hws[IMX6UL_CLK_GPMI_SEL] = imx_clk_hw_mux("gpmi_sel", base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels)); + hws[IMX6UL_CLK_BCH_SEL] = imx_clk_hw_mux("bch_sel", base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels)); + hws[IMX6UL_CLK_USDHC2_SEL] = imx_clk_hw_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6UL_CLK_USDHC1_SEL] = imx_clk_hw_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); + hws[IMX6UL_CLK_SAI3_SEL] = imx_clk_hw_mux("sai3_sel", base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels)); + hws[IMX6UL_CLK_SAI2_SEL] = imx_clk_hw_mux("sai2_sel", base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels)); + hws[IMX6UL_CLK_SAI1_SEL] = imx_clk_hw_mux("sai1_sel", base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels)); + hws[IMX6UL_CLK_QSPI1_SEL] = imx_clk_hw_mux("qspi1_sel", base + 0x1c, 7, 3, qspi1_sels, ARRAY_SIZE(qspi1_sels)); + hws[IMX6UL_CLK_PERCLK_SEL] = imx_clk_hw_mux("perclk_sel", base + 0x1c, 6, 1, perclk_sels, ARRAY_SIZE(perclk_sels)); + hws[IMX6UL_CLK_CAN_SEL] = imx_clk_hw_mux("can_sel", base + 0x20, 8, 2, can_sels, ARRAY_SIZE(can_sels)); if (clk_on_imx6ull()) - clks[IMX6ULL_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, esai_sels, ARRAY_SIZE(esai_sels)); - clks[IMX6UL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); - clks[IMX6UL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels)); - clks[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels)); - clks[IMX6UL_CLK_SPDIF_SEL] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); + hws[IMX6ULL_CLK_ESAI_SEL] = imx_clk_hw_mux("esai_sel", base + 0x20, 19, 2, esai_sels, ARRAY_SIZE(esai_sels)); + hws[IMX6UL_CLK_UART_SEL] = imx_clk_hw_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); + hws[IMX6UL_CLK_ENFC_SEL] = imx_clk_hw_mux("enfc_sel", base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels)); + hws[IMX6UL_CLK_LDB_DI0_SEL] = imx_clk_hw_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels)); 
+ hws[IMX6UL_CLK_SPDIF_SEL] = imx_clk_hw_mux("spdif_sel", base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels)); if (clk_on_imx6ul()) { - clks[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels)); - clks[IMX6UL_CLK_SIM_SEL] = imx_clk_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels)); + hws[IMX6UL_CLK_SIM_PRE_SEL] = imx_clk_hw_mux("sim_pre_sel", base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels)); + hws[IMX6UL_CLK_SIM_SEL] = imx_clk_hw_mux("sim_sel", base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels)); } else if (clk_on_imx6ull()) { - clks[IMX6ULL_CLK_EPDC_PRE_SEL] = imx_clk_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels)); - clks[IMX6ULL_CLK_EPDC_SEL] = imx_clk_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels)); + hws[IMX6ULL_CLK_EPDC_PRE_SEL] = imx_clk_hw_mux("epdc_pre_sel", base + 0x34, 15, 3, epdc_pre_sels, ARRAY_SIZE(epdc_pre_sels)); + hws[IMX6ULL_CLK_EPDC_SEL] = imx_clk_hw_mux("epdc_sel", base + 0x34, 9, 3, epdc_sels, ARRAY_SIZE(epdc_sels)); } - clks[IMX6UL_CLK_ECSPI_SEL] = imx_clk_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); - clks[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT); - clks[IMX6UL_CLK_LCDIF_SEL] = imx_clk_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels)); - - clks[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels)); - clks[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels)); - - clks[IMX6UL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); - clks[IMX6UL_CLK_CKO2_SEL] = imx_clk_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); - clks[IMX6UL_CLK_CKO] = imx_clk_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); - - clks[IMX6UL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); - clks[IMX6UL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); - clks[IMX6UL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "qspi1_sel", 2, 7); - clks[IMX6UL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "qspi1_sel", 1, 7); - - clks[IMX6UL_CLK_PERIPH] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); - clks[IMX6UL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); - - clks[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); - clks[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); - clks[IMX6UL_CLK_IPG] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2); - clks[IMX6UL_CLK_LCDIF_PODF] = imx_clk_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3); - clks[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3); - clks[IMX6UL_CLK_EIM_SLOW_PODF] = imx_clk_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); - clks[IMX6UL_CLK_PERCLK] = imx_clk_divider("perclk", "perclk_sel", base + 0x1c, 0, 6); - clks[IMX6UL_CLK_CAN_PODF] = imx_clk_divider("can_podf", "can_sel", base + 0x20, 2, 6); - clks[IMX6UL_CLK_GPMI_PODF] = imx_clk_divider("gpmi_podf", "gpmi_sel", base + 0x24, 
22, 3); - clks[IMX6UL_CLK_BCH_PODF] = imx_clk_divider("bch_podf", "bch_sel", base + 0x24, 19, 3); - clks[IMX6UL_CLK_USDHC2_PODF] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); - clks[IMX6UL_CLK_USDHC1_PODF] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3); - clks[IMX6UL_CLK_UART_PODF] = imx_clk_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); - clks[IMX6UL_CLK_SAI3_PRED] = imx_clk_divider("sai3_pred", "sai3_sel", base + 0x28, 22, 3); - clks[IMX6UL_CLK_SAI3_PODF] = imx_clk_divider("sai3_podf", "sai3_pred", base + 0x28, 16, 6); - clks[IMX6UL_CLK_SAI1_PRED] = imx_clk_divider("sai1_pred", "sai1_sel", base + 0x28, 6, 3); - clks[IMX6UL_CLK_SAI1_PODF] = imx_clk_divider("sai1_podf", "sai1_pred", base + 0x28, 0, 6); + hws[IMX6UL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels)); + hws[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT); + hws[IMX6UL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels)); + + hws[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels)); + hws[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels)); + + hws[IMX6UL_CLK_CKO1_SEL] = imx_clk_hw_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); + hws[IMX6UL_CLK_CKO2_SEL] = imx_clk_hw_mux("cko2_sel", base + 0x60, 16, 5, cko2_sels, ARRAY_SIZE(cko2_sels)); + hws[IMX6UL_CLK_CKO] = imx_clk_hw_mux("cko", base + 0x60, 8, 1, cko_sels, ARRAY_SIZE(cko_sels)); + + hws[IMX6UL_CLK_LDB_DI0_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + hws[IMX6UL_CLK_LDB_DI0_DIV_7] = imx_clk_hw_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7); + hws[IMX6UL_CLK_LDB_DI1_DIV_3_5] = imx_clk_hw_fixed_factor("ldb_di1_div_3_5", "qspi1_sel", 2, 7); + hws[IMX6UL_CLK_LDB_DI1_DIV_7] = imx_clk_hw_fixed_factor("ldb_di1_div_7", "qspi1_sel", 1, 7); + + hws[IMX6UL_CLK_PERIPH] = imx_clk_hw_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels)); + hws[IMX6UL_CLK_PERIPH2] = imx_clk_hw_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels)); + + hws[IMX6UL_CLK_PERIPH_CLK2] = imx_clk_hw_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3); + hws[IMX6UL_CLK_PERIPH2_CLK2] = imx_clk_hw_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3); + hws[IMX6UL_CLK_IPG] = imx_clk_hw_divider("ipg", "ahb", base + 0x14, 8, 2); + hws[IMX6UL_CLK_LCDIF_PODF] = imx_clk_hw_divider("lcdif_podf", "lcdif_pred", base + 0x18, 23, 3); + hws[IMX6UL_CLK_QSPI1_PDOF] = imx_clk_hw_divider("qspi1_podf", "qspi1_sel", base + 0x1c, 26, 3); + hws[IMX6UL_CLK_EIM_SLOW_PODF] = imx_clk_hw_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3); + hws[IMX6UL_CLK_PERCLK] = imx_clk_hw_divider("perclk", "perclk_sel", base + 0x1c, 0, 6); + hws[IMX6UL_CLK_CAN_PODF] = imx_clk_hw_divider("can_podf", "can_sel", base + 0x20, 2, 6); + hws[IMX6UL_CLK_GPMI_PODF] = imx_clk_hw_divider("gpmi_podf", "gpmi_sel", base + 0x24, 22, 3); + hws[IMX6UL_CLK_BCH_PODF] = imx_clk_hw_divider("bch_podf", "bch_sel", base + 0x24, 19, 3); + hws[IMX6UL_CLK_USDHC2_PODF] = imx_clk_hw_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3); + hws[IMX6UL_CLK_USDHC1_PODF] = imx_clk_hw_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 
11, 3); + hws[IMX6UL_CLK_UART_PODF] = imx_clk_hw_divider("uart_podf", "uart_sel", base + 0x24, 0, 6); + hws[IMX6UL_CLK_SAI3_PRED] = imx_clk_hw_divider("sai3_pred", "sai3_sel", base + 0x28, 22, 3); + hws[IMX6UL_CLK_SAI3_PODF] = imx_clk_hw_divider("sai3_podf", "sai3_pred", base + 0x28, 16, 6); + hws[IMX6UL_CLK_SAI1_PRED] = imx_clk_hw_divider("sai1_pred", "sai1_sel", base + 0x28, 6, 3); + hws[IMX6UL_CLK_SAI1_PODF] = imx_clk_hw_divider("sai1_podf", "sai1_pred", base + 0x28, 0, 6); if (clk_on_imx6ull()) { - clks[IMX6ULL_CLK_ESAI_PRED] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); - clks[IMX6ULL_CLK_ESAI_PODF] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); + hws[IMX6ULL_CLK_ESAI_PRED] = imx_clk_hw_divider("esai_pred", "esai_sel", base + 0x28, 9, 3); + hws[IMX6ULL_CLK_ESAI_PODF] = imx_clk_hw_divider("esai_podf", "esai_pred", base + 0x28, 25, 3); } - clks[IMX6UL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); - clks[IMX6UL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); - clks[IMX6UL_CLK_SAI2_PRED] = imx_clk_divider("sai2_pred", "sai2_sel", base + 0x2c, 6, 3); - clks[IMX6UL_CLK_SAI2_PODF] = imx_clk_divider("sai2_podf", "sai2_pred", base + 0x2c, 0, 6); - clks[IMX6UL_CLK_SPDIF_PRED] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); - clks[IMX6UL_CLK_SPDIF_PODF] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); + hws[IMX6UL_CLK_ENFC_PRED] = imx_clk_hw_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); + hws[IMX6UL_CLK_ENFC_PODF] = imx_clk_hw_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); + hws[IMX6UL_CLK_SAI2_PRED] = imx_clk_hw_divider("sai2_pred", "sai2_sel", base + 0x2c, 6, 3); + hws[IMX6UL_CLK_SAI2_PODF] = imx_clk_hw_divider("sai2_podf", "sai2_pred", base + 0x2c, 0, 6); + hws[IMX6UL_CLK_SPDIF_PRED] = imx_clk_hw_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3); + hws[IMX6UL_CLK_SPDIF_PODF] = imx_clk_hw_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3); if (clk_on_imx6ul()) - clks[IMX6UL_CLK_SIM_PODF] = imx_clk_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3); + hws[IMX6UL_CLK_SIM_PODF] = imx_clk_hw_divider("sim_podf", "sim_pre_sel", base + 0x34, 12, 3); else if (clk_on_imx6ull()) - clks[IMX6ULL_CLK_EPDC_PODF] = imx_clk_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3); - clks[IMX6UL_CLK_ECSPI_PODF] = imx_clk_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); - clks[IMX6UL_CLK_LCDIF_PRED] = imx_clk_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3); - clks[IMX6UL_CLK_CSI_PODF] = imx_clk_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); + hws[IMX6ULL_CLK_EPDC_PODF] = imx_clk_hw_divider("epdc_podf", "epdc_pre_sel", base + 0x34, 12, 3); + hws[IMX6UL_CLK_ECSPI_PODF] = imx_clk_hw_divider("ecspi_podf", "ecspi_sel", base + 0x38, 19, 6); + hws[IMX6UL_CLK_LCDIF_PRED] = imx_clk_hw_divider("lcdif_pred", "lcdif_pre_sel", base + 0x38, 12, 3); + hws[IMX6UL_CLK_CSI_PODF] = imx_clk_hw_divider("csi_podf", "csi_sel", base + 0x3c, 11, 3); - clks[IMX6UL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); - clks[IMX6UL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); + hws[IMX6UL_CLK_CKO1_PODF] = imx_clk_hw_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); + hws[IMX6UL_CLK_CKO2_PODF] = imx_clk_hw_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); - clks[IMX6UL_CLK_ARM] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); - clks[IMX6UL_CLK_MMDC_PODF] = 
imx_clk_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); - clks[IMX6UL_CLK_AXI_PODF] = imx_clk_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); - clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); + hws[IMX6UL_CLK_ARM] = imx_clk_hw_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16); + hws[IMX6UL_CLK_MMDC_PODF] = imx_clk_hw_busy_divider("mmdc_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2); + hws[IMX6UL_CLK_AXI_PODF] = imx_clk_hw_busy_divider("axi_podf", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0); + hws[IMX6UL_CLK_AHB] = imx_clk_hw_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1); /* CCGR0 */ - clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL); - clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL); - clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4); - clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); - clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); + hws[IMX6UL_CLK_AIPSTZ1] = imx_clk_hw_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_AIPSTZ2] = imx_clk_hw_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_APBHDMA] = imx_clk_hw_gate2("apbh_dma", "bch_podf", base + 0x68, 4); + hws[IMX6UL_CLK_ASRC_IPG] = imx_clk_hw_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc); + hws[IMX6UL_CLK_ASRC_MEM] = imx_clk_hw_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc); if (clk_on_imx6ul()) { - clks[IMX6UL_CLK_CAAM_MEM] = imx_clk_gate2("caam_mem", "ahb", base + 0x68, 8); - clks[IMX6UL_CLK_CAAM_ACLK] = imx_clk_gate2("caam_aclk", "ahb", base + 0x68, 10); - clks[IMX6UL_CLK_CAAM_IPG] = imx_clk_gate2("caam_ipg", "ipg", base + 0x68, 12); + hws[IMX6UL_CLK_CAAM_MEM] = imx_clk_hw_gate2("caam_mem", "ahb", base + 0x68, 8); + hws[IMX6UL_CLK_CAAM_ACLK] = imx_clk_hw_gate2("caam_aclk", "ahb", base + 0x68, 10); + hws[IMX6UL_CLK_CAAM_IPG] = imx_clk_hw_gate2("caam_ipg", "ipg", base + 0x68, 12); } else if (clk_on_imx6ull()) { - clks[IMX6ULL_CLK_DCP_CLK] = imx_clk_gate2("dcp", "ahb", base + 0x68, 10); - clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x68, 12); - clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x68, 12); + hws[IMX6ULL_CLK_DCP_CLK] = imx_clk_hw_gate2("dcp", "ahb", base + 0x68, 10); + hws[IMX6UL_CLK_ENET] = imx_clk_hw_gate2("enet", "ipg", base + 0x68, 12); + hws[IMX6UL_CLK_ENET_AHB] = imx_clk_hw_gate2("enet_ahb", "ahb", base + 0x68, 12); } - clks[IMX6UL_CLK_CAN1_IPG] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14); - clks[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_podf", base + 0x68, 16); - clks[IMX6UL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18); - clks[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_podf", base + 0x68, 20); - clks[IMX6UL_CLK_GPT2_BUS] = imx_clk_gate2("gpt2_bus", "perclk", base + 0x68, 24); - clks[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_gate2("gpt2_serial", "perclk", base + 0x68, 26); - clks[IMX6UL_CLK_UART2_IPG] = imx_clk_gate2("uart2_ipg", "ipg", base + 0x68, 28); - clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28); + hws[IMX6UL_CLK_CAN1_IPG] = imx_clk_hw_gate2("can1_ipg", "ipg", base + 0x68, 14); + 
hws[IMX6UL_CLK_CAN1_SERIAL] = imx_clk_hw_gate2("can1_serial", "can_podf", base + 0x68, 16); + hws[IMX6UL_CLK_CAN2_IPG] = imx_clk_hw_gate2("can2_ipg", "ipg", base + 0x68, 18); + hws[IMX6UL_CLK_CAN2_SERIAL] = imx_clk_hw_gate2("can2_serial", "can_podf", base + 0x68, 20); + hws[IMX6UL_CLK_GPT2_BUS] = imx_clk_hw_gate2("gpt2_bus", "perclk", base + 0x68, 24); + hws[IMX6UL_CLK_GPT2_SERIAL] = imx_clk_hw_gate2("gpt2_serial", "perclk", base + 0x68, 26); + hws[IMX6UL_CLK_UART2_IPG] = imx_clk_hw_gate2("uart2_ipg", "ipg", base + 0x68, 28); + hws[IMX6UL_CLK_UART2_SERIAL] = imx_clk_hw_gate2("uart2_serial", "uart_podf", base + 0x68, 28); if (clk_on_imx6ull()) - clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18); - clks[IMX6UL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30); + hws[IMX6UL_CLK_AIPSTZ3] = imx_clk_hw_gate2("aips_tz3", "ahb", base + 0x80, 18); + hws[IMX6UL_CLK_GPIO2] = imx_clk_hw_gate2("gpio2", "ipg", base + 0x68, 30); /* CCGR1 */ - clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); - clks[IMX6UL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); - clks[IMX6UL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); - clks[IMX6UL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); - clks[IMX6UL_CLK_ADC2] = imx_clk_gate2("adc2", "ipg", base + 0x6c, 8); - clks[IMX6UL_CLK_UART3_IPG] = imx_clk_gate2("uart3_ipg", "ipg", base + 0x6c, 10); - clks[IMX6UL_CLK_UART3_SERIAL] = imx_clk_gate2("uart3_serial", "uart_podf", base + 0x6c, 10); - clks[IMX6UL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); - clks[IMX6UL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); - clks[IMX6UL_CLK_ADC1] = imx_clk_gate2("adc1", "ipg", base + 0x6c, 16); - clks[IMX6UL_CLK_GPT1_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20); - clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22); - clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24); - clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24); - clks[IMX6UL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26); - clks[IMX6UL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30); + hws[IMX6UL_CLK_ECSPI1] = imx_clk_hw_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0); + hws[IMX6UL_CLK_ECSPI2] = imx_clk_hw_gate2("ecspi2", "ecspi_podf", base + 0x6c, 2); + hws[IMX6UL_CLK_ECSPI3] = imx_clk_hw_gate2("ecspi3", "ecspi_podf", base + 0x6c, 4); + hws[IMX6UL_CLK_ECSPI4] = imx_clk_hw_gate2("ecspi4", "ecspi_podf", base + 0x6c, 6); + hws[IMX6UL_CLK_ADC2] = imx_clk_hw_gate2("adc2", "ipg", base + 0x6c, 8); + hws[IMX6UL_CLK_UART3_IPG] = imx_clk_hw_gate2("uart3_ipg", "ipg", base + 0x6c, 10); + hws[IMX6UL_CLK_UART3_SERIAL] = imx_clk_hw_gate2("uart3_serial", "uart_podf", base + 0x6c, 10); + hws[IMX6UL_CLK_EPIT1] = imx_clk_hw_gate2("epit1", "perclk", base + 0x6c, 12); + hws[IMX6UL_CLK_EPIT2] = imx_clk_hw_gate2("epit2", "perclk", base + 0x6c, 14); + hws[IMX6UL_CLK_ADC1] = imx_clk_hw_gate2("adc1", "ipg", base + 0x6c, 16); + hws[IMX6UL_CLK_GPT1_BUS] = imx_clk_hw_gate2("gpt1_bus", "perclk", base + 0x6c, 20); + hws[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_hw_gate2("gpt1_serial", "perclk", base + 0x6c, 22); + hws[IMX6UL_CLK_UART4_IPG] = imx_clk_hw_gate2("uart4_ipg", "ipg", base + 0x6c, 24); + hws[IMX6UL_CLK_UART4_SERIAL] = imx_clk_hw_gate2("uart4_serial", "uart_podf", base + 0x6c, 24); + hws[IMX6UL_CLK_GPIO1] = imx_clk_hw_gate2("gpio1", "ipg", base + 0x6c, 
26); + hws[IMX6UL_CLK_GPIO5] = imx_clk_hw_gate2("gpio5", "ipg", base + 0x6c, 30); /* CCGR2 */ if (clk_on_imx6ull()) { - clks[IMX6ULL_CLK_ESAI_EXTAL] = imx_clk_gate2_shared("esai_extal", "esai_podf", base + 0x70, 0, &share_count_esai); - clks[IMX6ULL_CLK_ESAI_IPG] = imx_clk_gate2_shared("esai_ipg", "ahb", base + 0x70, 0, &share_count_esai); - clks[IMX6ULL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x70, 0, &share_count_esai); + hws[IMX6ULL_CLK_ESAI_EXTAL] = imx_clk_hw_gate2_shared("esai_extal", "esai_podf", base + 0x70, 0, &share_count_esai); + hws[IMX6ULL_CLK_ESAI_IPG] = imx_clk_hw_gate2_shared("esai_ipg", "ahb", base + 0x70, 0, &share_count_esai); + hws[IMX6ULL_CLK_ESAI_MEM] = imx_clk_hw_gate2_shared("esai_mem", "ahb", base + 0x70, 0, &share_count_esai); } - clks[IMX6UL_CLK_CSI] = imx_clk_gate2("csi", "csi_podf", base + 0x70, 2); - clks[IMX6UL_CLK_I2C1] = imx_clk_gate2("i2c1", "perclk", base + 0x70, 6); - clks[IMX6UL_CLK_I2C2] = imx_clk_gate2("i2c2", "perclk", base + 0x70, 8); - clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10); - clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12); - clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14); - clks[IMX6UL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26); - clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28); - clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30); + hws[IMX6UL_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x70, 2); + hws[IMX6UL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6); + hws[IMX6UL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8); + hws[IMX6UL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10); + hws[IMX6UL_CLK_OCOTP] = imx_clk_hw_gate2("ocotp", "ipg", base + 0x70, 12); + hws[IMX6UL_CLK_IOMUXC] = imx_clk_hw_gate2("iomuxc", "lcdif_podf", base + 0x70, 14); + hws[IMX6UL_CLK_GPIO3] = imx_clk_hw_gate2("gpio3", "ipg", base + 0x70, 26); + hws[IMX6UL_CLK_LCDIF_APB] = imx_clk_hw_gate2("lcdif_apb", "axi", base + 0x70, 28); + hws[IMX6UL_CLK_PXP] = imx_clk_hw_gate2("pxp", "axi", base + 0x70, 30); /* CCGR3 */ - clks[IMX6UL_CLK_UART5_IPG] = imx_clk_gate2("uart5_ipg", "ipg", base + 0x74, 2); - clks[IMX6UL_CLK_UART5_SERIAL] = imx_clk_gate2("uart5_serial", "uart_podf", base + 0x74, 2); + hws[IMX6UL_CLK_UART5_IPG] = imx_clk_hw_gate2("uart5_ipg", "ipg", base + 0x74, 2); + hws[IMX6UL_CLK_UART5_SERIAL] = imx_clk_hw_gate2("uart5_serial", "uart_podf", base + 0x74, 2); if (clk_on_imx6ul()) { - clks[IMX6UL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x74, 4); - clks[IMX6UL_CLK_ENET_AHB] = imx_clk_gate2("enet_ahb", "ahb", base + 0x74, 4); + hws[IMX6UL_CLK_ENET] = imx_clk_hw_gate2("enet", "ipg", base + 0x74, 4); + hws[IMX6UL_CLK_ENET_AHB] = imx_clk_hw_gate2("enet_ahb", "ahb", base + 0x74, 4); } else if (clk_on_imx6ull()) { - clks[IMX6ULL_CLK_EPDC_ACLK] = imx_clk_gate2("epdc_aclk", "axi", base + 0x74, 4); - clks[IMX6ULL_CLK_EPDC_PIX] = imx_clk_gate2("epdc_pix", "epdc_podf", base + 0x74, 4); + hws[IMX6ULL_CLK_EPDC_ACLK] = imx_clk_hw_gate2("epdc_aclk", "axi", base + 0x74, 4); + hws[IMX6ULL_CLK_EPDC_PIX] = imx_clk_hw_gate2("epdc_pix", "epdc_podf", base + 0x74, 4); } - clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6); - clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6); - clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10); - clks[IMX6UL_CLK_GPIO4] = 
imx_clk_gate2("gpio4", "ipg", base + 0x74, 12); - clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14); - clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16); - clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); - clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); - clks[IMX6UL_CLK_MMDC_P1_IPG] = imx_clk_gate2("mmdc_p1_ipg", "ipg", base + 0x74, 26); - clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_UART6_IPG] = imx_clk_hw_gate2("uart6_ipg", "ipg", base + 0x74, 6); + hws[IMX6UL_CLK_UART6_SERIAL] = imx_clk_hw_gate2("uart6_serial", "uart_podf", base + 0x74, 6); + hws[IMX6UL_CLK_LCDIF_PIX] = imx_clk_hw_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10); + hws[IMX6UL_CLK_GPIO4] = imx_clk_hw_gate2("gpio4", "ipg", base + 0x74, 12); + hws[IMX6UL_CLK_QSPI] = imx_clk_hw_gate2("qspi1", "qspi1_podf", base + 0x74, 14); + hws[IMX6UL_CLK_WDOG1] = imx_clk_hw_gate2("wdog1", "ipg", base + 0x74, 16); + hws[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_hw_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_hw_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_MMDC_P1_IPG] = imx_clk_hw_gate2_flags("mmdc_p1_ipg", "ipg", base + 0x74, 26, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_AXI] = imx_clk_hw_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL); /* CCGR4 */ - clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12); - clks[IMX6UL_CLK_PWM1] = imx_clk_gate2("pwm1", "perclk", base + 0x78, 16); - clks[IMX6UL_CLK_PWM2] = imx_clk_gate2("pwm2", "perclk", base + 0x78, 18); - clks[IMX6UL_CLK_PWM3] = imx_clk_gate2("pwm3", "perclk", base + 0x78, 20); - clks[IMX6UL_CLK_PWM4] = imx_clk_gate2("pwm4", "perclk", base + 0x78, 22); - clks[IMX6UL_CLK_GPMI_BCH_APB] = imx_clk_gate2("gpmi_bch_apb", "bch_podf", base + 0x78, 24); - clks[IMX6UL_CLK_GPMI_BCH] = imx_clk_gate2("gpmi_bch", "gpmi_podf", base + 0x78, 26); - clks[IMX6UL_CLK_GPMI_IO] = imx_clk_gate2("gpmi_io", "enfc_podf", base + 0x78, 28); - clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30); + hws[IMX6UL_CLK_PER_BCH] = imx_clk_hw_gate2("per_bch", "bch_podf", base + 0x78, 12); + hws[IMX6UL_CLK_PWM1] = imx_clk_hw_gate2("pwm1", "perclk", base + 0x78, 16); + hws[IMX6UL_CLK_PWM2] = imx_clk_hw_gate2("pwm2", "perclk", base + 0x78, 18); + hws[IMX6UL_CLK_PWM3] = imx_clk_hw_gate2("pwm3", "perclk", base + 0x78, 20); + hws[IMX6UL_CLK_PWM4] = imx_clk_hw_gate2("pwm4", "perclk", base + 0x78, 22); + hws[IMX6UL_CLK_GPMI_BCH_APB] = imx_clk_hw_gate2("gpmi_bch_apb", "bch_podf", base + 0x78, 24); + hws[IMX6UL_CLK_GPMI_BCH] = imx_clk_hw_gate2("gpmi_bch", "gpmi_podf", base + 0x78, 26); + hws[IMX6UL_CLK_GPMI_IO] = imx_clk_hw_gate2("gpmi_io", "enfc_podf", base + 0x78, 28); + hws[IMX6UL_CLK_GPMI_APB] = imx_clk_hw_gate2("gpmi_apb", "bch_podf", base + 0x78, 30); /* CCGR5 */ - clks[IMX6UL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); - clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6); - clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8); - clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10); - clks[IMX6UL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12); - clks[IMX6UL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 
0x7c, 14, &share_count_audio); - clks[IMX6UL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); - clks[IMX6UL_CLK_SAI3] = imx_clk_gate2_shared("sai3", "sai3_podf", base + 0x7c, 22, &share_count_sai3); - clks[IMX6UL_CLK_SAI3_IPG] = imx_clk_gate2_shared("sai3_ipg", "ipg", base + 0x7c, 22, &share_count_sai3); - clks[IMX6UL_CLK_UART1_IPG] = imx_clk_gate2("uart1_ipg", "ipg", base + 0x7c, 24); - clks[IMX6UL_CLK_UART1_SERIAL] = imx_clk_gate2("uart1_serial", "uart_podf", base + 0x7c, 24); - clks[IMX6UL_CLK_UART7_IPG] = imx_clk_gate2("uart7_ipg", "ipg", base + 0x7c, 26); - clks[IMX6UL_CLK_UART7_SERIAL] = imx_clk_gate2("uart7_serial", "uart_podf", base + 0x7c, 26); - clks[IMX6UL_CLK_SAI1] = imx_clk_gate2_shared("sai1", "sai1_podf", base + 0x7c, 28, &share_count_sai1); - clks[IMX6UL_CLK_SAI1_IPG] = imx_clk_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1); - clks[IMX6UL_CLK_SAI2] = imx_clk_gate2_shared("sai2", "sai2_podf", base + 0x7c, 30, &share_count_sai2); - clks[IMX6UL_CLK_SAI2_IPG] = imx_clk_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2); + hws[IMX6UL_CLK_ROM] = imx_clk_hw_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL); + hws[IMX6UL_CLK_SDMA] = imx_clk_hw_gate2("sdma", "ahb", base + 0x7c, 6); + hws[IMX6UL_CLK_KPP] = imx_clk_hw_gate2("kpp", "ipg", base + 0x7c, 8); + hws[IMX6UL_CLK_WDOG2] = imx_clk_hw_gate2("wdog2", "ipg", base + 0x7c, 10); + hws[IMX6UL_CLK_SPBA] = imx_clk_hw_gate2("spba", "ipg", base + 0x7c, 12); + hws[IMX6UL_CLK_SPDIF] = imx_clk_hw_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_audio); + hws[IMX6UL_CLK_SPDIF_GCLK] = imx_clk_hw_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_audio); + hws[IMX6UL_CLK_SAI3] = imx_clk_hw_gate2_shared("sai3", "sai3_podf", base + 0x7c, 22, &share_count_sai3); + hws[IMX6UL_CLK_SAI3_IPG] = imx_clk_hw_gate2_shared("sai3_ipg", "ipg", base + 0x7c, 22, &share_count_sai3); + hws[IMX6UL_CLK_UART1_IPG] = imx_clk_hw_gate2("uart1_ipg", "ipg", base + 0x7c, 24); + hws[IMX6UL_CLK_UART1_SERIAL] = imx_clk_hw_gate2("uart1_serial", "uart_podf", base + 0x7c, 24); + hws[IMX6UL_CLK_UART7_IPG] = imx_clk_hw_gate2("uart7_ipg", "ipg", base + 0x7c, 26); + hws[IMX6UL_CLK_UART7_SERIAL] = imx_clk_hw_gate2("uart7_serial", "uart_podf", base + 0x7c, 26); + hws[IMX6UL_CLK_SAI1] = imx_clk_hw_gate2_shared("sai1", "sai1_podf", base + 0x7c, 28, &share_count_sai1); + hws[IMX6UL_CLK_SAI1_IPG] = imx_clk_hw_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1); + hws[IMX6UL_CLK_SAI2] = imx_clk_hw_gate2_shared("sai2", "sai2_podf", base + 0x7c, 30, &share_count_sai2); + hws[IMX6UL_CLK_SAI2_IPG] = imx_clk_hw_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2); /* CCGR6 */ - clks[IMX6UL_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0); - clks[IMX6UL_CLK_USDHC1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); - clks[IMX6UL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); + hws[IMX6UL_CLK_USBOH3] = imx_clk_hw_gate2("usboh3", "ipg", base + 0x80, 0); + hws[IMX6UL_CLK_USDHC1] = imx_clk_hw_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2); + hws[IMX6UL_CLK_USDHC2] = imx_clk_hw_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); if (clk_on_imx6ul()) { - clks[IMX6UL_CLK_SIM1] = imx_clk_gate2("sim1", "sim_sel", base + 0x80, 6); - clks[IMX6UL_CLK_SIM2] = imx_clk_gate2("sim2", "sim_sel", base + 0x80, 8); + hws[IMX6UL_CLK_SIM1] = imx_clk_hw_gate2("sim1", "sim_sel", base + 0x80, 6); + hws[IMX6UL_CLK_SIM2] = 
imx_clk_hw_gate2("sim2", "sim_sel", base + 0x80, 8); } - clks[IMX6UL_CLK_EIM] = imx_clk_gate2("eim", "eim_slow_podf", base + 0x80, 10); - clks[IMX6UL_CLK_PWM8] = imx_clk_gate2("pwm8", "perclk", base + 0x80, 16); - clks[IMX6UL_CLK_UART8_IPG] = imx_clk_gate2("uart8_ipg", "ipg", base + 0x80, 14); - clks[IMX6UL_CLK_UART8_SERIAL] = imx_clk_gate2("uart8_serial", "uart_podf", base + 0x80, 14); - clks[IMX6UL_CLK_WDOG3] = imx_clk_gate2("wdog3", "ipg", base + 0x80, 20); - clks[IMX6UL_CLK_I2C4] = imx_clk_gate2("i2c4", "perclk", base + 0x80, 24); - clks[IMX6UL_CLK_PWM5] = imx_clk_gate2("pwm5", "perclk", base + 0x80, 26); - clks[IMX6UL_CLK_PWM6] = imx_clk_gate2("pwm6", "perclk", base + 0x80, 28); - clks[IMX6UL_CLK_PWM7] = imx_clk_gate2("pwm7", "perclk", base + 0x80, 30); + hws[IMX6UL_CLK_EIM] = imx_clk_hw_gate2("eim", "eim_slow_podf", base + 0x80, 10); + hws[IMX6UL_CLK_PWM8] = imx_clk_hw_gate2("pwm8", "perclk", base + 0x80, 16); + hws[IMX6UL_CLK_UART8_IPG] = imx_clk_hw_gate2("uart8_ipg", "ipg", base + 0x80, 14); + hws[IMX6UL_CLK_UART8_SERIAL] = imx_clk_hw_gate2("uart8_serial", "uart_podf", base + 0x80, 14); + hws[IMX6UL_CLK_WDOG3] = imx_clk_hw_gate2("wdog3", "ipg", base + 0x80, 20); + hws[IMX6UL_CLK_I2C4] = imx_clk_hw_gate2("i2c4", "perclk", base + 0x80, 24); + hws[IMX6UL_CLK_PWM5] = imx_clk_hw_gate2("pwm5", "perclk", base + 0x80, 26); + hws[IMX6UL_CLK_PWM6] = imx_clk_hw_gate2("pwm6", "perclk", base + 0x80, 28); + hws[IMX6UL_CLK_PWM7] = imx_clk_hw_gate2("pwm7", "perclk", base + 0x80, 30); /* CCOSR */ - clks[IMX6UL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); - clks[IMX6UL_CLK_CKO2] = imx_clk_gate("cko2", "cko2_podf", base + 0x60, 24); + hws[IMX6UL_CLK_CKO1] = imx_clk_hw_gate("cko1", "cko1_podf", base + 0x60, 7); + hws[IMX6UL_CLK_CKO2] = imx_clk_hw_gate("cko2", "cko2_podf", base + 0x60, 24); /* mask handshake of mmdc */ - writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR); + imx_mmdc_mask_handshake(base, 0); - imx_check_clocks(clks, ARRAY_SIZE(clks)); + imx_check_clk_hws(hws, IMX6UL_CLK_END); - clk_data.clks = clks; - clk_data.clk_num = ARRAY_SIZE(clks); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); /* * Lower the AHB clock rate before changing the parent clock source, @@ -473,39 +477,39 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node) * AXI clock rate, so we need to lower AHB rate first to make sure at * any time, AHB rate is <= 133MHz. 
*/ - clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000); + clk_set_rate(hws[IMX6UL_CLK_AHB]->clk, 99000000); /* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */ - clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]); - clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]); - clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]); - clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]); + clk_set_parent(hws[IMX6UL_CLK_PERIPH_CLK2_SEL]->clk, hws[IMX6UL_CLK_OSC]->clk); + clk_set_parent(hws[IMX6UL_CLK_PERIPH]->clk, hws[IMX6UL_CLK_PERIPH_CLK2]->clk); + clk_set_parent(hws[IMX6UL_CLK_PERIPH_PRE]->clk, hws[IMX6UL_CLK_PLL2_BUS]->clk); + clk_set_parent(hws[IMX6UL_CLK_PERIPH]->clk, hws[IMX6UL_CLK_PERIPH_PRE]->clk); /* Make sure AHB rate is 132MHz */ - clk_set_rate(clks[IMX6UL_CLK_AHB], 132000000); + clk_set_rate(hws[IMX6UL_CLK_AHB]->clk, 132000000); /* set perclk to from OSC */ - clk_set_parent(clks[IMX6UL_CLK_PERCLK_SEL], clks[IMX6UL_CLK_OSC]); + clk_set_parent(hws[IMX6UL_CLK_PERCLK_SEL]->clk, hws[IMX6UL_CLK_OSC]->clk); - clk_set_rate(clks[IMX6UL_CLK_ENET_REF], 50000000); - clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000); - clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000); + clk_set_rate(hws[IMX6UL_CLK_ENET_REF]->clk, 50000000); + clk_set_rate(hws[IMX6UL_CLK_ENET2_REF]->clk, 50000000); + clk_set_rate(hws[IMX6UL_CLK_CSI]->clk, 24000000); if (clk_on_imx6ull()) - clk_prepare_enable(clks[IMX6UL_CLK_AIPSTZ3]); + clk_prepare_enable(hws[IMX6UL_CLK_AIPSTZ3]->clk); if (IS_ENABLED(CONFIG_USB_MXS_PHY)) { - clk_prepare_enable(clks[IMX6UL_CLK_USBPHY1_GATE]); - clk_prepare_enable(clks[IMX6UL_CLK_USBPHY2_GATE]); + clk_prepare_enable(hws[IMX6UL_CLK_USBPHY1_GATE]->clk); + clk_prepare_enable(hws[IMX6UL_CLK_USBPHY2_GATE]->clk); } - clk_set_parent(clks[IMX6UL_CLK_CAN_SEL], clks[IMX6UL_CLK_PLL3_60M]); + clk_set_parent(hws[IMX6UL_CLK_CAN_SEL]->clk, hws[IMX6UL_CLK_PLL3_60M]->clk); if (clk_on_imx6ul()) - clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]); + clk_set_parent(hws[IMX6UL_CLK_SIM_PRE_SEL]->clk, hws[IMX6UL_CLK_PLL3_USB_OTG]->clk); else if (clk_on_imx6ull()) - clk_set_parent(clks[IMX6ULL_CLK_EPDC_PRE_SEL], clks[IMX6UL_CLK_PLL3_PFD2]); + clk_set_parent(hws[IMX6ULL_CLK_EPDC_PRE_SEL]->clk, hws[IMX6UL_CLK_PLL3_PFD2]->clk); - clk_set_parent(clks[IMX6UL_CLK_ENFC_SEL], clks[IMX6UL_CLK_PLL2_PFD2]); + clk_set_parent(hws[IMX6UL_CLK_ENFC_SEL]->clk, hws[IMX6UL_CLK_PLL2_PFD2]->clk); } CLK_OF_DECLARE(imx6ul, "fsl,imx6ul-ccm", imx6ul_clocks_init); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 8f3aa994c8f7..fbea774ef687 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -6,6 +6,7 @@ #include <dt-bindings/clock/imx7d-clock.h> #include <linux/clk.h> #include <linux/clkdev.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> @@ -39,7 +40,6 @@ static const struct clk_div_table post_div_table[] = { { } }; -static struct clk *clks[IMX7D_CLK_END]; static const char *arm_a7_sel[] = { "osc", "pll_arm_main_clk", "pll_enet_500m_clk", "pll_dram_main_clk", "pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_audio_post_div", @@ -373,517 +373,533 @@ static const char *pll_enet_bypass_sel[] = { "pll_enet_main", "pll_enet_main_src static const char *pll_audio_bypass_sel[] = { "pll_audio_main", "pll_audio_main_src", }; static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_src", }; -static struct clk_onecell_data 
clk_data; - -static struct clk ** const uart_clks[] __initconst = { - &clks[IMX7D_UART1_ROOT_CLK], - &clks[IMX7D_UART2_ROOT_CLK], - &clks[IMX7D_UART3_ROOT_CLK], - &clks[IMX7D_UART4_ROOT_CLK], - &clks[IMX7D_UART5_ROOT_CLK], - &clks[IMX7D_UART6_ROOT_CLK], - &clks[IMX7D_UART7_ROOT_CLK], - NULL +static struct clk_hw **hws; +static struct clk_hw_onecell_data *clk_hw_data; + +static const int uart_clk_ids[] __initconst = { + IMX7D_UART1_ROOT_CLK, + IMX7D_UART2_ROOT_CLK, + IMX7D_UART3_ROOT_CLK, + IMX7D_UART4_ROOT_CLK, + IMX7D_UART5_ROOT_CLK, + IMX7D_UART6_ROOT_CLK, + IMX7D_UART7_ROOT_CLK, }; +static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata; + static void __init imx7d_clocks_init(struct device_node *ccm_node) { struct device_node *np; void __iomem *base; + int i; + + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws, + IMX7D_CLK_END), GFP_KERNEL); + if (WARN_ON(!clk_hw_data)) + return; + + clk_hw_data->num = IMX7D_CLK_END; + hws = clk_hw_data->hws; - clks[IMX7D_CLK_DUMMY] = imx_clk_fixed("dummy", 0); - clks[IMX7D_OSC_24M_CLK] = of_clk_get_by_name(ccm_node, "osc"); - clks[IMX7D_CKIL] = of_clk_get_by_name(ccm_node, "ckil"); + hws[IMX7D_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0); + hws[IMX7D_OSC_24M_CLK] = __clk_get_hw(of_clk_get_by_name(ccm_node, "osc")); + hws[IMX7D_CKIL] = __clk_get_hw(of_clk_get_by_name(ccm_node, "ckil")); np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop"); base = of_iomap(np, 0); WARN_ON(!base); of_node_put(np); - clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - clks[IMX7D_PLL_SYS_MAIN_SRC] = imx_clk_mux("pll_sys_main_src", base + 0xb0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - clks[IMX7D_PLL_ENET_MAIN_SRC] = imx_clk_mux("pll_enet_main_src", base + 0xe0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - clks[IMX7D_PLL_AUDIO_MAIN_SRC] = imx_clk_mux("pll_audio_main_src", base + 0xf0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - clks[IMX7D_PLL_VIDEO_MAIN_SRC] = imx_clk_mux("pll_video_main_src", base + 0x130, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); - - clks[IMX7D_PLL_ARM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "osc", base + 0x60, 0x7f); - clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f); - clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1); - clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0); - clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_audio_main", "osc", base + 0xf0, 0x7f); - clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_video_main", "osc", base + 0x130, 0x7f); - - clks[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT); - clks[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT); - clks[IMX7D_PLL_SYS_MAIN_BYPASS] = imx_clk_mux_flags("pll_sys_main_bypass", base + 0xb0, 16, 1, pll_sys_bypass_sel, ARRAY_SIZE(pll_sys_bypass_sel), CLK_SET_RATE_PARENT); - clks[IMX7D_PLL_ENET_MAIN_BYPASS] = 
imx_clk_mux_flags("pll_enet_main_bypass", base + 0xe0, 16, 1, pll_enet_bypass_sel, ARRAY_SIZE(pll_enet_bypass_sel), CLK_SET_RATE_PARENT); - clks[IMX7D_PLL_AUDIO_MAIN_BYPASS] = imx_clk_mux_flags("pll_audio_main_bypass", base + 0xf0, 16, 1, pll_audio_bypass_sel, ARRAY_SIZE(pll_audio_bypass_sel), CLK_SET_RATE_PARENT); - clks[IMX7D_PLL_VIDEO_MAIN_BYPASS] = imx_clk_mux_flags("pll_video_main_bypass", base + 0x130, 16, 1, pll_video_bypass_sel, ARRAY_SIZE(pll_video_bypass_sel), CLK_SET_RATE_PARENT); - - clks[IMX7D_PLL_ARM_MAIN_CLK] = imx_clk_gate("pll_arm_main_clk", "pll_arm_main_bypass", base + 0x60, 13); - clks[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_gate("pll_dram_main_clk", "pll_dram_test_div", base + 0x70, 13); - clks[IMX7D_PLL_SYS_MAIN_CLK] = imx_clk_gate("pll_sys_main_clk", "pll_sys_main_bypass", base + 0xb0, 13); - clks[IMX7D_PLL_AUDIO_MAIN_CLK] = imx_clk_gate("pll_audio_main_clk", "pll_audio_main_bypass", base + 0xf0, 13); - clks[IMX7D_PLL_VIDEO_MAIN_CLK] = imx_clk_gate("pll_video_main_clk", "pll_video_main_bypass", base + 0x130, 13); - - clks[IMX7D_PLL_DRAM_TEST_DIV] = clk_register_divider_table(NULL, "pll_dram_test_div", "pll_dram_main_bypass", + hws[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_hw_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); + hws[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_hw_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); + hws[IMX7D_PLL_SYS_MAIN_SRC] = imx_clk_hw_mux("pll_sys_main_src", base + 0xb0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); + hws[IMX7D_PLL_ENET_MAIN_SRC] = imx_clk_hw_mux("pll_enet_main_src", base + 0xe0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); + hws[IMX7D_PLL_AUDIO_MAIN_SRC] = imx_clk_hw_mux("pll_audio_main_src", base + 0xf0, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); + hws[IMX7D_PLL_VIDEO_MAIN_SRC] = imx_clk_hw_mux("pll_video_main_src", base + 0x130, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); + + hws[IMX7D_PLL_ARM_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "osc", base + 0x60, 0x7f); + hws[IMX7D_PLL_DRAM_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f); + hws[IMX7D_PLL_SYS_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1); + hws[IMX7D_PLL_ENET_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0); + hws[IMX7D_PLL_AUDIO_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_AV_IMX7, "pll_audio_main", "osc", base + 0xf0, 0x7f); + hws[IMX7D_PLL_VIDEO_MAIN] = imx_clk_hw_pllv3(IMX_PLLV3_AV_IMX7, "pll_video_main", "osc", base + 0x130, 0x7f); + + hws[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT); + hws[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT); + hws[IMX7D_PLL_SYS_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_sys_main_bypass", base + 0xb0, 16, 1, pll_sys_bypass_sel, ARRAY_SIZE(pll_sys_bypass_sel), CLK_SET_RATE_PARENT); + hws[IMX7D_PLL_ENET_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_enet_main_bypass", base + 0xe0, 16, 1, pll_enet_bypass_sel, ARRAY_SIZE(pll_enet_bypass_sel), CLK_SET_RATE_PARENT); + hws[IMX7D_PLL_AUDIO_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_audio_main_bypass", base + 0xf0, 16, 1, pll_audio_bypass_sel, ARRAY_SIZE(pll_audio_bypass_sel), CLK_SET_RATE_PARENT); + 
hws[IMX7D_PLL_VIDEO_MAIN_BYPASS] = imx_clk_hw_mux_flags("pll_video_main_bypass", base + 0x130, 16, 1, pll_video_bypass_sel, ARRAY_SIZE(pll_video_bypass_sel), CLK_SET_RATE_PARENT); + + hws[IMX7D_PLL_ARM_MAIN_CLK] = imx_clk_hw_gate("pll_arm_main_clk", "pll_arm_main_bypass", base + 0x60, 13); + hws[IMX7D_PLL_DRAM_MAIN_CLK] = imx_clk_hw_gate("pll_dram_main_clk", "pll_dram_test_div", base + 0x70, 13); + hws[IMX7D_PLL_SYS_MAIN_CLK] = imx_clk_hw_gate("pll_sys_main_clk", "pll_sys_main_bypass", base + 0xb0, 13); + hws[IMX7D_PLL_AUDIO_MAIN_CLK] = imx_clk_hw_gate("pll_audio_main_clk", "pll_audio_main_bypass", base + 0xf0, 13); + hws[IMX7D_PLL_VIDEO_MAIN_CLK] = imx_clk_hw_gate("pll_video_main_clk", "pll_video_main_bypass", base + 0x130, 13); + + hws[IMX7D_PLL_DRAM_TEST_DIV] = clk_hw_register_divider_table(NULL, "pll_dram_test_div", "pll_dram_main_bypass", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 21, 2, 0, test_div_table, &imx_ccm_lock); - clks[IMX7D_PLL_AUDIO_TEST_DIV] = clk_register_divider_table(NULL, "pll_audio_test_div", "pll_audio_main_clk", + hws[IMX7D_PLL_AUDIO_TEST_DIV] = clk_hw_register_divider_table(NULL, "pll_audio_test_div", "pll_audio_main_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 19, 2, 0, test_div_table, &imx_ccm_lock); - clks[IMX7D_PLL_AUDIO_POST_DIV] = clk_register_divider_table(NULL, "pll_audio_post_div", "pll_audio_test_div", + hws[IMX7D_PLL_AUDIO_POST_DIV] = clk_hw_register_divider_table(NULL, "pll_audio_post_div", "pll_audio_test_div", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xf0, 22, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX7D_PLL_VIDEO_TEST_DIV] = clk_register_divider_table(NULL, "pll_video_test_div", "pll_video_main_clk", + hws[IMX7D_PLL_VIDEO_TEST_DIV] = clk_hw_register_divider_table(NULL, "pll_video_test_div", "pll_video_main_clk", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 19, 2, 0, test_div_table, &imx_ccm_lock); - clks[IMX7D_PLL_VIDEO_POST_DIV] = clk_register_divider_table(NULL, "pll_video_post_div", "pll_video_test_div", + hws[IMX7D_PLL_VIDEO_POST_DIV] = clk_hw_register_divider_table(NULL, "pll_video_post_div", "pll_video_test_div", CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x130, 22, 2, 0, post_div_table, &imx_ccm_lock); - clks[IMX7D_PLL_SYS_PFD0_392M_CLK] = imx_clk_pfd("pll_sys_pfd0_392m_clk", "pll_sys_main_clk", base + 0xc0, 0); - clks[IMX7D_PLL_SYS_PFD1_332M_CLK] = imx_clk_pfd("pll_sys_pfd1_332m_clk", "pll_sys_main_clk", base + 0xc0, 1); - clks[IMX7D_PLL_SYS_PFD2_270M_CLK] = imx_clk_pfd("pll_sys_pfd2_270m_clk", "pll_sys_main_clk", base + 0xc0, 2); - - clks[IMX7D_PLL_SYS_PFD3_CLK] = imx_clk_pfd("pll_sys_pfd3_clk", "pll_sys_main_clk", base + 0xc0, 3); - clks[IMX7D_PLL_SYS_PFD4_CLK] = imx_clk_pfd("pll_sys_pfd4_clk", "pll_sys_main_clk", base + 0xd0, 0); - clks[IMX7D_PLL_SYS_PFD5_CLK] = imx_clk_pfd("pll_sys_pfd5_clk", "pll_sys_main_clk", base + 0xd0, 1); - clks[IMX7D_PLL_SYS_PFD6_CLK] = imx_clk_pfd("pll_sys_pfd6_clk", "pll_sys_main_clk", base + 0xd0, 2); - clks[IMX7D_PLL_SYS_PFD7_CLK] = imx_clk_pfd("pll_sys_pfd7_clk", "pll_sys_main_clk", base + 0xd0, 3); - - clks[IMX7D_PLL_SYS_MAIN_480M] = imx_clk_fixed_factor("pll_sys_main_480m", "pll_sys_main_clk", 1, 1); - clks[IMX7D_PLL_SYS_MAIN_240M] = imx_clk_fixed_factor("pll_sys_main_240m", "pll_sys_main_clk", 1, 2); - clks[IMX7D_PLL_SYS_MAIN_120M] = imx_clk_fixed_factor("pll_sys_main_120m", "pll_sys_main_clk", 1, 4); - clks[IMX7D_PLL_DRAM_MAIN_533M] = imx_clk_fixed_factor("pll_dram_533m", "pll_dram_main_clk", 1, 2); - - clks[IMX7D_PLL_SYS_MAIN_480M_CLK] = 
imx_clk_gate_dis_flags("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4, CLK_IS_CRITICAL); - clks[IMX7D_PLL_SYS_MAIN_240M_CLK] = imx_clk_gate_dis("pll_sys_main_240m_clk", "pll_sys_main_240m", base + 0xb0, 5); - clks[IMX7D_PLL_SYS_MAIN_120M_CLK] = imx_clk_gate_dis("pll_sys_main_120m_clk", "pll_sys_main_120m", base + 0xb0, 6); - clks[IMX7D_PLL_DRAM_MAIN_533M_CLK] = imx_clk_gate("pll_dram_533m_clk", "pll_dram_533m", base + 0x70, 12); - - clks[IMX7D_PLL_SYS_PFD0_196M] = imx_clk_fixed_factor("pll_sys_pfd0_196m", "pll_sys_pfd0_392m_clk", 1, 2); - clks[IMX7D_PLL_SYS_PFD1_166M] = imx_clk_fixed_factor("pll_sys_pfd1_166m", "pll_sys_pfd1_332m_clk", 1, 2); - clks[IMX7D_PLL_SYS_PFD2_135M] = imx_clk_fixed_factor("pll_sys_pfd2_135m", "pll_sys_pfd2_270m_clk", 1, 2); - - clks[IMX7D_PLL_SYS_PFD0_196M_CLK] = imx_clk_gate_dis("pll_sys_pfd0_196m_clk", "pll_sys_pfd0_196m", base + 0xb0, 26); - clks[IMX7D_PLL_SYS_PFD1_166M_CLK] = imx_clk_gate_dis("pll_sys_pfd1_166m_clk", "pll_sys_pfd1_166m", base + 0xb0, 27); - clks[IMX7D_PLL_SYS_PFD2_135M_CLK] = imx_clk_gate_dis("pll_sys_pfd2_135m_clk", "pll_sys_pfd2_135m", base + 0xb0, 28); - - clks[IMX7D_PLL_ENET_MAIN_CLK] = imx_clk_fixed_factor("pll_enet_main_clk", "pll_enet_main_bypass", 1, 1); - clks[IMX7D_PLL_ENET_MAIN_500M] = imx_clk_fixed_factor("pll_enet_500m", "pll_enet_main_clk", 1, 2); - clks[IMX7D_PLL_ENET_MAIN_250M] = imx_clk_fixed_factor("pll_enet_250m", "pll_enet_main_clk", 1, 4); - clks[IMX7D_PLL_ENET_MAIN_125M] = imx_clk_fixed_factor("pll_enet_125m", "pll_enet_main_clk", 1, 8); - clks[IMX7D_PLL_ENET_MAIN_100M] = imx_clk_fixed_factor("pll_enet_100m", "pll_enet_main_clk", 1, 10); - clks[IMX7D_PLL_ENET_MAIN_50M] = imx_clk_fixed_factor("pll_enet_50m", "pll_enet_main_clk", 1, 20); - clks[IMX7D_PLL_ENET_MAIN_40M] = imx_clk_fixed_factor("pll_enet_40m", "pll_enet_main_clk", 1, 25); - clks[IMX7D_PLL_ENET_MAIN_25M] = imx_clk_fixed_factor("pll_enet_25m", "pll_enet_main_clk", 1, 40); - - clks[IMX7D_PLL_ENET_MAIN_500M_CLK] = imx_clk_gate("pll_enet_500m_clk", "pll_enet_500m", base + 0xe0, 12); - clks[IMX7D_PLL_ENET_MAIN_250M_CLK] = imx_clk_gate("pll_enet_250m_clk", "pll_enet_250m", base + 0xe0, 11); - clks[IMX7D_PLL_ENET_MAIN_125M_CLK] = imx_clk_gate("pll_enet_125m_clk", "pll_enet_125m", base + 0xe0, 10); - clks[IMX7D_PLL_ENET_MAIN_100M_CLK] = imx_clk_gate("pll_enet_100m_clk", "pll_enet_100m", base + 0xe0, 9); - clks[IMX7D_PLL_ENET_MAIN_50M_CLK] = imx_clk_gate("pll_enet_50m_clk", "pll_enet_50m", base + 0xe0, 8); - clks[IMX7D_PLL_ENET_MAIN_40M_CLK] = imx_clk_gate("pll_enet_40m_clk", "pll_enet_40m", base + 0xe0, 7); - clks[IMX7D_PLL_ENET_MAIN_25M_CLK] = imx_clk_gate("pll_enet_25m_clk", "pll_enet_25m", base + 0xe0, 6); - - clks[IMX7D_LVDS1_OUT_SEL] = imx_clk_mux("lvds1_sel", base + 0x170, 0, 5, lvds1_sel, ARRAY_SIZE(lvds1_sel)); - clks[IMX7D_LVDS1_OUT_CLK] = imx_clk_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x170, 5, BIT(6)); + hws[IMX7D_PLL_SYS_PFD0_392M_CLK] = imx_clk_hw_pfd("pll_sys_pfd0_392m_clk", "pll_sys_main_clk", base + 0xc0, 0); + hws[IMX7D_PLL_SYS_PFD1_332M_CLK] = imx_clk_hw_pfd("pll_sys_pfd1_332m_clk", "pll_sys_main_clk", base + 0xc0, 1); + hws[IMX7D_PLL_SYS_PFD2_270M_CLK] = imx_clk_hw_pfd("pll_sys_pfd2_270m_clk", "pll_sys_main_clk", base + 0xc0, 2); + + hws[IMX7D_PLL_SYS_PFD3_CLK] = imx_clk_hw_pfd("pll_sys_pfd3_clk", "pll_sys_main_clk", base + 0xc0, 3); + hws[IMX7D_PLL_SYS_PFD4_CLK] = imx_clk_hw_pfd("pll_sys_pfd4_clk", "pll_sys_main_clk", base + 0xd0, 0); + hws[IMX7D_PLL_SYS_PFD5_CLK] = imx_clk_hw_pfd("pll_sys_pfd5_clk", "pll_sys_main_clk", base + 
0xd0, 1); + hws[IMX7D_PLL_SYS_PFD6_CLK] = imx_clk_hw_pfd("pll_sys_pfd6_clk", "pll_sys_main_clk", base + 0xd0, 2); + hws[IMX7D_PLL_SYS_PFD7_CLK] = imx_clk_hw_pfd("pll_sys_pfd7_clk", "pll_sys_main_clk", base + 0xd0, 3); + + hws[IMX7D_PLL_SYS_MAIN_480M] = imx_clk_hw_fixed_factor("pll_sys_main_480m", "pll_sys_main_clk", 1, 1); + hws[IMX7D_PLL_SYS_MAIN_240M] = imx_clk_hw_fixed_factor("pll_sys_main_240m", "pll_sys_main_clk", 1, 2); + hws[IMX7D_PLL_SYS_MAIN_120M] = imx_clk_hw_fixed_factor("pll_sys_main_120m", "pll_sys_main_clk", 1, 4); + hws[IMX7D_PLL_DRAM_MAIN_533M] = imx_clk_hw_fixed_factor("pll_dram_533m", "pll_dram_main_clk", 1, 2); + + hws[IMX7D_PLL_SYS_MAIN_480M_CLK] = imx_clk_hw_gate_dis_flags("pll_sys_main_480m_clk", "pll_sys_main_480m", base + 0xb0, 4, CLK_IS_CRITICAL); + hws[IMX7D_PLL_SYS_MAIN_240M_CLK] = imx_clk_hw_gate_dis("pll_sys_main_240m_clk", "pll_sys_main_240m", base + 0xb0, 5); + hws[IMX7D_PLL_SYS_MAIN_120M_CLK] = imx_clk_hw_gate_dis("pll_sys_main_120m_clk", "pll_sys_main_120m", base + 0xb0, 6); + hws[IMX7D_PLL_DRAM_MAIN_533M_CLK] = imx_clk_hw_gate("pll_dram_533m_clk", "pll_dram_533m", base + 0x70, 12); + + hws[IMX7D_PLL_SYS_PFD0_196M] = imx_clk_hw_fixed_factor("pll_sys_pfd0_196m", "pll_sys_pfd0_392m_clk", 1, 2); + hws[IMX7D_PLL_SYS_PFD1_166M] = imx_clk_hw_fixed_factor("pll_sys_pfd1_166m", "pll_sys_pfd1_332m_clk", 1, 2); + hws[IMX7D_PLL_SYS_PFD2_135M] = imx_clk_hw_fixed_factor("pll_sys_pfd2_135m", "pll_sys_pfd2_270m_clk", 1, 2); + + hws[IMX7D_PLL_SYS_PFD0_196M_CLK] = imx_clk_hw_gate_dis("pll_sys_pfd0_196m_clk", "pll_sys_pfd0_196m", base + 0xb0, 26); + hws[IMX7D_PLL_SYS_PFD1_166M_CLK] = imx_clk_hw_gate_dis("pll_sys_pfd1_166m_clk", "pll_sys_pfd1_166m", base + 0xb0, 27); + hws[IMX7D_PLL_SYS_PFD2_135M_CLK] = imx_clk_hw_gate_dis("pll_sys_pfd2_135m_clk", "pll_sys_pfd2_135m", base + 0xb0, 28); + + hws[IMX7D_PLL_ENET_MAIN_CLK] = imx_clk_hw_fixed_factor("pll_enet_main_clk", "pll_enet_main_bypass", 1, 1); + hws[IMX7D_PLL_ENET_MAIN_500M] = imx_clk_hw_fixed_factor("pll_enet_500m", "pll_enet_main_clk", 1, 2); + hws[IMX7D_PLL_ENET_MAIN_250M] = imx_clk_hw_fixed_factor("pll_enet_250m", "pll_enet_main_clk", 1, 4); + hws[IMX7D_PLL_ENET_MAIN_125M] = imx_clk_hw_fixed_factor("pll_enet_125m", "pll_enet_main_clk", 1, 8); + hws[IMX7D_PLL_ENET_MAIN_100M] = imx_clk_hw_fixed_factor("pll_enet_100m", "pll_enet_main_clk", 1, 10); + hws[IMX7D_PLL_ENET_MAIN_50M] = imx_clk_hw_fixed_factor("pll_enet_50m", "pll_enet_main_clk", 1, 20); + hws[IMX7D_PLL_ENET_MAIN_40M] = imx_clk_hw_fixed_factor("pll_enet_40m", "pll_enet_main_clk", 1, 25); + hws[IMX7D_PLL_ENET_MAIN_25M] = imx_clk_hw_fixed_factor("pll_enet_25m", "pll_enet_main_clk", 1, 40); + + hws[IMX7D_PLL_ENET_MAIN_500M_CLK] = imx_clk_hw_gate("pll_enet_500m_clk", "pll_enet_500m", base + 0xe0, 12); + hws[IMX7D_PLL_ENET_MAIN_250M_CLK] = imx_clk_hw_gate("pll_enet_250m_clk", "pll_enet_250m", base + 0xe0, 11); + hws[IMX7D_PLL_ENET_MAIN_125M_CLK] = imx_clk_hw_gate("pll_enet_125m_clk", "pll_enet_125m", base + 0xe0, 10); + hws[IMX7D_PLL_ENET_MAIN_100M_CLK] = imx_clk_hw_gate("pll_enet_100m_clk", "pll_enet_100m", base + 0xe0, 9); + hws[IMX7D_PLL_ENET_MAIN_50M_CLK] = imx_clk_hw_gate("pll_enet_50m_clk", "pll_enet_50m", base + 0xe0, 8); + hws[IMX7D_PLL_ENET_MAIN_40M_CLK] = imx_clk_hw_gate("pll_enet_40m_clk", "pll_enet_40m", base + 0xe0, 7); + hws[IMX7D_PLL_ENET_MAIN_25M_CLK] = imx_clk_hw_gate("pll_enet_25m_clk", "pll_enet_25m", base + 0xe0, 6); + + hws[IMX7D_LVDS1_OUT_SEL] = imx_clk_hw_mux("lvds1_sel", base + 0x170, 0, 5, lvds1_sel, ARRAY_SIZE(lvds1_sel)); + 
hws[IMX7D_LVDS1_OUT_CLK] = imx_clk_hw_gate_exclusive("lvds1_out", "lvds1_sel", base + 0x170, 5, BIT(6)); np = ccm_node; base = of_iomap(np, 0); WARN_ON(!base); - clks[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel)); - clks[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel)); - clks[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel)); - clks[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel)); - clks[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel)); - clks[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_mux2("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel)); - clks[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_mux2("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel)); - clks[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_mux2("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel)); - clks[IMX7D_DRAM_ROOT_SRC] = imx_clk_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel)); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_mux2("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel)); - clks[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel)); - clks[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_mux2("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel)); - clks[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_mux2("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel)); - clks[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_mux2("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel)); - clks[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_mux2("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel)); - clks[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_mux2("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel)); - clks[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_mux2("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel)); - clks[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_mux2("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel)); - clks[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_mux2("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel)); - clks[IMX7D_SAI1_ROOT_SRC] = imx_clk_mux2("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel)); - clks[IMX7D_SAI2_ROOT_SRC] = imx_clk_mux2("sai2_src", base + 0xa580, 24, 3, sai2_sel, ARRAY_SIZE(sai2_sel)); - clks[IMX7D_SAI3_ROOT_SRC] = imx_clk_mux2("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel)); - clks[IMX7D_SPDIF_ROOT_SRC] = imx_clk_mux2("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel)); - clks[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_mux2("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel)); - clks[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_mux2("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel)); - clks[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_mux2("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel)); - clks[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_mux2("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel)); 
- clks[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_mux2("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel)); - clks[IMX7D_EIM_ROOT_SRC] = imx_clk_mux2("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel)); - clks[IMX7D_NAND_ROOT_SRC] = imx_clk_mux2("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel)); - clks[IMX7D_QSPI_ROOT_SRC] = imx_clk_mux2("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel)); - clks[IMX7D_USDHC1_ROOT_SRC] = imx_clk_mux2("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel)); - clks[IMX7D_USDHC2_ROOT_SRC] = imx_clk_mux2("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel)); - clks[IMX7D_USDHC3_ROOT_SRC] = imx_clk_mux2("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel)); - clks[IMX7D_CAN1_ROOT_SRC] = imx_clk_mux2("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel)); - clks[IMX7D_CAN2_ROOT_SRC] = imx_clk_mux2("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel)); - clks[IMX7D_I2C1_ROOT_SRC] = imx_clk_mux2("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel)); - clks[IMX7D_I2C2_ROOT_SRC] = imx_clk_mux2("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel)); - clks[IMX7D_I2C3_ROOT_SRC] = imx_clk_mux2("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel)); - clks[IMX7D_I2C4_ROOT_SRC] = imx_clk_mux2("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel)); - clks[IMX7D_UART1_ROOT_SRC] = imx_clk_mux2("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel)); - clks[IMX7D_UART2_ROOT_SRC] = imx_clk_mux2("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel)); - clks[IMX7D_UART3_ROOT_SRC] = imx_clk_mux2("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel)); - clks[IMX7D_UART4_ROOT_SRC] = imx_clk_mux2("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel)); - clks[IMX7D_UART5_ROOT_SRC] = imx_clk_mux2("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel)); - clks[IMX7D_UART6_ROOT_SRC] = imx_clk_mux2("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel)); - clks[IMX7D_UART7_ROOT_SRC] = imx_clk_mux2("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel)); - clks[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_mux2("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel)); - clks[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_mux2("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel)); - clks[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_mux2("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel)); - clks[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_mux2("ecspi4_src", base + 0xb480, 24, 3, ecspi4_sel, ARRAY_SIZE(ecspi4_sel)); - clks[IMX7D_PWM1_ROOT_SRC] = imx_clk_mux2("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel)); - clks[IMX7D_PWM2_ROOT_SRC] = imx_clk_mux2("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel)); - clks[IMX7D_PWM3_ROOT_SRC] = imx_clk_mux2("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel)); - clks[IMX7D_PWM4_ROOT_SRC] = imx_clk_mux2("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel)); - clks[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_mux2("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel)); - clks[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_mux2("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel)); - clks[IMX7D_SIM1_ROOT_SRC] = imx_clk_mux2("sim1_src", base + 0xb800, 24, 3, sim1_sel, 
ARRAY_SIZE(sim1_sel)); - clks[IMX7D_SIM2_ROOT_SRC] = imx_clk_mux2("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel)); - clks[IMX7D_GPT1_ROOT_SRC] = imx_clk_mux2("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel)); - clks[IMX7D_GPT2_ROOT_SRC] = imx_clk_mux2("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel)); - clks[IMX7D_GPT3_ROOT_SRC] = imx_clk_mux2("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel)); - clks[IMX7D_GPT4_ROOT_SRC] = imx_clk_mux2("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel)); - clks[IMX7D_TRACE_ROOT_SRC] = imx_clk_mux2("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel)); - clks[IMX7D_WDOG_ROOT_SRC] = imx_clk_mux2("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel)); - clks[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_mux2("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel)); - clks[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_mux2("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel)); - clks[IMX7D_WRCLK_ROOT_SRC] = imx_clk_mux2("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel)); - clks[IMX7D_CLKO1_ROOT_SRC] = imx_clk_mux2("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel)); - clks[IMX7D_CLKO2_ROOT_SRC] = imx_clk_mux2("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel)); - - clks[IMX7D_ARM_A7_ROOT_CG] = imx_clk_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28); - clks[IMX7D_ARM_M4_ROOT_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); - clks[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_gate3("axi_cg", "axi_src", base + 0x8800, 28); - clks[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28); - clks[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28); - clks[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_gate3("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28); - clks[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_gate3("ahb_cg", "ahb_src", base + 0x9000, 28); - clks[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_gate3("dram_phym_cg", "dram_phym_src", base + 0x9800, 28); - clks[IMX7D_DRAM_ROOT_CG] = imx_clk_gate3("dram_cg", "dram_src", base + 0x9880, 28); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_gate3("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28); - clks[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_gate3("dram_alt_cg", "dram_alt_src", base + 0xa080, 28); - clks[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_gate3("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28); - clks[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_gate3("pcie_ctrl_cg", "pcie_ctrl_src", base + 0xa180, 28); - clks[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_gate3("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28); - clks[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_gate3("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28); - clks[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_gate3("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28); - clks[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_gate3("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28); - clks[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_gate3("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28); - clks[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_gate3("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28); - clks[IMX7D_SAI1_ROOT_CG] = imx_clk_gate3("sai1_cg", "sai1_src", base + 0xa500, 28); - clks[IMX7D_SAI2_ROOT_CG] = imx_clk_gate3("sai2_cg", "sai2_src", base + 0xa580, 28); - clks[IMX7D_SAI3_ROOT_CG] = imx_clk_gate3("sai3_cg", "sai3_src", base + 0xa600, 28); - 
clks[IMX7D_SPDIF_ROOT_CG] = imx_clk_gate3("spdif_cg", "spdif_src", base + 0xa680, 28); - clks[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_gate3("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28); - clks[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_gate3("enet1_time_cg", "enet1_time_src", base + 0xa780, 28); - clks[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_gate3("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28); - clks[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_gate3("enet2_time_cg", "enet2_time_src", base + 0xa880, 28); - clks[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_gate3("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28); - clks[IMX7D_EIM_ROOT_CG] = imx_clk_gate3("eim_cg", "eim_src", base + 0xa980, 28); - clks[IMX7D_NAND_ROOT_CG] = imx_clk_gate3("nand_cg", "nand_src", base + 0xaa00, 28); - clks[IMX7D_QSPI_ROOT_CG] = imx_clk_gate3("qspi_cg", "qspi_src", base + 0xaa80, 28); - clks[IMX7D_USDHC1_ROOT_CG] = imx_clk_gate3("usdhc1_cg", "usdhc1_src", base + 0xab00, 28); - clks[IMX7D_USDHC2_ROOT_CG] = imx_clk_gate3("usdhc2_cg", "usdhc2_src", base + 0xab80, 28); - clks[IMX7D_USDHC3_ROOT_CG] = imx_clk_gate3("usdhc3_cg", "usdhc3_src", base + 0xac00, 28); - clks[IMX7D_CAN1_ROOT_CG] = imx_clk_gate3("can1_cg", "can1_src", base + 0xac80, 28); - clks[IMX7D_CAN2_ROOT_CG] = imx_clk_gate3("can2_cg", "can2_src", base + 0xad00, 28); - clks[IMX7D_I2C1_ROOT_CG] = imx_clk_gate3("i2c1_cg", "i2c1_src", base + 0xad80, 28); - clks[IMX7D_I2C2_ROOT_CG] = imx_clk_gate3("i2c2_cg", "i2c2_src", base + 0xae00, 28); - clks[IMX7D_I2C3_ROOT_CG] = imx_clk_gate3("i2c3_cg", "i2c3_src", base + 0xae80, 28); - clks[IMX7D_I2C4_ROOT_CG] = imx_clk_gate3("i2c4_cg", "i2c4_src", base + 0xaf00, 28); - clks[IMX7D_UART1_ROOT_CG] = imx_clk_gate3("uart1_cg", "uart1_src", base + 0xaf80, 28); - clks[IMX7D_UART2_ROOT_CG] = imx_clk_gate3("uart2_cg", "uart2_src", base + 0xb000, 28); - clks[IMX7D_UART3_ROOT_CG] = imx_clk_gate3("uart3_cg", "uart3_src", base + 0xb080, 28); - clks[IMX7D_UART4_ROOT_CG] = imx_clk_gate3("uart4_cg", "uart4_src", base + 0xb100, 28); - clks[IMX7D_UART5_ROOT_CG] = imx_clk_gate3("uart5_cg", "uart5_src", base + 0xb180, 28); - clks[IMX7D_UART6_ROOT_CG] = imx_clk_gate3("uart6_cg", "uart6_src", base + 0xb200, 28); - clks[IMX7D_UART7_ROOT_CG] = imx_clk_gate3("uart7_cg", "uart7_src", base + 0xb280, 28); - clks[IMX7D_ECSPI1_ROOT_CG] = imx_clk_gate3("ecspi1_cg", "ecspi1_src", base + 0xb300, 28); - clks[IMX7D_ECSPI2_ROOT_CG] = imx_clk_gate3("ecspi2_cg", "ecspi2_src", base + 0xb380, 28); - clks[IMX7D_ECSPI3_ROOT_CG] = imx_clk_gate3("ecspi3_cg", "ecspi3_src", base + 0xb400, 28); - clks[IMX7D_ECSPI4_ROOT_CG] = imx_clk_gate3("ecspi4_cg", "ecspi4_src", base + 0xb480, 28); - clks[IMX7D_PWM1_ROOT_CG] = imx_clk_gate3("pwm1_cg", "pwm1_src", base + 0xb500, 28); - clks[IMX7D_PWM2_ROOT_CG] = imx_clk_gate3("pwm2_cg", "pwm2_src", base + 0xb580, 28); - clks[IMX7D_PWM3_ROOT_CG] = imx_clk_gate3("pwm3_cg", "pwm3_src", base + 0xb600, 28); - clks[IMX7D_PWM4_ROOT_CG] = imx_clk_gate3("pwm4_cg", "pwm4_src", base + 0xb680, 28); - clks[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_gate3("flextimer1_cg", "flextimer1_src", base + 0xb700, 28); - clks[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_gate3("flextimer2_cg", "flextimer2_src", base + 0xb780, 28); - clks[IMX7D_SIM1_ROOT_CG] = imx_clk_gate3("sim1_cg", "sim1_src", base + 0xb800, 28); - clks[IMX7D_SIM2_ROOT_CG] = imx_clk_gate3("sim2_cg", "sim2_src", base + 0xb880, 28); - clks[IMX7D_GPT1_ROOT_CG] = imx_clk_gate3("gpt1_cg", "gpt1_src", base + 0xb900, 28); - clks[IMX7D_GPT2_ROOT_CG] = imx_clk_gate3("gpt2_cg", "gpt2_src", base + 0xb980, 28); - 
clks[IMX7D_GPT3_ROOT_CG] = imx_clk_gate3("gpt3_cg", "gpt3_src", base + 0xbA00, 28); - clks[IMX7D_GPT4_ROOT_CG] = imx_clk_gate3("gpt4_cg", "gpt4_src", base + 0xbA80, 28); - clks[IMX7D_TRACE_ROOT_CG] = imx_clk_gate3("trace_cg", "trace_src", base + 0xbb00, 28); - clks[IMX7D_WDOG_ROOT_CG] = imx_clk_gate3("wdog_cg", "wdog_src", base + 0xbb80, 28); - clks[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_gate3("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28); - clks[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_gate3("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28); - clks[IMX7D_WRCLK_ROOT_CG] = imx_clk_gate3("wrclk_cg", "wrclk_src", base + 0xbd00, 28); - clks[IMX7D_CLKO1_ROOT_CG] = imx_clk_gate3("clko1_cg", "clko1_src", base + 0xbd80, 28); - clks[IMX7D_CLKO2_ROOT_CG] = imx_clk_gate3("clko2_cg", "clko2_src", base + 0xbe00, 28); - - clks[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_divider2("axi_pre_div", "axi_cg", base + 0x8800, 16, 3); - clks[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_divider2("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3); - clks[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_divider2("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3); - clks[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_divider2("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3); - clks[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_divider2("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_divider2("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3); - clks[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_divider2("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3); - clks[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_divider2("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3); - clks[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_divider2("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3); - clks[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_divider2("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3); - clks[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_divider2("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3); - clks[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_divider2("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3); - clks[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = imx_clk_divider2("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3); - clks[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_divider2("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3); - clks[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_divider2("mipi_dphy_pre_div", "mipi_dphy_cg", base + 0xa480, 16, 3); - clks[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_divider2("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3); - clks[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_divider2("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3); - clks[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_divider2("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3); - clks[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_divider2("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3); - clks[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3); - clks[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_divider2("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3); - clks[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3); - clks[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_divider2("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3); - clks[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_divider2("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3); - 
clks[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_divider2("eim_pre_div", "eim_cg", base + 0xa980, 16, 3); - clks[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_divider2("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3); - clks[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_divider2("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3); - clks[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_divider2("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3); - clks[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_divider2("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3); - clks[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_divider2("usdhc3_pre_div", "usdhc3_cg", base + 0xac00, 16, 3); - clks[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_divider2("can1_pre_div", "can1_cg", base + 0xac80, 16, 3); - clks[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_divider2("can2_pre_div", "can2_cg", base + 0xad00, 16, 3); - clks[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_divider2("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3); - clks[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_divider2("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3); - clks[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_divider2("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3); - clks[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_divider2("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3); - clks[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_divider2("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3); - clks[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_divider2("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3); - clks[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_divider2("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3); - clks[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_divider2("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3); - clks[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_divider2("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3); - clks[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_divider2("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3); - clks[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_divider2("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3); - clks[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_divider2("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3); - clks[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_divider2("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3); - clks[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_divider2("ecspi3_pre_div", "ecspi3_cg", base + 0xb400, 16, 3); - clks[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_divider2("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3); - clks[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_divider2("pwm1_pre_div", "pwm1_cg", base + 0xb500, 16, 3); - clks[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_divider2("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3); - clks[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_divider2("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3); - clks[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_divider2("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3); - clks[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_divider2("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3); - clks[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_divider2("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3); - clks[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_divider2("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3); - clks[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_divider2("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3); - clks[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_divider2("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3); - clks[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_divider2("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3); - clks[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_divider2("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3); - 
clks[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_divider2("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3); - clks[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_divider2("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3); - clks[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_divider2("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3); - clks[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_divider2("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3); - clks[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_divider2("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3); - clks[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_divider2("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3); - clks[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_divider2("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3); - clks[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_divider2("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3); - - clks[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3); - clks[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); - clks[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6); - clks[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6); - clks[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6); - clks[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_divider2("nand_usdhc_root_clk", "nand_usdhc_pre_div", base + 0x8980, 0, 6); - clks[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_divider2("ahb_root_clk", "ahb_pre_div", base + 0x9000, 0, 6); - clks[IMX7D_IPG_ROOT_CLK] = imx_clk_divider_flags("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_PARENT); - clks[IMX7D_DRAM_ROOT_DIV] = imx_clk_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3); - clks[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3); - clks[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_divider2("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6); - clks[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_divider2("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 0xa180, 0, 6); - clks[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_divider2("pcie_phy_post_div", "pcie_phy_pre_div", base + 0xa200, 0, 6); - clks[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_divider2("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6); - clks[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_divider2("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6); - clks[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_divider2("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6); - clks[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_divider2("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6); - clks[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_divider2("mipi_dphy_post_div", "mipi_dphy_pre_div", base + 0xa480, 0, 6); - clks[IMX7D_SAI1_ROOT_DIV] = imx_clk_divider2("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6); - clks[IMX7D_SAI2_ROOT_DIV] = imx_clk_divider2("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6); - clks[IMX7D_SAI3_ROOT_DIV] = imx_clk_divider2("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6); - clks[IMX7D_SPDIF_ROOT_DIV] = imx_clk_divider2("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6); - clks[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_divider2("enet1_ref_post_div", 
"enet1_ref_pre_div", base + 0xa700, 0, 6); - clks[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_divider2("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6); - clks[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_divider2("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6); - clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6); - clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_divider2("enet_phy_ref_root_clk", "enet_phy_ref_pre_div", base + 0xa900, 0, 6); - clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6); - clks[IMX7D_NAND_ROOT_CLK] = imx_clk_divider2("nand_root_clk", "nand_pre_div", base + 0xaa00, 0, 6); - clks[IMX7D_QSPI_ROOT_DIV] = imx_clk_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6); - clks[IMX7D_USDHC1_ROOT_DIV] = imx_clk_divider2("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6); - clks[IMX7D_USDHC2_ROOT_DIV] = imx_clk_divider2("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6); - clks[IMX7D_USDHC3_ROOT_DIV] = imx_clk_divider2("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6); - clks[IMX7D_CAN1_ROOT_DIV] = imx_clk_divider2("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6); - clks[IMX7D_CAN2_ROOT_DIV] = imx_clk_divider2("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6); - clks[IMX7D_I2C1_ROOT_DIV] = imx_clk_divider2("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6); - clks[IMX7D_I2C2_ROOT_DIV] = imx_clk_divider2("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6); - clks[IMX7D_I2C3_ROOT_DIV] = imx_clk_divider2("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6); - clks[IMX7D_I2C4_ROOT_DIV] = imx_clk_divider2("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6); - clks[IMX7D_UART1_ROOT_DIV] = imx_clk_divider2("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6); - clks[IMX7D_UART2_ROOT_DIV] = imx_clk_divider2("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6); - clks[IMX7D_UART3_ROOT_DIV] = imx_clk_divider2("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6); - clks[IMX7D_UART4_ROOT_DIV] = imx_clk_divider2("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6); - clks[IMX7D_UART5_ROOT_DIV] = imx_clk_divider2("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6); - clks[IMX7D_UART6_ROOT_DIV] = imx_clk_divider2("uart6_post_div", "uart6_pre_div", base + 0xb200, 0, 6); - clks[IMX7D_UART7_ROOT_DIV] = imx_clk_divider2("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6); - clks[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_divider2("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6); - clks[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_divider2("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6); - clks[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_divider2("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6); - clks[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_divider2("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6); - clks[IMX7D_PWM1_ROOT_DIV] = imx_clk_divider2("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6); - clks[IMX7D_PWM2_ROOT_DIV] = imx_clk_divider2("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6); - clks[IMX7D_PWM3_ROOT_DIV] = imx_clk_divider2("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6); - clks[IMX7D_PWM4_ROOT_DIV] = imx_clk_divider2("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6); - clks[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_divider2("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6); - clks[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_divider2("flextimer2_post_div", 
"flextimer2_pre_div", base + 0xb780, 0, 6); - clks[IMX7D_SIM1_ROOT_DIV] = imx_clk_divider2("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6); - clks[IMX7D_SIM2_ROOT_DIV] = imx_clk_divider2("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6); - clks[IMX7D_GPT1_ROOT_DIV] = imx_clk_divider2("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6); - clks[IMX7D_GPT2_ROOT_DIV] = imx_clk_divider2("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6); - clks[IMX7D_GPT3_ROOT_DIV] = imx_clk_divider2("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6); - clks[IMX7D_GPT4_ROOT_DIV] = imx_clk_divider2("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6); - clks[IMX7D_TRACE_ROOT_DIV] = imx_clk_divider2("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6); - clks[IMX7D_WDOG_ROOT_DIV] = imx_clk_divider2("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6); - clks[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_divider2("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6); - clks[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_divider2("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6); - clks[IMX7D_WRCLK_ROOT_DIV] = imx_clk_divider2("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6); - clks[IMX7D_CLKO1_ROOT_DIV] = imx_clk_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6); - clks[IMX7D_CLKO2_ROOT_DIV] = imx_clk_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6); - - clks[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_gate2_flags("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0, CLK_OPS_PARENT_ENABLE); - clks[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0); - clks[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_gate2_flags("main_axi_root_clk", "axi_post_div", base + 0x4040, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); - clks[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); - clks[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); - clks[IMX7D_OCRAM_CLK] = imx_clk_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0); - clks[IMX7D_OCRAM_S_CLK] = imx_clk_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); - clks[IMX7D_DRAM_ROOT_CLK] = imx_clk_gate2_flags("dram_root_clk", "dram_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); - clks[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_gate2_flags("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); - clks[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); - clks[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); - clks[IMX7D_OCOTP_CLK] = imx_clk_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0); - clks[IMX7D_SNVS_CLK] = imx_clk_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0); - clks[IMX7D_MU_ROOT_CLK] = imx_clk_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0); - clks[IMX7D_CAAM_CLK] = imx_clk_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0); - clks[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0); - clks[IMX7D_SDMA_CORE_CLK] = imx_clk_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0); - clks[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0); - clks[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_gate4("pcie_phy_root_clk", 
"pcie_phy_post_div", base + 0x4600, 0); - clks[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0); - clks[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0); - clks[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0); - clks[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0); - clks[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0); - clks[IMX7D_ENET1_IPG_ROOT_CLK] = imx_clk_gate2_shared2("enet1_ipg_root_clk", "enet_axi_post_div", base + 0x4700, 0, &share_count_enet1); - clks[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_gate2_shared2("enet1_time_root_clk", "enet1_time_post_div", base + 0x4700, 0, &share_count_enet1); - clks[IMX7D_ENET2_IPG_ROOT_CLK] = imx_clk_gate2_shared2("enet2_ipg_root_clk", "enet_axi_post_div", base + 0x4710, 0, &share_count_enet2); - clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate2_shared2("enet2_time_root_clk", "enet2_time_post_div", base + 0x4710, 0, &share_count_enet2); - clks[IMX7D_SAI1_ROOT_CLK] = imx_clk_gate2_shared2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0, &share_count_sai1); - clks[IMX7D_SAI1_IPG_CLK] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_root_clk", base + 0x48c0, 0, &share_count_sai1); - clks[IMX7D_SAI2_ROOT_CLK] = imx_clk_gate2_shared2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0, &share_count_sai2); - clks[IMX7D_SAI2_IPG_CLK] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_root_clk", base + 0x48d0, 0, &share_count_sai2); - clks[IMX7D_SAI3_ROOT_CLK] = imx_clk_gate2_shared2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0, &share_count_sai3); - clks[IMX7D_SAI3_IPG_CLK] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root_clk", base + 0x48e0, 0, &share_count_sai3); - clks[IMX7D_SPDIF_ROOT_CLK] = imx_clk_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0); - clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0); - clks[IMX7D_NAND_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_rawnand_clk", "nand_root_clk", base + 0x4140, 0, &share_count_nand); - clks[IMX7D_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_root_clk", base + 0x4140, 0, &share_count_nand); - clks[IMX7D_QSPI_ROOT_CLK] = imx_clk_gate4("qspi_root_clk", "qspi_post_div", base + 0x4150, 0); - clks[IMX7D_USDHC1_ROOT_CLK] = imx_clk_gate4("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0); - clks[IMX7D_USDHC2_ROOT_CLK] = imx_clk_gate4("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0); - clks[IMX7D_USDHC3_ROOT_CLK] = imx_clk_gate4("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0); - clks[IMX7D_CAN1_ROOT_CLK] = imx_clk_gate4("can1_root_clk", "can1_post_div", base + 0x4740, 0); - clks[IMX7D_CAN2_ROOT_CLK] = imx_clk_gate4("can2_root_clk", "can2_post_div", base + 0x4750, 0); - clks[IMX7D_I2C1_ROOT_CLK] = imx_clk_gate4("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0); - clks[IMX7D_I2C2_ROOT_CLK] = imx_clk_gate4("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0); - clks[IMX7D_I2C3_ROOT_CLK] = imx_clk_gate4("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0); - clks[IMX7D_I2C4_ROOT_CLK] = imx_clk_gate4("i2c4_root_clk", "i2c4_post_div", base + 0x48b0, 0); - clks[IMX7D_UART1_ROOT_CLK] = imx_clk_gate4("uart1_root_clk", "uart1_post_div", base + 0x4940, 0); - clks[IMX7D_UART2_ROOT_CLK] = imx_clk_gate4("uart2_root_clk", "uart2_post_div", base + 
0x4950, 0); - clks[IMX7D_UART3_ROOT_CLK] = imx_clk_gate4("uart3_root_clk", "uart3_post_div", base + 0x4960, 0); - clks[IMX7D_UART4_ROOT_CLK] = imx_clk_gate4("uart4_root_clk", "uart4_post_div", base + 0x4970, 0); - clks[IMX7D_UART5_ROOT_CLK] = imx_clk_gate4("uart5_root_clk", "uart5_post_div", base + 0x4980, 0); - clks[IMX7D_UART6_ROOT_CLK] = imx_clk_gate4("uart6_root_clk", "uart6_post_div", base + 0x4990, 0); - clks[IMX7D_UART7_ROOT_CLK] = imx_clk_gate4("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0); - clks[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_gate4("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0); - clks[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_gate4("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0); - clks[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_gate4("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0); - clks[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_gate4("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0); - clks[IMX7D_PWM1_ROOT_CLK] = imx_clk_gate4("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0); - clks[IMX7D_PWM2_ROOT_CLK] = imx_clk_gate4("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0); - clks[IMX7D_PWM3_ROOT_CLK] = imx_clk_gate4("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0); - clks[IMX7D_PWM4_ROOT_CLK] = imx_clk_gate4("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0); - clks[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_gate4("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0); - clks[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_gate4("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0); - clks[IMX7D_SIM1_ROOT_CLK] = imx_clk_gate4("sim1_root_clk", "sim1_post_div", base + 0x4900, 0); - clks[IMX7D_SIM2_ROOT_CLK] = imx_clk_gate4("sim2_root_clk", "sim2_post_div", base + 0x4910, 0); - clks[IMX7D_GPT1_ROOT_CLK] = imx_clk_gate4("gpt1_root_clk", "gpt1_post_div", base + 0x47c0, 0); - clks[IMX7D_GPT2_ROOT_CLK] = imx_clk_gate4("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0); - clks[IMX7D_GPT3_ROOT_CLK] = imx_clk_gate4("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0); - clks[IMX7D_GPT4_ROOT_CLK] = imx_clk_gate4("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0); - clks[IMX7D_TRACE_ROOT_CLK] = imx_clk_gate4("trace_root_clk", "trace_post_div", base + 0x4300, 0); - clks[IMX7D_WDOG1_ROOT_CLK] = imx_clk_gate4("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0); - clks[IMX7D_WDOG2_ROOT_CLK] = imx_clk_gate4("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0); - clks[IMX7D_WDOG3_ROOT_CLK] = imx_clk_gate4("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0); - clks[IMX7D_WDOG4_ROOT_CLK] = imx_clk_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); - clks[IMX7D_KPP_ROOT_CLK] = imx_clk_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0); - clks[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); - clks[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); - clks[IMX7D_WRCLK_ROOT_CLK] = imx_clk_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); - clks[IMX7D_USB_CTRL_CLK] = imx_clk_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0); - clks[IMX7D_USB_PHY1_CLK] = imx_clk_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0); - clks[IMX7D_USB_PHY2_CLK] = imx_clk_gate4("usb_phy2_clk", "pll_usb_main_clk", base + 0x46b0, 0); - clks[IMX7D_ADC_ROOT_CLK] = imx_clk_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0); - - clks[IMX7D_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc", 1, 8); - - clks[IMX7D_CLK_ARM] = imx_clk_cpu("arm", 
"arm_a7_root_clk", - clks[IMX7D_ARM_A7_ROOT_CLK], - clks[IMX7D_ARM_A7_ROOT_SRC], - clks[IMX7D_PLL_ARM_MAIN_CLK], - clks[IMX7D_PLL_SYS_MAIN_CLK]); - - imx_check_clocks(clks, ARRAY_SIZE(clks)); - - clk_data.clks = clks; - clk_data.clk_num = ARRAY_SIZE(clks); - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); - - clk_set_parent(clks[IMX7D_PLL_ARM_MAIN_BYPASS], clks[IMX7D_PLL_ARM_MAIN]); - clk_set_parent(clks[IMX7D_PLL_DRAM_MAIN_BYPASS], clks[IMX7D_PLL_DRAM_MAIN]); - clk_set_parent(clks[IMX7D_PLL_SYS_MAIN_BYPASS], clks[IMX7D_PLL_SYS_MAIN]); - clk_set_parent(clks[IMX7D_PLL_ENET_MAIN_BYPASS], clks[IMX7D_PLL_ENET_MAIN]); - clk_set_parent(clks[IMX7D_PLL_AUDIO_MAIN_BYPASS], clks[IMX7D_PLL_AUDIO_MAIN]); - clk_set_parent(clks[IMX7D_PLL_VIDEO_MAIN_BYPASS], clks[IMX7D_PLL_VIDEO_MAIN]); - - clk_set_parent(clks[IMX7D_MIPI_CSI_ROOT_SRC], clks[IMX7D_PLL_SYS_PFD3_CLK]); + hws[IMX7D_ARM_A7_ROOT_SRC] = imx_clk_hw_mux2("arm_a7_src", base + 0x8000, 24, 3, arm_a7_sel, ARRAY_SIZE(arm_a7_sel)); + hws[IMX7D_ARM_M4_ROOT_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, arm_m4_sel, ARRAY_SIZE(arm_m4_sel)); + hws[IMX7D_MAIN_AXI_ROOT_SRC] = imx_clk_hw_mux2("axi_src", base + 0x8800, 24, 3, axi_sel, ARRAY_SIZE(axi_sel)); + hws[IMX7D_DISP_AXI_ROOT_SRC] = imx_clk_hw_mux2("disp_axi_src", base + 0x8880, 24, 3, disp_axi_sel, ARRAY_SIZE(disp_axi_sel)); + hws[IMX7D_ENET_AXI_ROOT_SRC] = imx_clk_hw_mux2("enet_axi_src", base + 0x8900, 24, 3, enet_axi_sel, ARRAY_SIZE(enet_axi_sel)); + hws[IMX7D_NAND_USDHC_BUS_ROOT_SRC] = imx_clk_hw_mux2("nand_usdhc_src", base + 0x8980, 24, 3, nand_usdhc_bus_sel, ARRAY_SIZE(nand_usdhc_bus_sel)); + hws[IMX7D_AHB_CHANNEL_ROOT_SRC] = imx_clk_hw_mux2("ahb_src", base + 0x9000, 24, 3, ahb_channel_sel, ARRAY_SIZE(ahb_channel_sel)); + hws[IMX7D_DRAM_PHYM_ROOT_SRC] = imx_clk_hw_mux2("dram_phym_src", base + 0x9800, 24, 1, dram_phym_sel, ARRAY_SIZE(dram_phym_sel)); + hws[IMX7D_DRAM_ROOT_SRC] = imx_clk_hw_mux2("dram_src", base + 0x9880, 24, 1, dram_sel, ARRAY_SIZE(dram_sel)); + hws[IMX7D_DRAM_PHYM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_phym_alt_src", base + 0xa000, 24, 3, dram_phym_alt_sel, ARRAY_SIZE(dram_phym_alt_sel)); + hws[IMX7D_DRAM_ALT_ROOT_SRC] = imx_clk_hw_mux2("dram_alt_src", base + 0xa080, 24, 3, dram_alt_sel, ARRAY_SIZE(dram_alt_sel)); + hws[IMX7D_USB_HSIC_ROOT_SRC] = imx_clk_hw_mux2("usb_hsic_src", base + 0xa100, 24, 3, usb_hsic_sel, ARRAY_SIZE(usb_hsic_sel)); + hws[IMX7D_PCIE_CTRL_ROOT_SRC] = imx_clk_hw_mux2("pcie_ctrl_src", base + 0xa180, 24, 3, pcie_ctrl_sel, ARRAY_SIZE(pcie_ctrl_sel)); + hws[IMX7D_PCIE_PHY_ROOT_SRC] = imx_clk_hw_mux2("pcie_phy_src", base + 0xa200, 24, 3, pcie_phy_sel, ARRAY_SIZE(pcie_phy_sel)); + hws[IMX7D_EPDC_PIXEL_ROOT_SRC] = imx_clk_hw_mux2("epdc_pixel_src", base + 0xa280, 24, 3, epdc_pixel_sel, ARRAY_SIZE(epdc_pixel_sel)); + hws[IMX7D_LCDIF_PIXEL_ROOT_SRC] = imx_clk_hw_mux2("lcdif_pixel_src", base + 0xa300, 24, 3, lcdif_pixel_sel, ARRAY_SIZE(lcdif_pixel_sel)); + hws[IMX7D_MIPI_DSI_ROOT_SRC] = imx_clk_hw_mux2("mipi_dsi_src", base + 0xa380, 24, 3, mipi_dsi_sel, ARRAY_SIZE(mipi_dsi_sel)); + hws[IMX7D_MIPI_CSI_ROOT_SRC] = imx_clk_hw_mux2("mipi_csi_src", base + 0xa400, 24, 3, mipi_csi_sel, ARRAY_SIZE(mipi_csi_sel)); + hws[IMX7D_MIPI_DPHY_ROOT_SRC] = imx_clk_hw_mux2("mipi_dphy_src", base + 0xa480, 24, 3, mipi_dphy_sel, ARRAY_SIZE(mipi_dphy_sel)); + hws[IMX7D_SAI1_ROOT_SRC] = imx_clk_hw_mux2("sai1_src", base + 0xa500, 24, 3, sai1_sel, ARRAY_SIZE(sai1_sel)); + hws[IMX7D_SAI2_ROOT_SRC] = imx_clk_hw_mux2("sai2_src", base + 0xa580, 24, 3, sai2_sel, 
ARRAY_SIZE(sai2_sel)); + hws[IMX7D_SAI3_ROOT_SRC] = imx_clk_hw_mux2("sai3_src", base + 0xa600, 24, 3, sai3_sel, ARRAY_SIZE(sai3_sel)); + hws[IMX7D_SPDIF_ROOT_SRC] = imx_clk_hw_mux2("spdif_src", base + 0xa680, 24, 3, spdif_sel, ARRAY_SIZE(spdif_sel)); + hws[IMX7D_ENET1_REF_ROOT_SRC] = imx_clk_hw_mux2("enet1_ref_src", base + 0xa700, 24, 3, enet1_ref_sel, ARRAY_SIZE(enet1_ref_sel)); + hws[IMX7D_ENET1_TIME_ROOT_SRC] = imx_clk_hw_mux2("enet1_time_src", base + 0xa780, 24, 3, enet1_time_sel, ARRAY_SIZE(enet1_time_sel)); + hws[IMX7D_ENET2_REF_ROOT_SRC] = imx_clk_hw_mux2("enet2_ref_src", base + 0xa800, 24, 3, enet2_ref_sel, ARRAY_SIZE(enet2_ref_sel)); + hws[IMX7D_ENET2_TIME_ROOT_SRC] = imx_clk_hw_mux2("enet2_time_src", base + 0xa880, 24, 3, enet2_time_sel, ARRAY_SIZE(enet2_time_sel)); + hws[IMX7D_ENET_PHY_REF_ROOT_SRC] = imx_clk_hw_mux2("enet_phy_ref_src", base + 0xa900, 24, 3, enet_phy_ref_sel, ARRAY_SIZE(enet_phy_ref_sel)); + hws[IMX7D_EIM_ROOT_SRC] = imx_clk_hw_mux2("eim_src", base + 0xa980, 24, 3, eim_sel, ARRAY_SIZE(eim_sel)); + hws[IMX7D_NAND_ROOT_SRC] = imx_clk_hw_mux2("nand_src", base + 0xaa00, 24, 3, nand_sel, ARRAY_SIZE(nand_sel)); + hws[IMX7D_QSPI_ROOT_SRC] = imx_clk_hw_mux2("qspi_src", base + 0xaa80, 24, 3, qspi_sel, ARRAY_SIZE(qspi_sel)); + hws[IMX7D_USDHC1_ROOT_SRC] = imx_clk_hw_mux2("usdhc1_src", base + 0xab00, 24, 3, usdhc1_sel, ARRAY_SIZE(usdhc1_sel)); + hws[IMX7D_USDHC2_ROOT_SRC] = imx_clk_hw_mux2("usdhc2_src", base + 0xab80, 24, 3, usdhc2_sel, ARRAY_SIZE(usdhc2_sel)); + hws[IMX7D_USDHC3_ROOT_SRC] = imx_clk_hw_mux2("usdhc3_src", base + 0xac00, 24, 3, usdhc3_sel, ARRAY_SIZE(usdhc3_sel)); + hws[IMX7D_CAN1_ROOT_SRC] = imx_clk_hw_mux2("can1_src", base + 0xac80, 24, 3, can1_sel, ARRAY_SIZE(can1_sel)); + hws[IMX7D_CAN2_ROOT_SRC] = imx_clk_hw_mux2("can2_src", base + 0xad00, 24, 3, can2_sel, ARRAY_SIZE(can2_sel)); + hws[IMX7D_I2C1_ROOT_SRC] = imx_clk_hw_mux2("i2c1_src", base + 0xad80, 24, 3, i2c1_sel, ARRAY_SIZE(i2c1_sel)); + hws[IMX7D_I2C2_ROOT_SRC] = imx_clk_hw_mux2("i2c2_src", base + 0xae00, 24, 3, i2c2_sel, ARRAY_SIZE(i2c2_sel)); + hws[IMX7D_I2C3_ROOT_SRC] = imx_clk_hw_mux2("i2c3_src", base + 0xae80, 24, 3, i2c3_sel, ARRAY_SIZE(i2c3_sel)); + hws[IMX7D_I2C4_ROOT_SRC] = imx_clk_hw_mux2("i2c4_src", base + 0xaf00, 24, 3, i2c4_sel, ARRAY_SIZE(i2c4_sel)); + hws[IMX7D_UART1_ROOT_SRC] = imx_clk_hw_mux2("uart1_src", base + 0xaf80, 24, 3, uart1_sel, ARRAY_SIZE(uart1_sel)); + hws[IMX7D_UART2_ROOT_SRC] = imx_clk_hw_mux2("uart2_src", base + 0xb000, 24, 3, uart2_sel, ARRAY_SIZE(uart2_sel)); + hws[IMX7D_UART3_ROOT_SRC] = imx_clk_hw_mux2("uart3_src", base + 0xb080, 24, 3, uart3_sel, ARRAY_SIZE(uart3_sel)); + hws[IMX7D_UART4_ROOT_SRC] = imx_clk_hw_mux2("uart4_src", base + 0xb100, 24, 3, uart4_sel, ARRAY_SIZE(uart4_sel)); + hws[IMX7D_UART5_ROOT_SRC] = imx_clk_hw_mux2("uart5_src", base + 0xb180, 24, 3, uart5_sel, ARRAY_SIZE(uart5_sel)); + hws[IMX7D_UART6_ROOT_SRC] = imx_clk_hw_mux2("uart6_src", base + 0xb200, 24, 3, uart6_sel, ARRAY_SIZE(uart6_sel)); + hws[IMX7D_UART7_ROOT_SRC] = imx_clk_hw_mux2("uart7_src", base + 0xb280, 24, 3, uart7_sel, ARRAY_SIZE(uart7_sel)); + hws[IMX7D_ECSPI1_ROOT_SRC] = imx_clk_hw_mux2("ecspi1_src", base + 0xb300, 24, 3, ecspi1_sel, ARRAY_SIZE(ecspi1_sel)); + hws[IMX7D_ECSPI2_ROOT_SRC] = imx_clk_hw_mux2("ecspi2_src", base + 0xb380, 24, 3, ecspi2_sel, ARRAY_SIZE(ecspi2_sel)); + hws[IMX7D_ECSPI3_ROOT_SRC] = imx_clk_hw_mux2("ecspi3_src", base + 0xb400, 24, 3, ecspi3_sel, ARRAY_SIZE(ecspi3_sel)); + hws[IMX7D_ECSPI4_ROOT_SRC] = imx_clk_hw_mux2("ecspi4_src", base + 0xb480, 24, 3, 
ecspi4_sel, ARRAY_SIZE(ecspi4_sel)); + hws[IMX7D_PWM1_ROOT_SRC] = imx_clk_hw_mux2("pwm1_src", base + 0xb500, 24, 3, pwm1_sel, ARRAY_SIZE(pwm1_sel)); + hws[IMX7D_PWM2_ROOT_SRC] = imx_clk_hw_mux2("pwm2_src", base + 0xb580, 24, 3, pwm2_sel, ARRAY_SIZE(pwm2_sel)); + hws[IMX7D_PWM3_ROOT_SRC] = imx_clk_hw_mux2("pwm3_src", base + 0xb600, 24, 3, pwm3_sel, ARRAY_SIZE(pwm3_sel)); + hws[IMX7D_PWM4_ROOT_SRC] = imx_clk_hw_mux2("pwm4_src", base + 0xb680, 24, 3, pwm4_sel, ARRAY_SIZE(pwm4_sel)); + hws[IMX7D_FLEXTIMER1_ROOT_SRC] = imx_clk_hw_mux2("flextimer1_src", base + 0xb700, 24, 3, flextimer1_sel, ARRAY_SIZE(flextimer1_sel)); + hws[IMX7D_FLEXTIMER2_ROOT_SRC] = imx_clk_hw_mux2("flextimer2_src", base + 0xb780, 24, 3, flextimer2_sel, ARRAY_SIZE(flextimer2_sel)); + hws[IMX7D_SIM1_ROOT_SRC] = imx_clk_hw_mux2("sim1_src", base + 0xb800, 24, 3, sim1_sel, ARRAY_SIZE(sim1_sel)); + hws[IMX7D_SIM2_ROOT_SRC] = imx_clk_hw_mux2("sim2_src", base + 0xb880, 24, 3, sim2_sel, ARRAY_SIZE(sim2_sel)); + hws[IMX7D_GPT1_ROOT_SRC] = imx_clk_hw_mux2("gpt1_src", base + 0xb900, 24, 3, gpt1_sel, ARRAY_SIZE(gpt1_sel)); + hws[IMX7D_GPT2_ROOT_SRC] = imx_clk_hw_mux2("gpt2_src", base + 0xb980, 24, 3, gpt2_sel, ARRAY_SIZE(gpt2_sel)); + hws[IMX7D_GPT3_ROOT_SRC] = imx_clk_hw_mux2("gpt3_src", base + 0xba00, 24, 3, gpt3_sel, ARRAY_SIZE(gpt3_sel)); + hws[IMX7D_GPT4_ROOT_SRC] = imx_clk_hw_mux2("gpt4_src", base + 0xba80, 24, 3, gpt4_sel, ARRAY_SIZE(gpt4_sel)); + hws[IMX7D_TRACE_ROOT_SRC] = imx_clk_hw_mux2("trace_src", base + 0xbb00, 24, 3, trace_sel, ARRAY_SIZE(trace_sel)); + hws[IMX7D_WDOG_ROOT_SRC] = imx_clk_hw_mux2("wdog_src", base + 0xbb80, 24, 3, wdog_sel, ARRAY_SIZE(wdog_sel)); + hws[IMX7D_CSI_MCLK_ROOT_SRC] = imx_clk_hw_mux2("csi_mclk_src", base + 0xbc00, 24, 3, csi_mclk_sel, ARRAY_SIZE(csi_mclk_sel)); + hws[IMX7D_AUDIO_MCLK_ROOT_SRC] = imx_clk_hw_mux2("audio_mclk_src", base + 0xbc80, 24, 3, audio_mclk_sel, ARRAY_SIZE(audio_mclk_sel)); + hws[IMX7D_WRCLK_ROOT_SRC] = imx_clk_hw_mux2("wrclk_src", base + 0xbd00, 24, 3, wrclk_sel, ARRAY_SIZE(wrclk_sel)); + hws[IMX7D_CLKO1_ROOT_SRC] = imx_clk_hw_mux2("clko1_src", base + 0xbd80, 24, 3, clko1_sel, ARRAY_SIZE(clko1_sel)); + hws[IMX7D_CLKO2_ROOT_SRC] = imx_clk_hw_mux2("clko2_src", base + 0xbe00, 24, 3, clko2_sel, ARRAY_SIZE(clko2_sel)); + + hws[IMX7D_ARM_A7_ROOT_CG] = imx_clk_hw_gate3("arm_a7_cg", "arm_a7_src", base + 0x8000, 28); + hws[IMX7D_ARM_M4_ROOT_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); + hws[IMX7D_MAIN_AXI_ROOT_CG] = imx_clk_hw_gate3("axi_cg", "axi_src", base + 0x8800, 28); + hws[IMX7D_DISP_AXI_ROOT_CG] = imx_clk_hw_gate3("disp_axi_cg", "disp_axi_src", base + 0x8880, 28); + hws[IMX7D_ENET_AXI_ROOT_CG] = imx_clk_hw_gate3("enet_axi_cg", "enet_axi_src", base + 0x8900, 28); + hws[IMX7D_NAND_USDHC_BUS_ROOT_CG] = imx_clk_hw_gate3("nand_usdhc_cg", "nand_usdhc_src", base + 0x8980, 28); + hws[IMX7D_AHB_CHANNEL_ROOT_CG] = imx_clk_hw_gate3("ahb_cg", "ahb_src", base + 0x9000, 28); + hws[IMX7D_DRAM_PHYM_ROOT_CG] = imx_clk_hw_gate3("dram_phym_cg", "dram_phym_src", base + 0x9800, 28); + hws[IMX7D_DRAM_ROOT_CG] = imx_clk_hw_gate3("dram_cg", "dram_src", base + 0x9880, 28); + hws[IMX7D_DRAM_PHYM_ALT_ROOT_CG] = imx_clk_hw_gate3("dram_phym_alt_cg", "dram_phym_alt_src", base + 0xa000, 28); + hws[IMX7D_DRAM_ALT_ROOT_CG] = imx_clk_hw_gate3("dram_alt_cg", "dram_alt_src", base + 0xa080, 28); + hws[IMX7D_USB_HSIC_ROOT_CG] = imx_clk_hw_gate3("usb_hsic_cg", "usb_hsic_src", base + 0xa100, 28); + hws[IMX7D_PCIE_CTRL_ROOT_CG] = imx_clk_hw_gate3("pcie_ctrl_cg", "pcie_ctrl_src", base + 
0xa180, 28); + hws[IMX7D_PCIE_PHY_ROOT_CG] = imx_clk_hw_gate3("pcie_phy_cg", "pcie_phy_src", base + 0xa200, 28); + hws[IMX7D_EPDC_PIXEL_ROOT_CG] = imx_clk_hw_gate3("epdc_pixel_cg", "epdc_pixel_src", base + 0xa280, 28); + hws[IMX7D_LCDIF_PIXEL_ROOT_CG] = imx_clk_hw_gate3("lcdif_pixel_cg", "lcdif_pixel_src", base + 0xa300, 28); + hws[IMX7D_MIPI_DSI_ROOT_CG] = imx_clk_hw_gate3("mipi_dsi_cg", "mipi_dsi_src", base + 0xa380, 28); + hws[IMX7D_MIPI_CSI_ROOT_CG] = imx_clk_hw_gate3("mipi_csi_cg", "mipi_csi_src", base + 0xa400, 28); + hws[IMX7D_MIPI_DPHY_ROOT_CG] = imx_clk_hw_gate3("mipi_dphy_cg", "mipi_dphy_src", base + 0xa480, 28); + hws[IMX7D_SAI1_ROOT_CG] = imx_clk_hw_gate3("sai1_cg", "sai1_src", base + 0xa500, 28); + hws[IMX7D_SAI2_ROOT_CG] = imx_clk_hw_gate3("sai2_cg", "sai2_src", base + 0xa580, 28); + hws[IMX7D_SAI3_ROOT_CG] = imx_clk_hw_gate3("sai3_cg", "sai3_src", base + 0xa600, 28); + hws[IMX7D_SPDIF_ROOT_CG] = imx_clk_hw_gate3("spdif_cg", "spdif_src", base + 0xa680, 28); + hws[IMX7D_ENET1_REF_ROOT_CG] = imx_clk_hw_gate3("enet1_ref_cg", "enet1_ref_src", base + 0xa700, 28); + hws[IMX7D_ENET1_TIME_ROOT_CG] = imx_clk_hw_gate3("enet1_time_cg", "enet1_time_src", base + 0xa780, 28); + hws[IMX7D_ENET2_REF_ROOT_CG] = imx_clk_hw_gate3("enet2_ref_cg", "enet2_ref_src", base + 0xa800, 28); + hws[IMX7D_ENET2_TIME_ROOT_CG] = imx_clk_hw_gate3("enet2_time_cg", "enet2_time_src", base + 0xa880, 28); + hws[IMX7D_ENET_PHY_REF_ROOT_CG] = imx_clk_hw_gate3("enet_phy_ref_cg", "enet_phy_ref_src", base + 0xa900, 28); + hws[IMX7D_EIM_ROOT_CG] = imx_clk_hw_gate3("eim_cg", "eim_src", base + 0xa980, 28); + hws[IMX7D_NAND_ROOT_CG] = imx_clk_hw_gate3("nand_cg", "nand_src", base + 0xaa00, 28); + hws[IMX7D_QSPI_ROOT_CG] = imx_clk_hw_gate3("qspi_cg", "qspi_src", base + 0xaa80, 28); + hws[IMX7D_USDHC1_ROOT_CG] = imx_clk_hw_gate3("usdhc1_cg", "usdhc1_src", base + 0xab00, 28); + hws[IMX7D_USDHC2_ROOT_CG] = imx_clk_hw_gate3("usdhc2_cg", "usdhc2_src", base + 0xab80, 28); + hws[IMX7D_USDHC3_ROOT_CG] = imx_clk_hw_gate3("usdhc3_cg", "usdhc3_src", base + 0xac00, 28); + hws[IMX7D_CAN1_ROOT_CG] = imx_clk_hw_gate3("can1_cg", "can1_src", base + 0xac80, 28); + hws[IMX7D_CAN2_ROOT_CG] = imx_clk_hw_gate3("can2_cg", "can2_src", base + 0xad00, 28); + hws[IMX7D_I2C1_ROOT_CG] = imx_clk_hw_gate3("i2c1_cg", "i2c1_src", base + 0xad80, 28); + hws[IMX7D_I2C2_ROOT_CG] = imx_clk_hw_gate3("i2c2_cg", "i2c2_src", base + 0xae00, 28); + hws[IMX7D_I2C3_ROOT_CG] = imx_clk_hw_gate3("i2c3_cg", "i2c3_src", base + 0xae80, 28); + hws[IMX7D_I2C4_ROOT_CG] = imx_clk_hw_gate3("i2c4_cg", "i2c4_src", base + 0xaf00, 28); + hws[IMX7D_UART1_ROOT_CG] = imx_clk_hw_gate3("uart1_cg", "uart1_src", base + 0xaf80, 28); + hws[IMX7D_UART2_ROOT_CG] = imx_clk_hw_gate3("uart2_cg", "uart2_src", base + 0xb000, 28); + hws[IMX7D_UART3_ROOT_CG] = imx_clk_hw_gate3("uart3_cg", "uart3_src", base + 0xb080, 28); + hws[IMX7D_UART4_ROOT_CG] = imx_clk_hw_gate3("uart4_cg", "uart4_src", base + 0xb100, 28); + hws[IMX7D_UART5_ROOT_CG] = imx_clk_hw_gate3("uart5_cg", "uart5_src", base + 0xb180, 28); + hws[IMX7D_UART6_ROOT_CG] = imx_clk_hw_gate3("uart6_cg", "uart6_src", base + 0xb200, 28); + hws[IMX7D_UART7_ROOT_CG] = imx_clk_hw_gate3("uart7_cg", "uart7_src", base + 0xb280, 28); + hws[IMX7D_ECSPI1_ROOT_CG] = imx_clk_hw_gate3("ecspi1_cg", "ecspi1_src", base + 0xb300, 28); + hws[IMX7D_ECSPI2_ROOT_CG] = imx_clk_hw_gate3("ecspi2_cg", "ecspi2_src", base + 0xb380, 28); + hws[IMX7D_ECSPI3_ROOT_CG] = imx_clk_hw_gate3("ecspi3_cg", "ecspi3_src", base + 0xb400, 28); + hws[IMX7D_ECSPI4_ROOT_CG] = 
imx_clk_hw_gate3("ecspi4_cg", "ecspi4_src", base + 0xb480, 28); + hws[IMX7D_PWM1_ROOT_CG] = imx_clk_hw_gate3("pwm1_cg", "pwm1_src", base + 0xb500, 28); + hws[IMX7D_PWM2_ROOT_CG] = imx_clk_hw_gate3("pwm2_cg", "pwm2_src", base + 0xb580, 28); + hws[IMX7D_PWM3_ROOT_CG] = imx_clk_hw_gate3("pwm3_cg", "pwm3_src", base + 0xb600, 28); + hws[IMX7D_PWM4_ROOT_CG] = imx_clk_hw_gate3("pwm4_cg", "pwm4_src", base + 0xb680, 28); + hws[IMX7D_FLEXTIMER1_ROOT_CG] = imx_clk_hw_gate3("flextimer1_cg", "flextimer1_src", base + 0xb700, 28); + hws[IMX7D_FLEXTIMER2_ROOT_CG] = imx_clk_hw_gate3("flextimer2_cg", "flextimer2_src", base + 0xb780, 28); + hws[IMX7D_SIM1_ROOT_CG] = imx_clk_hw_gate3("sim1_cg", "sim1_src", base + 0xb800, 28); + hws[IMX7D_SIM2_ROOT_CG] = imx_clk_hw_gate3("sim2_cg", "sim2_src", base + 0xb880, 28); + hws[IMX7D_GPT1_ROOT_CG] = imx_clk_hw_gate3("gpt1_cg", "gpt1_src", base + 0xb900, 28); + hws[IMX7D_GPT2_ROOT_CG] = imx_clk_hw_gate3("gpt2_cg", "gpt2_src", base + 0xb980, 28); + hws[IMX7D_GPT3_ROOT_CG] = imx_clk_hw_gate3("gpt3_cg", "gpt3_src", base + 0xbA00, 28); + hws[IMX7D_GPT4_ROOT_CG] = imx_clk_hw_gate3("gpt4_cg", "gpt4_src", base + 0xbA80, 28); + hws[IMX7D_TRACE_ROOT_CG] = imx_clk_hw_gate3("trace_cg", "trace_src", base + 0xbb00, 28); + hws[IMX7D_WDOG_ROOT_CG] = imx_clk_hw_gate3("wdog_cg", "wdog_src", base + 0xbb80, 28); + hws[IMX7D_CSI_MCLK_ROOT_CG] = imx_clk_hw_gate3("csi_mclk_cg", "csi_mclk_src", base + 0xbc00, 28); + hws[IMX7D_AUDIO_MCLK_ROOT_CG] = imx_clk_hw_gate3("audio_mclk_cg", "audio_mclk_src", base + 0xbc80, 28); + hws[IMX7D_WRCLK_ROOT_CG] = imx_clk_hw_gate3("wrclk_cg", "wrclk_src", base + 0xbd00, 28); + hws[IMX7D_CLKO1_ROOT_CG] = imx_clk_hw_gate3("clko1_cg", "clko1_src", base + 0xbd80, 28); + hws[IMX7D_CLKO2_ROOT_CG] = imx_clk_hw_gate3("clko2_cg", "clko2_src", base + 0xbe00, 28); + + hws[IMX7D_MAIN_AXI_ROOT_PRE_DIV] = imx_clk_hw_divider2("axi_pre_div", "axi_cg", base + 0x8800, 16, 3); + hws[IMX7D_DISP_AXI_ROOT_PRE_DIV] = imx_clk_hw_divider2("disp_axi_pre_div", "disp_axi_cg", base + 0x8880, 16, 3); + hws[IMX7D_ENET_AXI_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet_axi_pre_div", "enet_axi_cg", base + 0x8900, 16, 3); + hws[IMX7D_NAND_USDHC_BUS_ROOT_PRE_DIV] = imx_clk_hw_divider2("nand_usdhc_pre_div", "nand_usdhc_cg", base + 0x8980, 16, 3); + hws[IMX7D_AHB_CHANNEL_ROOT_PRE_DIV] = imx_clk_hw_divider2("ahb_pre_div", "ahb_cg", base + 0x9000, 16, 3); + hws[IMX7D_DRAM_PHYM_ALT_ROOT_PRE_DIV] = imx_clk_hw_divider2("dram_phym_alt_pre_div", "dram_phym_alt_cg", base + 0xa000, 16, 3); + hws[IMX7D_DRAM_ALT_ROOT_PRE_DIV] = imx_clk_hw_divider2("dram_alt_pre_div", "dram_alt_cg", base + 0xa080, 16, 3); + hws[IMX7D_USB_HSIC_ROOT_PRE_DIV] = imx_clk_hw_divider2("usb_hsic_pre_div", "usb_hsic_cg", base + 0xa100, 16, 3); + hws[IMX7D_PCIE_CTRL_ROOT_PRE_DIV] = imx_clk_hw_divider2("pcie_ctrl_pre_div", "pcie_ctrl_cg", base + 0xa180, 16, 3); + hws[IMX7D_PCIE_PHY_ROOT_PRE_DIV] = imx_clk_hw_divider2("pcie_phy_pre_div", "pcie_phy_cg", base + 0xa200, 16, 3); + hws[IMX7D_EPDC_PIXEL_ROOT_PRE_DIV] = imx_clk_hw_divider2("epdc_pixel_pre_div", "epdc_pixel_cg", base + 0xa280, 16, 3); + hws[IMX7D_LCDIF_PIXEL_ROOT_PRE_DIV] = imx_clk_hw_divider2("lcdif_pixel_pre_div", "lcdif_pixel_cg", base + 0xa300, 16, 3); + hws[IMX7D_MIPI_DSI_ROOT_PRE_DIV] = imx_clk_hw_divider2("mipi_dsi_pre_div", "mipi_dsi_cg", base + 0xa380, 16, 3); + hws[IMX7D_MIPI_CSI_ROOT_PRE_DIV] = imx_clk_hw_divider2("mipi_csi_pre_div", "mipi_csi_cg", base + 0xa400, 16, 3); + hws[IMX7D_MIPI_DPHY_ROOT_PRE_DIV] = imx_clk_hw_divider2("mipi_dphy_pre_div", "mipi_dphy_cg", base + 
0xa480, 16, 3); + hws[IMX7D_SAI1_ROOT_PRE_DIV] = imx_clk_hw_divider2("sai1_pre_div", "sai1_cg", base + 0xa500, 16, 3); + hws[IMX7D_SAI2_ROOT_PRE_DIV] = imx_clk_hw_divider2("sai2_pre_div", "sai2_cg", base + 0xa580, 16, 3); + hws[IMX7D_SAI3_ROOT_PRE_DIV] = imx_clk_hw_divider2("sai3_pre_div", "sai3_cg", base + 0xa600, 16, 3); + hws[IMX7D_SPDIF_ROOT_PRE_DIV] = imx_clk_hw_divider2("spdif_pre_div", "spdif_cg", base + 0xa680, 16, 3); + hws[IMX7D_ENET1_REF_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet1_ref_pre_div", "enet1_ref_cg", base + 0xa700, 16, 3); + hws[IMX7D_ENET1_TIME_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet1_time_pre_div", "enet1_time_cg", base + 0xa780, 16, 3); + hws[IMX7D_ENET2_REF_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet2_ref_pre_div", "enet2_ref_cg", base + 0xa800, 16, 3); + hws[IMX7D_ENET2_TIME_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet2_time_pre_div", "enet2_time_cg", base + 0xa880, 16, 3); + hws[IMX7D_ENET_PHY_REF_ROOT_PRE_DIV] = imx_clk_hw_divider2("enet_phy_ref_pre_div", "enet_phy_ref_cg", base + 0xa900, 16, 3); + hws[IMX7D_EIM_ROOT_PRE_DIV] = imx_clk_hw_divider2("eim_pre_div", "eim_cg", base + 0xa980, 16, 3); + hws[IMX7D_NAND_ROOT_PRE_DIV] = imx_clk_hw_divider2("nand_pre_div", "nand_cg", base + 0xaa00, 16, 3); + hws[IMX7D_QSPI_ROOT_PRE_DIV] = imx_clk_hw_divider2("qspi_pre_div", "qspi_cg", base + 0xaa80, 16, 3); + hws[IMX7D_USDHC1_ROOT_PRE_DIV] = imx_clk_hw_divider2("usdhc1_pre_div", "usdhc1_cg", base + 0xab00, 16, 3); + hws[IMX7D_USDHC2_ROOT_PRE_DIV] = imx_clk_hw_divider2("usdhc2_pre_div", "usdhc2_cg", base + 0xab80, 16, 3); + hws[IMX7D_USDHC3_ROOT_PRE_DIV] = imx_clk_hw_divider2("usdhc3_pre_div", "usdhc3_cg", base + 0xac00, 16, 3); + hws[IMX7D_CAN1_ROOT_PRE_DIV] = imx_clk_hw_divider2("can1_pre_div", "can1_cg", base + 0xac80, 16, 3); + hws[IMX7D_CAN2_ROOT_PRE_DIV] = imx_clk_hw_divider2("can2_pre_div", "can2_cg", base + 0xad00, 16, 3); + hws[IMX7D_I2C1_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c1_pre_div", "i2c1_cg", base + 0xad80, 16, 3); + hws[IMX7D_I2C2_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c2_pre_div", "i2c2_cg", base + 0xae00, 16, 3); + hws[IMX7D_I2C3_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c3_pre_div", "i2c3_cg", base + 0xae80, 16, 3); + hws[IMX7D_I2C4_ROOT_PRE_DIV] = imx_clk_hw_divider2("i2c4_pre_div", "i2c4_cg", base + 0xaf00, 16, 3); + hws[IMX7D_UART1_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart1_pre_div", "uart1_cg", base + 0xaf80, 16, 3); + hws[IMX7D_UART2_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart2_pre_div", "uart2_cg", base + 0xb000, 16, 3); + hws[IMX7D_UART3_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart3_pre_div", "uart3_cg", base + 0xb080, 16, 3); + hws[IMX7D_UART4_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart4_pre_div", "uart4_cg", base + 0xb100, 16, 3); + hws[IMX7D_UART5_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart5_pre_div", "uart5_cg", base + 0xb180, 16, 3); + hws[IMX7D_UART6_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart6_pre_div", "uart6_cg", base + 0xb200, 16, 3); + hws[IMX7D_UART7_ROOT_PRE_DIV] = imx_clk_hw_divider2("uart7_pre_div", "uart7_cg", base + 0xb280, 16, 3); + hws[IMX7D_ECSPI1_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi1_pre_div", "ecspi1_cg", base + 0xb300, 16, 3); + hws[IMX7D_ECSPI2_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi2_pre_div", "ecspi2_cg", base + 0xb380, 16, 3); + hws[IMX7D_ECSPI3_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi3_pre_div", "ecspi3_cg", base + 0xb400, 16, 3); + hws[IMX7D_ECSPI4_ROOT_PRE_DIV] = imx_clk_hw_divider2("ecspi4_pre_div", "ecspi4_cg", base + 0xb480, 16, 3); + hws[IMX7D_PWM1_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm1_pre_div", "pwm1_cg", base + 
0xb500, 16, 3); + hws[IMX7D_PWM2_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm2_pre_div", "pwm2_cg", base + 0xb580, 16, 3); + hws[IMX7D_PWM3_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm3_pre_div", "pwm3_cg", base + 0xb600, 16, 3); + hws[IMX7D_PWM4_ROOT_PRE_DIV] = imx_clk_hw_divider2("pwm4_pre_div", "pwm4_cg", base + 0xb680, 16, 3); + hws[IMX7D_FLEXTIMER1_ROOT_PRE_DIV] = imx_clk_hw_divider2("flextimer1_pre_div", "flextimer1_cg", base + 0xb700, 16, 3); + hws[IMX7D_FLEXTIMER2_ROOT_PRE_DIV] = imx_clk_hw_divider2("flextimer2_pre_div", "flextimer2_cg", base + 0xb780, 16, 3); + hws[IMX7D_SIM1_ROOT_PRE_DIV] = imx_clk_hw_divider2("sim1_pre_div", "sim1_cg", base + 0xb800, 16, 3); + hws[IMX7D_SIM2_ROOT_PRE_DIV] = imx_clk_hw_divider2("sim2_pre_div", "sim2_cg", base + 0xb880, 16, 3); + hws[IMX7D_GPT1_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt1_pre_div", "gpt1_cg", base + 0xb900, 16, 3); + hws[IMX7D_GPT2_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt2_pre_div", "gpt2_cg", base + 0xb980, 16, 3); + hws[IMX7D_GPT3_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt3_pre_div", "gpt3_cg", base + 0xba00, 16, 3); + hws[IMX7D_GPT4_ROOT_PRE_DIV] = imx_clk_hw_divider2("gpt4_pre_div", "gpt4_cg", base + 0xba80, 16, 3); + hws[IMX7D_TRACE_ROOT_PRE_DIV] = imx_clk_hw_divider2("trace_pre_div", "trace_cg", base + 0xbb00, 16, 3); + hws[IMX7D_WDOG_ROOT_PRE_DIV] = imx_clk_hw_divider2("wdog_pre_div", "wdog_cg", base + 0xbb80, 16, 3); + hws[IMX7D_CSI_MCLK_ROOT_PRE_DIV] = imx_clk_hw_divider2("csi_mclk_pre_div", "csi_mclk_cg", base + 0xbc00, 16, 3); + hws[IMX7D_AUDIO_MCLK_ROOT_PRE_DIV] = imx_clk_hw_divider2("audio_mclk_pre_div", "audio_mclk_cg", base + 0xbc80, 16, 3); + hws[IMX7D_WRCLK_ROOT_PRE_DIV] = imx_clk_hw_divider2("wrclk_pre_div", "wrclk_cg", base + 0xbd00, 16, 3); + hws[IMX7D_CLKO1_ROOT_PRE_DIV] = imx_clk_hw_divider2("clko1_pre_div", "clko1_cg", base + 0xbd80, 16, 3); + hws[IMX7D_CLKO2_ROOT_PRE_DIV] = imx_clk_hw_divider2("clko2_pre_div", "clko2_cg", base + 0xbe00, 16, 3); + + hws[IMX7D_ARM_A7_ROOT_DIV] = imx_clk_hw_divider2("arm_a7_div", "arm_a7_cg", base + 0x8000, 0, 3); + hws[IMX7D_ARM_M4_ROOT_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); + hws[IMX7D_MAIN_AXI_ROOT_DIV] = imx_clk_hw_divider2("axi_post_div", "axi_pre_div", base + 0x8800, 0, 6); + hws[IMX7D_DISP_AXI_ROOT_DIV] = imx_clk_hw_divider2("disp_axi_post_div", "disp_axi_pre_div", base + 0x8880, 0, 6); + hws[IMX7D_ENET_AXI_ROOT_DIV] = imx_clk_hw_divider2("enet_axi_post_div", "enet_axi_pre_div", base + 0x8900, 0, 6); + hws[IMX7D_NAND_USDHC_BUS_ROOT_CLK] = imx_clk_hw_divider2("nand_usdhc_root_clk", "nand_usdhc_pre_div", base + 0x8980, 0, 6); + hws[IMX7D_AHB_CHANNEL_ROOT_DIV] = imx_clk_hw_divider2("ahb_root_clk", "ahb_pre_div", base + 0x9000, 0, 6); + hws[IMX7D_IPG_ROOT_CLK] = imx_clk_hw_divider_flags("ipg_root_clk", "ahb_root_clk", base + 0x9080, 0, 2, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_PARENT); + hws[IMX7D_DRAM_ROOT_DIV] = imx_clk_hw_divider2("dram_post_div", "dram_cg", base + 0x9880, 0, 3); + hws[IMX7D_DRAM_PHYM_ALT_ROOT_DIV] = imx_clk_hw_divider2("dram_phym_alt_post_div", "dram_phym_alt_pre_div", base + 0xa000, 0, 3); + hws[IMX7D_DRAM_ALT_ROOT_DIV] = imx_clk_hw_divider2("dram_alt_post_div", "dram_alt_pre_div", base + 0xa080, 0, 3); + hws[IMX7D_USB_HSIC_ROOT_DIV] = imx_clk_hw_divider2("usb_hsic_post_div", "usb_hsic_pre_div", base + 0xa100, 0, 6); + hws[IMX7D_PCIE_CTRL_ROOT_DIV] = imx_clk_hw_divider2("pcie_ctrl_post_div", "pcie_ctrl_pre_div", base + 0xa180, 0, 6); + hws[IMX7D_PCIE_PHY_ROOT_DIV] = imx_clk_hw_divider2("pcie_phy_post_div", 
"pcie_phy_pre_div", base + 0xa200, 0, 6); + hws[IMX7D_EPDC_PIXEL_ROOT_DIV] = imx_clk_hw_divider2("epdc_pixel_post_div", "epdc_pixel_pre_div", base + 0xa280, 0, 6); + hws[IMX7D_LCDIF_PIXEL_ROOT_DIV] = imx_clk_hw_divider2("lcdif_pixel_post_div", "lcdif_pixel_pre_div", base + 0xa300, 0, 6); + hws[IMX7D_MIPI_DSI_ROOT_DIV] = imx_clk_hw_divider2("mipi_dsi_post_div", "mipi_dsi_pre_div", base + 0xa380, 0, 6); + hws[IMX7D_MIPI_CSI_ROOT_DIV] = imx_clk_hw_divider2("mipi_csi_post_div", "mipi_csi_pre_div", base + 0xa400, 0, 6); + hws[IMX7D_MIPI_DPHY_ROOT_DIV] = imx_clk_hw_divider2("mipi_dphy_post_div", "mipi_dphy_pre_div", base + 0xa480, 0, 6); + hws[IMX7D_SAI1_ROOT_DIV] = imx_clk_hw_divider2("sai1_post_div", "sai1_pre_div", base + 0xa500, 0, 6); + hws[IMX7D_SAI2_ROOT_DIV] = imx_clk_hw_divider2("sai2_post_div", "sai2_pre_div", base + 0xa580, 0, 6); + hws[IMX7D_SAI3_ROOT_DIV] = imx_clk_hw_divider2("sai3_post_div", "sai3_pre_div", base + 0xa600, 0, 6); + hws[IMX7D_SPDIF_ROOT_DIV] = imx_clk_hw_divider2("spdif_post_div", "spdif_pre_div", base + 0xa680, 0, 6); + hws[IMX7D_ENET1_REF_ROOT_DIV] = imx_clk_hw_divider2("enet1_ref_post_div", "enet1_ref_pre_div", base + 0xa700, 0, 6); + hws[IMX7D_ENET1_TIME_ROOT_DIV] = imx_clk_hw_divider2("enet1_time_post_div", "enet1_time_pre_div", base + 0xa780, 0, 6); + hws[IMX7D_ENET2_REF_ROOT_DIV] = imx_clk_hw_divider2("enet2_ref_post_div", "enet2_ref_pre_div", base + 0xa800, 0, 6); + hws[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_hw_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6); + hws[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_hw_divider2("enet_phy_ref_root_clk", "enet_phy_ref_pre_div", base + 0xa900, 0, 6); + hws[IMX7D_EIM_ROOT_DIV] = imx_clk_hw_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6); + hws[IMX7D_NAND_ROOT_CLK] = imx_clk_hw_divider2("nand_root_clk", "nand_pre_div", base + 0xaa00, 0, 6); + hws[IMX7D_QSPI_ROOT_DIV] = imx_clk_hw_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6); + hws[IMX7D_USDHC1_ROOT_DIV] = imx_clk_hw_divider2("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6); + hws[IMX7D_USDHC2_ROOT_DIV] = imx_clk_hw_divider2("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6); + hws[IMX7D_USDHC3_ROOT_DIV] = imx_clk_hw_divider2("usdhc3_post_div", "usdhc3_pre_div", base + 0xac00, 0, 6); + hws[IMX7D_CAN1_ROOT_DIV] = imx_clk_hw_divider2("can1_post_div", "can1_pre_div", base + 0xac80, 0, 6); + hws[IMX7D_CAN2_ROOT_DIV] = imx_clk_hw_divider2("can2_post_div", "can2_pre_div", base + 0xad00, 0, 6); + hws[IMX7D_I2C1_ROOT_DIV] = imx_clk_hw_divider2("i2c1_post_div", "i2c1_pre_div", base + 0xad80, 0, 6); + hws[IMX7D_I2C2_ROOT_DIV] = imx_clk_hw_divider2("i2c2_post_div", "i2c2_pre_div", base + 0xae00, 0, 6); + hws[IMX7D_I2C3_ROOT_DIV] = imx_clk_hw_divider2("i2c3_post_div", "i2c3_pre_div", base + 0xae80, 0, 6); + hws[IMX7D_I2C4_ROOT_DIV] = imx_clk_hw_divider2("i2c4_post_div", "i2c4_pre_div", base + 0xaf00, 0, 6); + hws[IMX7D_UART1_ROOT_DIV] = imx_clk_hw_divider2("uart1_post_div", "uart1_pre_div", base + 0xaf80, 0, 6); + hws[IMX7D_UART2_ROOT_DIV] = imx_clk_hw_divider2("uart2_post_div", "uart2_pre_div", base + 0xb000, 0, 6); + hws[IMX7D_UART3_ROOT_DIV] = imx_clk_hw_divider2("uart3_post_div", "uart3_pre_div", base + 0xb080, 0, 6); + hws[IMX7D_UART4_ROOT_DIV] = imx_clk_hw_divider2("uart4_post_div", "uart4_pre_div", base + 0xb100, 0, 6); + hws[IMX7D_UART5_ROOT_DIV] = imx_clk_hw_divider2("uart5_post_div", "uart5_pre_div", base + 0xb180, 0, 6); + hws[IMX7D_UART6_ROOT_DIV] = imx_clk_hw_divider2("uart6_post_div", "uart6_pre_div", 
base + 0xb200, 0, 6); + hws[IMX7D_UART7_ROOT_DIV] = imx_clk_hw_divider2("uart7_post_div", "uart7_pre_div", base + 0xb280, 0, 6); + hws[IMX7D_ECSPI1_ROOT_DIV] = imx_clk_hw_divider2("ecspi1_post_div", "ecspi1_pre_div", base + 0xb300, 0, 6); + hws[IMX7D_ECSPI2_ROOT_DIV] = imx_clk_hw_divider2("ecspi2_post_div", "ecspi2_pre_div", base + 0xb380, 0, 6); + hws[IMX7D_ECSPI3_ROOT_DIV] = imx_clk_hw_divider2("ecspi3_post_div", "ecspi3_pre_div", base + 0xb400, 0, 6); + hws[IMX7D_ECSPI4_ROOT_DIV] = imx_clk_hw_divider2("ecspi4_post_div", "ecspi4_pre_div", base + 0xb480, 0, 6); + hws[IMX7D_PWM1_ROOT_DIV] = imx_clk_hw_divider2("pwm1_post_div", "pwm1_pre_div", base + 0xb500, 0, 6); + hws[IMX7D_PWM2_ROOT_DIV] = imx_clk_hw_divider2("pwm2_post_div", "pwm2_pre_div", base + 0xb580, 0, 6); + hws[IMX7D_PWM3_ROOT_DIV] = imx_clk_hw_divider2("pwm3_post_div", "pwm3_pre_div", base + 0xb600, 0, 6); + hws[IMX7D_PWM4_ROOT_DIV] = imx_clk_hw_divider2("pwm4_post_div", "pwm4_pre_div", base + 0xb680, 0, 6); + hws[IMX7D_FLEXTIMER1_ROOT_DIV] = imx_clk_hw_divider2("flextimer1_post_div", "flextimer1_pre_div", base + 0xb700, 0, 6); + hws[IMX7D_FLEXTIMER2_ROOT_DIV] = imx_clk_hw_divider2("flextimer2_post_div", "flextimer2_pre_div", base + 0xb780, 0, 6); + hws[IMX7D_SIM1_ROOT_DIV] = imx_clk_hw_divider2("sim1_post_div", "sim1_pre_div", base + 0xb800, 0, 6); + hws[IMX7D_SIM2_ROOT_DIV] = imx_clk_hw_divider2("sim2_post_div", "sim2_pre_div", base + 0xb880, 0, 6); + hws[IMX7D_GPT1_ROOT_DIV] = imx_clk_hw_divider2("gpt1_post_div", "gpt1_pre_div", base + 0xb900, 0, 6); + hws[IMX7D_GPT2_ROOT_DIV] = imx_clk_hw_divider2("gpt2_post_div", "gpt2_pre_div", base + 0xb980, 0, 6); + hws[IMX7D_GPT3_ROOT_DIV] = imx_clk_hw_divider2("gpt3_post_div", "gpt3_pre_div", base + 0xba00, 0, 6); + hws[IMX7D_GPT4_ROOT_DIV] = imx_clk_hw_divider2("gpt4_post_div", "gpt4_pre_div", base + 0xba80, 0, 6); + hws[IMX7D_TRACE_ROOT_DIV] = imx_clk_hw_divider2("trace_post_div", "trace_pre_div", base + 0xbb00, 0, 6); + hws[IMX7D_WDOG_ROOT_DIV] = imx_clk_hw_divider2("wdog_post_div", "wdog_pre_div", base + 0xbb80, 0, 6); + hws[IMX7D_CSI_MCLK_ROOT_DIV] = imx_clk_hw_divider2("csi_mclk_post_div", "csi_mclk_pre_div", base + 0xbc00, 0, 6); + hws[IMX7D_AUDIO_MCLK_ROOT_DIV] = imx_clk_hw_divider2("audio_mclk_post_div", "audio_mclk_pre_div", base + 0xbc80, 0, 6); + hws[IMX7D_WRCLK_ROOT_DIV] = imx_clk_hw_divider2("wrclk_post_div", "wrclk_pre_div", base + 0xbd00, 0, 6); + hws[IMX7D_CLKO1_ROOT_DIV] = imx_clk_hw_divider2("clko1_post_div", "clko1_pre_div", base + 0xbd80, 0, 6); + hws[IMX7D_CLKO2_ROOT_DIV] = imx_clk_hw_divider2("clko2_post_div", "clko2_pre_div", base + 0xbe00, 0, 6); + + hws[IMX7D_ARM_A7_ROOT_CLK] = imx_clk_hw_gate2_flags("arm_a7_root_clk", "arm_a7_div", base + 0x4000, 0, CLK_OPS_PARENT_ENABLE); + hws[IMX7D_ARM_M4_ROOT_CLK] = imx_clk_hw_gate4("arm_m4_root_clk", "arm_m4_div", base + 0x4010, 0); + hws[IMX7D_MAIN_AXI_ROOT_CLK] = imx_clk_hw_gate2_flags("main_axi_root_clk", "axi_post_div", base + 0x4040, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + hws[IMX7D_DISP_AXI_ROOT_CLK] = imx_clk_hw_gate4("disp_axi_root_clk", "disp_axi_post_div", base + 0x4050, 0); + hws[IMX7D_ENET_AXI_ROOT_CLK] = imx_clk_hw_gate4("enet_axi_root_clk", "enet_axi_post_div", base + 0x4060, 0); + hws[IMX7D_OCRAM_CLK] = imx_clk_hw_gate4("ocram_clk", "main_axi_root_clk", base + 0x4110, 0); + hws[IMX7D_OCRAM_S_CLK] = imx_clk_hw_gate4("ocram_s_clk", "ahb_root_clk", base + 0x4120, 0); + hws[IMX7D_DRAM_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_root_clk", "dram_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | 
CLK_OPS_PARENT_ENABLE); + hws[IMX7D_DRAM_PHYM_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_phym_root_clk", "dram_phym_cg", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + hws[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + hws[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); + hws[IMX7D_OCOTP_CLK] = imx_clk_hw_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0); + hws[IMX7D_SNVS_CLK] = imx_clk_hw_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0); + hws[IMX7D_MU_ROOT_CLK] = imx_clk_hw_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0); + hws[IMX7D_CAAM_CLK] = imx_clk_hw_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0); + hws[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_hw_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0); + hws[IMX7D_SDMA_CORE_CLK] = imx_clk_hw_gate4("sdma_root_clk", "ahb_root_clk", base + 0x4480, 0); + hws[IMX7D_PCIE_CTRL_ROOT_CLK] = imx_clk_hw_gate4("pcie_ctrl_root_clk", "pcie_ctrl_post_div", base + 0x4600, 0); + hws[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_hw_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0); + hws[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0); + hws[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0); + hws[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0); + hws[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0); + hws[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_hw_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0); + hws[IMX7D_ENET1_IPG_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet1_ipg_root_clk", "enet_axi_post_div", base + 0x4700, 0, &share_count_enet1); + hws[IMX7D_ENET1_TIME_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet1_time_root_clk", "enet1_time_post_div", base + 0x4700, 0, &share_count_enet1); + hws[IMX7D_ENET2_IPG_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet2_ipg_root_clk", "enet_axi_post_div", base + 0x4710, 0, &share_count_enet2); + hws[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_hw_gate2_shared2("enet2_time_root_clk", "enet2_time_post_div", base + 0x4710, 0, &share_count_enet2); + hws[IMX7D_SAI1_ROOT_CLK] = imx_clk_hw_gate2_shared2("sai1_root_clk", "sai1_post_div", base + 0x48c0, 0, &share_count_sai1); + hws[IMX7D_SAI1_IPG_CLK] = imx_clk_hw_gate2_shared2("sai1_ipg_clk", "ipg_root_clk", base + 0x48c0, 0, &share_count_sai1); + hws[IMX7D_SAI2_ROOT_CLK] = imx_clk_hw_gate2_shared2("sai2_root_clk", "sai2_post_div", base + 0x48d0, 0, &share_count_sai2); + hws[IMX7D_SAI2_IPG_CLK] = imx_clk_hw_gate2_shared2("sai2_ipg_clk", "ipg_root_clk", base + 0x48d0, 0, &share_count_sai2); + hws[IMX7D_SAI3_ROOT_CLK] = imx_clk_hw_gate2_shared2("sai3_root_clk", "sai3_post_div", base + 0x48e0, 0, &share_count_sai3); + hws[IMX7D_SAI3_IPG_CLK] = imx_clk_hw_gate2_shared2("sai3_ipg_clk", "ipg_root_clk", base + 0x48e0, 0, &share_count_sai3); + hws[IMX7D_SPDIF_ROOT_CLK] = imx_clk_hw_gate4("spdif_root_clk", "spdif_post_div", base + 0x44d0, 0); + hws[IMX7D_EIM_ROOT_CLK] = imx_clk_hw_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0); + hws[IMX7D_NAND_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_rawnand_clk", "nand_root_clk", base + 0x4140, 0, &share_count_nand); + 
hws[IMX7D_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_hw_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_root_clk", base + 0x4140, 0, &share_count_nand); + hws[IMX7D_QSPI_ROOT_CLK] = imx_clk_hw_gate4("qspi_root_clk", "qspi_post_div", base + 0x4150, 0); + hws[IMX7D_USDHC1_ROOT_CLK] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0); + hws[IMX7D_USDHC2_ROOT_CLK] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0); + hws[IMX7D_USDHC3_ROOT_CLK] = imx_clk_hw_gate4("usdhc3_root_clk", "usdhc3_post_div", base + 0x46e0, 0); + hws[IMX7D_CAN1_ROOT_CLK] = imx_clk_hw_gate4("can1_root_clk", "can1_post_div", base + 0x4740, 0); + hws[IMX7D_CAN2_ROOT_CLK] = imx_clk_hw_gate4("can2_root_clk", "can2_post_div", base + 0x4750, 0); + hws[IMX7D_I2C1_ROOT_CLK] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1_post_div", base + 0x4880, 0); + hws[IMX7D_I2C2_ROOT_CLK] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2_post_div", base + 0x4890, 0); + hws[IMX7D_I2C3_ROOT_CLK] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3_post_div", base + 0x48a0, 0); + hws[IMX7D_I2C4_ROOT_CLK] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4_post_div", base + 0x48b0, 0); + hws[IMX7D_UART1_ROOT_CLK] = imx_clk_hw_gate4("uart1_root_clk", "uart1_post_div", base + 0x4940, 0); + hws[IMX7D_UART2_ROOT_CLK] = imx_clk_hw_gate4("uart2_root_clk", "uart2_post_div", base + 0x4950, 0); + hws[IMX7D_UART3_ROOT_CLK] = imx_clk_hw_gate4("uart3_root_clk", "uart3_post_div", base + 0x4960, 0); + hws[IMX7D_UART4_ROOT_CLK] = imx_clk_hw_gate4("uart4_root_clk", "uart4_post_div", base + 0x4970, 0); + hws[IMX7D_UART5_ROOT_CLK] = imx_clk_hw_gate4("uart5_root_clk", "uart5_post_div", base + 0x4980, 0); + hws[IMX7D_UART6_ROOT_CLK] = imx_clk_hw_gate4("uart6_root_clk", "uart6_post_div", base + 0x4990, 0); + hws[IMX7D_UART7_ROOT_CLK] = imx_clk_hw_gate4("uart7_root_clk", "uart7_post_div", base + 0x49a0, 0); + hws[IMX7D_ECSPI1_ROOT_CLK] = imx_clk_hw_gate4("ecspi1_root_clk", "ecspi1_post_div", base + 0x4780, 0); + hws[IMX7D_ECSPI2_ROOT_CLK] = imx_clk_hw_gate4("ecspi2_root_clk", "ecspi2_post_div", base + 0x4790, 0); + hws[IMX7D_ECSPI3_ROOT_CLK] = imx_clk_hw_gate4("ecspi3_root_clk", "ecspi3_post_div", base + 0x47a0, 0); + hws[IMX7D_ECSPI4_ROOT_CLK] = imx_clk_hw_gate4("ecspi4_root_clk", "ecspi4_post_div", base + 0x47b0, 0); + hws[IMX7D_PWM1_ROOT_CLK] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1_post_div", base + 0x4840, 0); + hws[IMX7D_PWM2_ROOT_CLK] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2_post_div", base + 0x4850, 0); + hws[IMX7D_PWM3_ROOT_CLK] = imx_clk_hw_gate4("pwm3_root_clk", "pwm3_post_div", base + 0x4860, 0); + hws[IMX7D_PWM4_ROOT_CLK] = imx_clk_hw_gate4("pwm4_root_clk", "pwm4_post_div", base + 0x4870, 0); + hws[IMX7D_FLEXTIMER1_ROOT_CLK] = imx_clk_hw_gate4("flextimer1_root_clk", "flextimer1_post_div", base + 0x4800, 0); + hws[IMX7D_FLEXTIMER2_ROOT_CLK] = imx_clk_hw_gate4("flextimer2_root_clk", "flextimer2_post_div", base + 0x4810, 0); + hws[IMX7D_SIM1_ROOT_CLK] = imx_clk_hw_gate4("sim1_root_clk", "sim1_post_div", base + 0x4900, 0); + hws[IMX7D_SIM2_ROOT_CLK] = imx_clk_hw_gate4("sim2_root_clk", "sim2_post_div", base + 0x4910, 0); + hws[IMX7D_GPT1_ROOT_CLK] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1_post_div", base + 0x47c0, 0); + hws[IMX7D_GPT2_ROOT_CLK] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2_post_div", base + 0x47d0, 0); + hws[IMX7D_GPT3_ROOT_CLK] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3_post_div", base + 0x47e0, 0); + hws[IMX7D_GPT4_ROOT_CLK] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4_post_div", base + 0x47f0, 0); + hws[IMX7D_TRACE_ROOT_CLK] = 
imx_clk_hw_gate4("trace_root_clk", "trace_post_div", base + 0x4300, 0); + hws[IMX7D_WDOG1_ROOT_CLK] = imx_clk_hw_gate4("wdog1_root_clk", "wdog_post_div", base + 0x49c0, 0); + hws[IMX7D_WDOG2_ROOT_CLK] = imx_clk_hw_gate4("wdog2_root_clk", "wdog_post_div", base + 0x49d0, 0); + hws[IMX7D_WDOG3_ROOT_CLK] = imx_clk_hw_gate4("wdog3_root_clk", "wdog_post_div", base + 0x49e0, 0); + hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); + hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0); + hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); + hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); + hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); + hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0); + hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0); + hws[IMX7D_USB_PHY2_CLK] = imx_clk_hw_gate4("usb_phy2_clk", "pll_usb_main_clk", base + 0x46b0, 0); + hws[IMX7D_ADC_ROOT_CLK] = imx_clk_hw_gate4("adc_root_clk", "ipg_root_clk", base + 0x4200, 0); + + hws[IMX7D_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc", 1, 8); + + hws[IMX7D_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a7_root_clk", + hws[IMX7D_ARM_A7_ROOT_CLK]->clk, + hws[IMX7D_ARM_A7_ROOT_SRC]->clk, + hws[IMX7D_PLL_ARM_MAIN_CLK]->clk, + hws[IMX7D_PLL_SYS_MAIN_CLK]->clk); + + imx_check_clk_hws(hws, IMX7D_CLK_END); + + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); + + clk_set_parent(hws[IMX7D_PLL_ARM_MAIN_BYPASS]->clk, hws[IMX7D_PLL_ARM_MAIN]->clk); + clk_set_parent(hws[IMX7D_PLL_DRAM_MAIN_BYPASS]->clk, hws[IMX7D_PLL_DRAM_MAIN]->clk); + clk_set_parent(hws[IMX7D_PLL_SYS_MAIN_BYPASS]->clk, hws[IMX7D_PLL_SYS_MAIN]->clk); + clk_set_parent(hws[IMX7D_PLL_ENET_MAIN_BYPASS]->clk, hws[IMX7D_PLL_ENET_MAIN]->clk); + clk_set_parent(hws[IMX7D_PLL_AUDIO_MAIN_BYPASS]->clk, hws[IMX7D_PLL_AUDIO_MAIN]->clk); + clk_set_parent(hws[IMX7D_PLL_VIDEO_MAIN_BYPASS]->clk, hws[IMX7D_PLL_VIDEO_MAIN]->clk); + + clk_set_parent(hws[IMX7D_MIPI_CSI_ROOT_SRC]->clk, hws[IMX7D_PLL_SYS_PFD3_CLK]->clk); /* use old gpt clk setting, gpt1 root clk must be twice as gpt counter freq */ - clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]); + clk_set_parent(hws[IMX7D_GPT1_ROOT_SRC]->clk, hws[IMX7D_OSC_24M_CLK]->clk); /* Set clock rate for USBPHY, the USB_PLL at CCM is from USBOTG2 */ - clks[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb1_main_clk", "osc", 20, 1); - clks[IMX7D_USB_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb_main_clk", "osc", 20, 1); + hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1); + hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1); + + for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) { + int index = uart_clk_ids[i]; + + uart_clks[i] = &hws[index]->clk; + } + imx_register_uart_clocks(uart_clks); diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 66682100f14c..42e4667f22fd 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -115,7 +115,7 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np) clks[IMX7ULP_CLK_NIC0_DIV] = imx_clk_hw_divider_flags("nic0_clk", "nic_sel", base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); 
clks[IMX7ULP_CLK_NIC1_DIV] = imx_clk_hw_divider_flags("nic1_clk", "nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); - clks[IMX7ULP_CLK_NIC1_BUS_DIV] = imx_clk_hw_divider_flags("nic1_bus_clk", "nic1_clk", base + 0x40, 4, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); + clks[IMX7ULP_CLK_NIC1_BUS_DIV] = imx_clk_hw_divider_flags("nic1_bus_clk", "nic0_clk", base + 0x40, 4, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL); clks[IMX7ULP_CLK_GPU_DIV] = imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4); diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 122a81ab8e48..6b8e75df994d 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -288,6 +288,9 @@ static const char *imx8mm_usb_core_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pl static const char *imx8mm_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m", "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; +static const char *imx8mm_gic_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll2_100m", + "sys_pll1_800m", "clk_ext2", "clk_ext4", "audio_pll2_out" }; + static const char *imx8mm_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; @@ -325,7 +328,7 @@ static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", - "sys_pll3_out", "sys_pll1_266m", "audio_pll2_clk", "sys_pll1_100m", }; + "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", }; static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; @@ -361,11 +364,11 @@ static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", }; static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", - "audio_pll2_clk", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", }; + "audio_pll2_out", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", }; static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; -static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_clk", +static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out", "vpu_pll", "sys_pll1_80m", }; static struct clk *clks[IMX8MM_CLK_END]; @@ -523,7 +526,7 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) /* IP */ clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000); - clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080); + clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mm_dram_apb_sels, base + 0xa080); clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100); clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180); clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200); @@ -558,6 +561,7 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_CLK_UART4] = 
imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080); clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100); clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180); + clks[IMX8MM_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mm_gic_sels, base + 0xb200); clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280); clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300); clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380); @@ -590,6 +594,11 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0); clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0); clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0); + clks[IMX8MM_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0); + clks[IMX8MM_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0); + clks[IMX8MM_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0); + clks[IMX8MM_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0); + clks[IMX8MM_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0); clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0); clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0); clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0); @@ -617,6 +626,7 @@ static int __init imx8mm_clocks_init(struct device_node *ccm_node) clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5); clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6); clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6); + clks[IMX8MM_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0); clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0); clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index daf1841b2adb..d407a07e7e6d 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -192,6 +192,9 @@ static const char * const imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", static const char * const imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; +static const char * const imx8mq_gic_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys2_pll_100m", + "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out" }; + static const char * const imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; @@ -269,13 +272,20 @@ static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sy static struct clk_onecell_data clk_data; +static struct clk ** const uart_clks[] = { + 
&clks[IMX8MQ_CLK_UART1_ROOT], + &clks[IMX8MQ_CLK_UART2_ROOT], + &clks[IMX8MQ_CLK_UART3_ROOT], + &clks[IMX8MQ_CLK_UART4_ROOT], + NULL +}; + static int imx8mq_clocks_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; void __iomem *base; int err; - int i; clks[IMX8MQ_CLK_DUMMY] = imx_clk_fixed("dummy", 0); clks[IMX8MQ_CLK_32K] = of_clk_get_by_name(np, "ckil"); @@ -358,9 +368,9 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1); np = dev->of_node; - base = of_iomap(np, 0); - if (WARN_ON(!base)) - return -ENOMEM; + base = devm_platform_ioremap_resource(pdev, 0); + if (WARN_ON(IS_ERR(base))) + return PTR_ERR(base); /* CORE */ clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels)); @@ -442,6 +452,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_CLK_UART4] = imx8m_clk_composite("uart4", imx8mq_uart4_sels, base + 0xb080); clks[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100); clks[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180); + clks[IMX8MQ_CLK_GIC] = imx8m_clk_composite_critical("gic", imx8mq_gic_sels, base + 0xb200); clks[IMX8MQ_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280); clks[IMX8MQ_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300); clks[IMX8MQ_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380); @@ -507,6 +518,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5); clks[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6); clks[IMX8MQ_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6); + clks[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0); clks[IMX8MQ_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0); clks[IMX8MQ_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); clks[IMX8MQ_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); @@ -543,10 +555,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) clks[IMX8MQ_ARM_PLL_OUT], clks[IMX8MQ_SYS1_PLL_800M]); - for (i = 0; i < IMX8MQ_CLK_END; i++) - if (IS_ERR(clks[i])) - pr_err("i.MX8mq clk %u register failed with %ld\n", - i, PTR_ERR(clks[i])); + imx_check_clocks(clks, ARRAY_SIZE(clks)); clk_data.clks = clks; clk_data.clk_num = ARRAY_SIZE(clks); @@ -554,6 +563,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) err = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); WARN_ON(err); + imx_register_uart_clocks(uart_clks); + return err; } diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c index fa82ddea4996..50b7c30296f7 100644 --- a/drivers/clk/imx/clk-pfd.c +++ b/drivers/clk/imx/clk-pfd.c @@ -121,12 +121,13 @@ static const struct clk_ops clk_pfd_ops = { .is_enabled = clk_pfd_is_enabled, }; -struct clk *imx_clk_pfd(const char *name, const char *parent_name, +struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name, void __iomem *reg, u8 idx) { struct clk_pfd *pfd; - struct clk *clk; + struct 
clk_hw *hw; struct clk_init_data init; + int ret; pfd = kzalloc(sizeof(*pfd), GFP_KERNEL); if (!pfd) @@ -142,10 +143,13 @@ struct clk *imx_clk_pfd(const char *name, const char *parent_name, init.num_parents = 1; pfd->hw.init = &init; + hw = &pfd->hw; - clk = clk_register(NULL, &pfd->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(pfd); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index 93b059608d3c..df91a8244fb4 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -410,14 +410,15 @@ static const struct clk_ops clk_pllv3_enet_ops = { .recalc_rate = clk_pllv3_enet_recalc_rate, }; -struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, +struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name, const char *parent_name, void __iomem *base, u32 div_mask) { struct clk_pllv3 *pll; const struct clk_ops *ops; - struct clk *clk; + struct clk_hw *hw; struct clk_init_data init; + int ret; pll = kzalloc(sizeof(*pll), GFP_KERNEL); if (!pll) @@ -478,10 +479,13 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, init.num_parents = 1; pll->hw.init = &init; + hw = &pll->hw; - clk = clk_register(NULL, &pll->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(NULL, hw); + if (ret) { kfree(pll); + return ERR_PTR(ret); + } - return clk; + return hw; } diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c index 1efed86217f7..f628071f6605 100644 --- a/drivers/clk/imx/clk.c +++ b/drivers/clk/imx/clk.c @@ -1,14 +1,30 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/err.h> +#include <linux/io.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "clk.h" +#define CCM_CCDR 0x4 +#define CCDR_MMDC_CH0_MASK BIT(17) +#define CCDR_MMDC_CH1_MASK BIT(16) + DEFINE_SPINLOCK(imx_ccm_lock); -void __init imx_check_clocks(struct clk *clks[], unsigned int count) +void __init imx_mmdc_mask_handshake(void __iomem *ccm_base, + unsigned int chn) +{ + unsigned int reg; + + reg = readl_relaxed(ccm_base + CCM_CCDR); + reg |= chn == 0 ? 
CCDR_MMDC_CH0_MASK : CCDR_MMDC_CH1_MASK; + writel_relaxed(reg, ccm_base + CCM_CCDR); +} + +void imx_check_clocks(struct clk *clks[], unsigned int count) { unsigned i; @@ -59,6 +75,17 @@ struct clk * __init imx_obtain_fixed_clock( return clk; } +struct clk_hw * __init imx_obtain_fixed_clock_hw( + const char *name, unsigned long rate) +{ + struct clk *clk; + + clk = imx_obtain_fixed_clock_from_dt(name); + if (IS_ERR(clk)) + clk = imx_clk_fixed(name, rate); + return __clk_get_hw(clk); +} + struct clk_hw * __init imx_obtain_fixed_clk_hw(struct device_node *np, const char *name) { @@ -97,8 +124,8 @@ void imx_cscmr1_fixup(u32 *val) return; } -static int imx_keep_uart_clocks __initdata; -static struct clk ** const *imx_uart_clocks __initdata; +static int imx_keep_uart_clocks; +static struct clk ** const *imx_uart_clocks; static int __init imx_keep_uart_clocks_param(char *str) { @@ -111,7 +138,7 @@ __setup_param("earlycon", imx_keep_uart_earlycon, __setup_param("earlyprintk", imx_keep_uart_earlyprintk, imx_keep_uart_clocks_param, 0); -void __init imx_register_uart_clocks(struct clk ** const clks[]) +void imx_register_uart_clocks(struct clk ** const clks[]) { if (imx_keep_uart_clocks) { int i; diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 8639a8f2153e..d94d9cb079d3 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -10,6 +10,8 @@ extern spinlock_t imx_ccm_lock; void imx_check_clocks(struct clk *clks[], unsigned int count); void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count); void imx_register_uart_clocks(struct clk ** const clks[]); +void imx_register_uart_clocks_hws(struct clk_hw ** const hws[]); +void imx_mmdc_mask_handshake(void __iomem *ccm_base, unsigned int chn); extern void imx_cscmr1_fixup(u32 *val); @@ -48,6 +50,74 @@ struct imx_pll14xx_clk { int flags; }; +#define imx_clk_busy_divider(name, parent_name, reg, shift, width, busy_reg, busy_shift) \ + imx_clk_hw_busy_divider(name, parent_name, reg, shift, width, busy_reg, busy_shift)->clk + +#define imx_clk_busy_mux(name, reg, shift, width, busy_reg, busy_shift, parent_names, num_parents) \ + imx_clk_hw_busy_mux(name, reg, shift, width, busy_reg, busy_shift, parent_names, num_parents)->clk + +#define imx_clk_cpu(name, parent_name, div, mux, pll, step) \ + imx_clk_hw_cpu(name, parent_name, div, mux, pll, step)->clk + +#define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \ + cgr_val, clk_gate_flags, lock, share_count) \ + clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \ + cgr_val, clk_gate_flags, lock, share_count)->clk + +#define imx_clk_pllv3(type, name, parent_name, base, div_mask) \ + imx_clk_hw_pllv3(type, name, parent_name, base, div_mask)->clk + +#define imx_clk_pfd(name, parent_name, reg, idx) \ + imx_clk_hw_pfd(name, parent_name, reg, idx)->clk + +#define imx_clk_gate_exclusive(name, parent, reg, shift, exclusive_mask) \ + imx_clk_hw_gate_exclusive(name, parent, reg, shift, exclusive_mask)->clk + +#define imx_clk_fixup_divider(name, parent, reg, shift, width, fixup) \ + imx_clk_hw_fixup_divider(name, parent, reg, shift, width, fixup)->clk + +#define imx_clk_fixup_mux(name, reg, shift, width, parents, num_parents, fixup) \ + imx_clk_hw_fixup_mux(name, reg, shift, width, parents, num_parents, fixup)->clk + +#define imx_clk_mux_ldb(name, reg, shift, width, parents, num_parents) \ + imx_clk_hw_mux_ldb(name, reg, shift, width, parents, num_parents)->clk + +#define imx_clk_fixed_factor(name, parent, mult, div) \ + imx_clk_hw_fixed_factor(name, parent, 
mult, div)->clk + +#define imx_clk_divider2(name, parent, reg, shift, width) \ + imx_clk_hw_divider2(name, parent, reg, shift, width)->clk + +#define imx_clk_gate_dis(name, parent, reg, shift) \ + imx_clk_hw_gate_dis(name, parent, reg, shift)->clk + +#define imx_clk_gate_dis_flags(name, parent, reg, shift, flags) \ + imx_clk_hw_gate_dis_flags(name, parent, reg, shift, flags)->clk + +#define imx_clk_gate_flags(name, parent, reg, shift, flags) \ + imx_clk_hw_gate_flags(name, parent, reg, shift, flags)->clk + +#define imx_clk_gate2(name, parent, reg, shift) \ + imx_clk_hw_gate2(name, parent, reg, shift)->clk + +#define imx_clk_gate2_flags(name, parent, reg, shift, flags) \ + imx_clk_hw_gate2_flags(name, parent, reg, shift, flags)->clk + +#define imx_clk_gate2_shared(name, parent, reg, shift, share_count) \ + imx_clk_hw_gate2_shared(name, parent, reg, shift, share_count)->clk + +#define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \ + imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count)->clk + +#define imx_clk_gate3(name, parent, reg, shift) \ + imx_clk_hw_gate3(name, parent, reg, shift)->clk + +#define imx_clk_gate4(name, parent, reg, shift) \ + imx_clk_hw_gate4(name, parent, reg, shift)->clk + +#define imx_clk_mux(name, reg, shift, width, parents, num_parents) \ + imx_clk_hw_mux(name, reg, shift, width, parents, num_parents)->clk + struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, void __iomem *base, const struct imx_pll14xx_clk *pll_clk); @@ -80,13 +150,13 @@ enum imx_pllv3_type { IMX_PLLV3_AV_IMX7, }; -struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, +struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name, const char *parent_name, void __iomem *base, u32 div_mask); struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name, void __iomem *base); -struct clk *clk_register_gate2(struct device *dev, const char *name, +struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 clk_gate_flags, spinlock_t *lock, @@ -95,23 +165,26 @@ struct clk *clk_register_gate2(struct device *dev, const char *name, struct clk * imx_obtain_fixed_clock( const char *name, unsigned long rate); +struct clk_hw *imx_obtain_fixed_clock_hw( + const char *name, unsigned long rate); + struct clk_hw *imx_obtain_fixed_clk_hw(struct device_node *np, const char *name); -struct clk *imx_clk_gate_exclusive(const char *name, const char *parent, +struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent, void __iomem *reg, u8 shift, u32 exclusive_mask); -struct clk *imx_clk_pfd(const char *name, const char *parent_name, +struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name, void __iomem *reg, u8 idx); struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name, void __iomem *reg, u8 idx); -struct clk *imx_clk_busy_divider(const char *name, const char *parent_name, +struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift); -struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift, +struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift, u8 width, void __iomem *busy_reg, u8 busy_shift, const char * const *parent_names, int num_parents); @@ -121,11 +194,11 @@ struct clk_hw *imx7ulp_clk_composite(const char *name, bool rate_present, 
bool gate_present, void __iomem *reg); -struct clk *imx_clk_fixup_divider(const char *name, const char *parent, +struct clk_hw *imx_clk_hw_fixup_divider(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width, void (*fixup)(u32 *val)); -struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg, +struct clk_hw *imx_clk_hw_fixup_mux(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents, void (*fixup)(u32 *val)); @@ -139,19 +212,19 @@ static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate) return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate); } -static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg, +static inline struct clk_hw *imx_clk_hw_mux_ldb(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents) { - return clk_register_mux(NULL, name, parents, num_parents, + return clk_hw_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg, shift, width, CLK_MUX_READ_ONLY, &imx_ccm_lock); } -static inline struct clk *imx_clk_fixed_factor(const char *name, +static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name, const char *parent, unsigned int mult, unsigned int div) { - return clk_register_fixed_factor(NULL, name, parent, + return clk_hw_register_fixed_factor(NULL, name, parent, CLK_SET_RATE_PARENT, mult, div); } @@ -188,10 +261,10 @@ static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name, reg, shift, width, 0, &imx_ccm_lock); } -static inline struct clk *imx_clk_divider2(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_divider2(const char *name, const char *parent, void __iomem *reg, u8 shift, u8 width) { - return clk_register_divider(NULL, name, parent, + return clk_hw_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, reg, shift, width, 0, &imx_ccm_lock); } @@ -212,10 +285,10 @@ static inline struct clk *imx_clk_gate(const char *name, const char *parent, shift, 0, &imx_ccm_lock); } -static inline struct clk *imx_clk_gate_flags(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate_flags(const char *name, const char *parent, void __iomem *reg, u8 shift, unsigned long flags) { - return clk_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, + return clk_hw_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, shift, 0, &imx_ccm_lock); } @@ -226,47 +299,47 @@ static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *paren shift, 0, &imx_ccm_lock); } -static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate_dis(const char *name, const char *parent, void __iomem *reg, u8 shift) { - return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg, shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock); } -static inline struct clk *imx_clk_gate_dis_flags(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate_dis_flags(const char *name, const char *parent, void __iomem *reg, u8 shift, unsigned long flags) { - return clk_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, + return clk_hw_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock); } -static inline struct clk 
*imx_clk_gate2(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate2(const char *name, const char *parent, void __iomem *reg, u8 shift) { - return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, shift, 0x3, 0, &imx_ccm_lock, NULL); } -static inline struct clk *imx_clk_gate2_flags(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate2_flags(const char *name, const char *parent, void __iomem *reg, u8 shift, unsigned long flags) { - return clk_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, + return clk_hw_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg, shift, 0x3, 0, &imx_ccm_lock, NULL); } -static inline struct clk *imx_clk_gate2_shared(const char *name, +static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name, const char *parent, void __iomem *reg, u8 shift, unsigned int *share_count) { - return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, + return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg, shift, 0x3, 0, &imx_ccm_lock, share_count); } -static inline struct clk *imx_clk_gate2_shared2(const char *name, +static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name, const char *parent, void __iomem *reg, u8 shift, unsigned int *share_count) { - return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT | + return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0, &imx_ccm_lock, share_count); } @@ -278,10 +351,10 @@ static inline struct clk *imx_clk_gate2_cgr(const char *name, shift, cgr_val, 0, &imx_ccm_lock, NULL); } -static inline struct clk *imx_clk_gate3(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *parent, void __iomem *reg, u8 shift) { - return clk_register_gate(NULL, name, parent, + return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, reg, shift, 0, &imx_ccm_lock); } @@ -295,10 +368,10 @@ static inline struct clk *imx_clk_gate3_flags(const char *name, reg, shift, 0, &imx_ccm_lock); } -static inline struct clk *imx_clk_gate4(const char *name, const char *parent, +static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *parent, void __iomem *reg, u8 shift) { - return clk_register_gate2(NULL, name, parent, + return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0, &imx_ccm_lock, NULL); } @@ -312,11 +385,11 @@ static inline struct clk *imx_clk_gate4_flags(const char *name, reg, shift, 0x3, 0, &imx_ccm_lock, NULL); } -static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg, +static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg, u8 shift, u8 width, const char * const *parents, int num_parents) { - return clk_register_mux(NULL, name, parents, num_parents, + return clk_hw_register_mux(NULL, name, parents, num_parents, CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0, &imx_ccm_lock); } @@ -373,7 +446,7 @@ static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name, reg, shift, width, 0, &imx_ccm_lock); } -struct clk *imx_clk_cpu(const char *name, const char *parent_name, +struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name, struct clk *div, struct clk *mux, struct clk *pll, struct clk *step); diff --git 
a/drivers/clk/ingenic/Makefile b/drivers/clk/ingenic/Makefile index ab58a6a862a5..250570a809d3 100644 --- a/drivers/clk/ingenic/Makefile +++ b/drivers/clk/ingenic/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_INGENIC_CGU_COMMON) += cgu.o +obj-$(CONFIG_INGENIC_CGU_COMMON) += cgu.o pm.o obj-$(CONFIG_INGENIC_CGU_JZ4740) += jz4740-cgu.o obj-$(CONFIG_INGENIC_CGU_JZ4725B) += jz4725b-cgu.o obj-$(CONFIG_INGENIC_CGU_JZ4770) += jz4770-cgu.o diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 92c331427513..6e963031cd87 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -375,8 +375,11 @@ ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) div_reg = readl(cgu->base + clk_info->div.reg); div = (div_reg >> clk_info->div.shift) & GENMASK(clk_info->div.bits - 1, 0); - div += 1; - div *= clk_info->div.div; + + if (clk_info->div.div_table) + div = clk_info->div.div_table[div]; + else + div = (div + 1) * clk_info->div.div; rate /= div; } else if (clk_info->type & CGU_CLK_FIXDIV) { @@ -386,16 +389,37 @@ ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return rate; } +static unsigned int +ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info, + unsigned int div) +{ + unsigned int i; + + for (i = 0; i < (1 << clk_info->div.bits) + && clk_info->div.div_table[i]; i++) { + if (clk_info->div.div_table[i] >= div) + return i; + } + + return i - 1; +} + static unsigned ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info, unsigned long parent_rate, unsigned long req_rate) { - unsigned div; + unsigned int div, hw_div; /* calculate the divide */ div = DIV_ROUND_UP(parent_rate, req_rate); - /* and impose hardware constraints */ + if (clk_info->div.div_table) { + hw_div = ingenic_clk_calc_hw_div(clk_info, div); + + return clk_info->div.div_table[hw_div]; + } + + /* Impose hardware constraints */ div = min_t(unsigned, div, 1 << clk_info->div.bits); div = max_t(unsigned, div, 1); @@ -438,7 +462,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate, const struct ingenic_cgu_clk_info *clk_info; const unsigned timeout = 100; unsigned long rate, flags; - unsigned div, i; + unsigned int hw_div, div, i; u32 reg, mask; int ret = 0; @@ -451,13 +475,18 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate, if (rate != req_rate) return -EINVAL; + if (clk_info->div.div_table) + hw_div = ingenic_clk_calc_hw_div(clk_info, div); + else + hw_div = ((div / clk_info->div.div) - 1); + spin_lock_irqsave(&cgu->lock, flags); reg = readl(cgu->base + clk_info->div.reg); /* update the divide */ mask = GENMASK(clk_info->div.bits - 1, 0); reg &= ~(mask << clk_info->div.shift); - reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift; + reg |= hw_div << clk_info->div.shift; /* clear the stop bit */ if (clk_info->div.stop_bit != -1) diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index bfbcf6db437d..0dc8004079ee 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h @@ -10,6 +10,7 @@ #define __DRIVERS_CLK_INGENIC_CGU_H__ #include <linux/bitops.h> +#include <linux/clk-provider.h> #include <linux/of.h> #include <linux/spinlock.h> @@ -79,6 +80,8 @@ struct ingenic_cgu_mux_info { * isn't one * @busy_bit: the index of the busy bit within reg, or -1 if there isn't one * @stop_bit: the index of the stop bit within reg, or -1 if there isn't one + * @div_table: optional table to map the value read from the register to the + * actual divider value */ struct 
ingenic_cgu_div_info { unsigned reg; @@ -88,6 +91,7 @@ struct ingenic_cgu_div_info { s8 ce_bit; s8 busy_bit; s8 stop_bit; + const u8 *div_table; }; /** diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 8901ea0295b7..2642d36d1e2c 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -11,6 +11,7 @@ #include <linux/of.h> #include <dt-bindings/clock/jz4725b-cgu.h> #include "cgu.h" +#include "pm.h" /* CGU register offsets */ #define CGU_REG_CPCCR 0x00 @@ -33,6 +34,14 @@ static const s8 pll_od_encoding[4] = { 0x0, 0x1, -1, 0x3, }; +static const u8 jz4725b_cgu_cpccr_div_table[] = { + 1, 2, 3, 4, 6, 8, +}; + +static const u8 jz4725b_cgu_pll_half_div_table[] = { + 2, 1, +}; + static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { /* External clocks */ @@ -66,37 +75,55 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { [JZ4725B_CLK_PLL_HALF] = { "pll half", CGU_CLK_DIV, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 }, + .div = { + CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1, + jz4725b_cgu_pll_half_div_table, + }, }, [JZ4725B_CLK_CCLK] = { "cclk", CGU_CLK_DIV, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, + jz4725b_cgu_cpccr_div_table, + }, }, [JZ4725B_CLK_HCLK] = { "hclk", CGU_CLK_DIV, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1, + jz4725b_cgu_cpccr_div_table, + }, }, [JZ4725B_CLK_PCLK] = { "pclk", CGU_CLK_DIV, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1, + jz4725b_cgu_cpccr_div_table, + }, }, [JZ4725B_CLK_MCLK] = { "mclk", CGU_CLK_DIV, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, + jz4725b_cgu_cpccr_div_table, + }, }, [JZ4725B_CLK_IPU] = { "ipu", CGU_CLK_DIV | CGU_CLK_GATE, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1, + jz4725b_cgu_cpccr_div_table, + }, .gate = { CGU_REG_CLKGR, 13 }, }, @@ -227,5 +254,7 @@ static void __init jz4725b_cgu_init(struct device_node *np) retval = ingenic_cgu_register_clocks(cgu); if (retval) pr_err("%s: failed to register CGU Clocks\n", __func__); + + ingenic_cgu_register_syscore_ops(cgu); } CLK_OF_DECLARE(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init); diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c index c77f4e1506dc..4c0a20949c2c 100644 --- a/drivers/clk/ingenic/jz4740-cgu.c +++ b/drivers/clk/ingenic/jz4740-cgu.c @@ -11,8 +11,8 @@ #include <linux/io.h> #include <linux/of.h> #include <dt-bindings/clock/jz4740-cgu.h> -#include <asm/mach-jz4740/clock.h> #include "cgu.h" +#include "pm.h" /* CGU register offsets */ #define CGU_REG_CPCCR 0x00 @@ -49,6 +49,10 @@ static const s8 pll_od_encoding[4] = { 0x0, 0x1, -1, 0x3, }; +static const u8 jz4740_cgu_cpccr_div_table[] = { + 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, +}; + static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { /* External clocks */ @@ -88,31 +92,46 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { [JZ4740_CLK_CCLK] = { "cclk", CGU_CLK_DIV, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, + 
.div = { + CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, + jz4740_cgu_cpccr_div_table, + }, }, [JZ4740_CLK_HCLK] = { "hclk", CGU_CLK_DIV, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1, + jz4740_cgu_cpccr_div_table, + }, }, [JZ4740_CLK_PCLK] = { "pclk", CGU_CLK_DIV, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1, + jz4740_cgu_cpccr_div_table, + }, }, [JZ4740_CLK_MCLK] = { "mclk", CGU_CLK_DIV, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, + jz4740_cgu_cpccr_div_table, + }, }, [JZ4740_CLK_LCD] = { "lcd", CGU_CLK_DIV | CGU_CLK_GATE, .parents = { JZ4740_CLK_PLL_HALF, -1, -1, -1 }, - .div = { CGU_REG_CPCCR, 16, 1, 5, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 16, 1, 5, 22, -1, -1, + jz4740_cgu_cpccr_div_table, + }, .gate = { CGU_REG_CLKGR, 10 }, }, @@ -219,77 +238,7 @@ static void __init jz4740_cgu_init(struct device_node *np) retval = ingenic_cgu_register_clocks(cgu); if (retval) pr_err("%s: failed to register CGU Clocks\n", __func__); -} -CLK_OF_DECLARE(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init); - -void jz4740_clock_set_wait_mode(enum jz4740_wait_mode mode) -{ - uint32_t lcr = readl(cgu->base + CGU_REG_LCR); - - switch (mode) { - case JZ4740_WAIT_MODE_IDLE: - lcr &= ~LCR_SLEEP; - break; - - case JZ4740_WAIT_MODE_SLEEP: - lcr |= LCR_SLEEP; - break; - } - - writel(lcr, cgu->base + CGU_REG_LCR); -} -void jz4740_clock_udc_disable_auto_suspend(void) -{ - uint32_t clkgr = readl(cgu->base + CGU_REG_CLKGR); - - clkgr &= ~CLKGR_UDC; - writel(clkgr, cgu->base + CGU_REG_CLKGR); -} -EXPORT_SYMBOL_GPL(jz4740_clock_udc_disable_auto_suspend); - -void jz4740_clock_udc_enable_auto_suspend(void) -{ - uint32_t clkgr = readl(cgu->base + CGU_REG_CLKGR); - - clkgr |= CLKGR_UDC; - writel(clkgr, cgu->base + CGU_REG_CLKGR); -} -EXPORT_SYMBOL_GPL(jz4740_clock_udc_enable_auto_suspend); - -#define JZ_CLOCK_GATE_UART0 BIT(0) -#define JZ_CLOCK_GATE_TCU BIT(1) -#define JZ_CLOCK_GATE_DMAC BIT(12) - -void jz4740_clock_suspend(void) -{ - uint32_t clkgr, cppcr; - - clkgr = readl(cgu->base + CGU_REG_CLKGR); - clkgr |= JZ_CLOCK_GATE_TCU | JZ_CLOCK_GATE_DMAC | JZ_CLOCK_GATE_UART0; - writel(clkgr, cgu->base + CGU_REG_CLKGR); - - cppcr = readl(cgu->base + CGU_REG_CPPCR); - cppcr &= ~BIT(jz4740_cgu_clocks[JZ4740_CLK_PLL].pll.enable_bit); - writel(cppcr, cgu->base + CGU_REG_CPPCR); -} - -void jz4740_clock_resume(void) -{ - uint32_t clkgr, cppcr, stable; - - cppcr = readl(cgu->base + CGU_REG_CPPCR); - cppcr |= BIT(jz4740_cgu_clocks[JZ4740_CLK_PLL].pll.enable_bit); - writel(cppcr, cgu->base + CGU_REG_CPPCR); - - stable = BIT(jz4740_cgu_clocks[JZ4740_CLK_PLL].pll.stable_bit); - do { - cppcr = readl(cgu->base + CGU_REG_CPPCR); - } while (!(cppcr & stable)); - - clkgr = readl(cgu->base + CGU_REG_CLKGR); - clkgr &= ~JZ_CLOCK_GATE_TCU; - clkgr &= ~JZ_CLOCK_GATE_DMAC; - clkgr &= ~JZ_CLOCK_GATE_UART0; - writel(clkgr, cgu->base + CGU_REG_CLKGR); + ingenic_cgu_register_syscore_ops(cgu); } +CLK_OF_DECLARE(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init); diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index dfce740c25a8..eebc1bea3841 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -9,9 +9,9 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/syscore_ops.h> 
#include <dt-bindings/clock/jz4770-cgu.h> #include "cgu.h" +#include "pm.h" /* * CPM registers offset address definition @@ -38,9 +38,6 @@ #define CGU_REG_MSC2CDR 0xA8 #define CGU_REG_BCHCDR 0xAC -/* bits within the LCR register */ -#define LCR_LPM BIT(0) /* Low Power Mode */ - /* bits within the OPCR register */ #define OPCR_SPENDH BIT(5) /* UHC PHY suspend */ @@ -87,6 +84,10 @@ static const s8 pll_od_encoding[8] = { 0x0, 0x1, -1, 0x2, -1, -1, -1, 0x3, }; +static const u8 jz4770_cgu_cpccr_div_table[] = { + 1, 2, 3, 4, 6, 8, 12, +}; + static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { /* External clocks */ @@ -144,34 +145,52 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { [JZ4770_CLK_CCLK] = { "cclk", CGU_CLK_DIV, .parents = { JZ4770_CLK_PLL0, }, - .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, + jz4770_cgu_cpccr_div_table, + }, }, [JZ4770_CLK_H0CLK] = { "h0clk", CGU_CLK_DIV, .parents = { JZ4770_CLK_PLL0, }, - .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1, + jz4770_cgu_cpccr_div_table, + }, }, [JZ4770_CLK_H1CLK] = { "h1clk", CGU_CLK_DIV | CGU_CLK_GATE, .parents = { JZ4770_CLK_PLL0, }, - .div = { CGU_REG_CPCCR, 24, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 24, 1, 4, 22, -1, -1, + jz4770_cgu_cpccr_div_table, + }, .gate = { CGU_REG_CLKGR1, 7 }, }, [JZ4770_CLK_H2CLK] = { "h2clk", CGU_CLK_DIV, .parents = { JZ4770_CLK_PLL0, }, - .div = { CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 16, 1, 4, 22, -1, -1, + jz4770_cgu_cpccr_div_table, + }, }, [JZ4770_CLK_C1CLK] = { "c1clk", CGU_CLK_DIV | CGU_CLK_GATE, .parents = { JZ4770_CLK_PLL0, }, - .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, + jz4770_cgu_cpccr_div_table, + }, .gate = { CGU_REG_OPCR, 31, true }, // disable CCLK stop on idle }, [JZ4770_CLK_PCLK] = { "pclk", CGU_CLK_DIV, .parents = { JZ4770_CLK_PLL0, }, - .div = { CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1 }, + .div = { + CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1, + jz4770_cgu_cpccr_div_table, + }, }, /* Those divided clocks can connect to PLL0 or PLL1 */ @@ -407,30 +426,6 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { }, }; -#if IS_ENABLED(CONFIG_PM_SLEEP) -static int jz4770_cgu_pm_suspend(void) -{ - u32 val; - - val = readl(cgu->base + CGU_REG_LCR); - writel(val | LCR_LPM, cgu->base + CGU_REG_LCR); - return 0; -} - -static void jz4770_cgu_pm_resume(void) -{ - u32 val; - - val = readl(cgu->base + CGU_REG_LCR); - writel(val & ~LCR_LPM, cgu->base + CGU_REG_LCR); -} - -static struct syscore_ops jz4770_cgu_pm_ops = { - .suspend = jz4770_cgu_pm_suspend, - .resume = jz4770_cgu_pm_resume, -}; -#endif /* CONFIG_PM_SLEEP */ - static void __init jz4770_cgu_init(struct device_node *np) { int retval; @@ -444,9 +439,7 @@ static void __init jz4770_cgu_init(struct device_node *np) if (retval) pr_err("%s: failed to register CGU Clocks\n", __func__); -#if IS_ENABLED(CONFIG_PM_SLEEP) - register_syscore_ops(&jz4770_cgu_pm_ops); -#endif + ingenic_cgu_register_syscore_ops(cgu); } /* We only probe via devicetree, no need for a platform driver */ diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c index 2464fc4032af..8c67f89df25e 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -12,6 +12,7 @@ #include <linux/of.h> #include <dt-bindings/clock/jz4780-cgu.h> #include "cgu.h" +#include "pm.h" /* CGU register offsets */ #define 
CGU_REG_CLOCKCONTROL 0x00 @@ -721,5 +722,7 @@ static void __init jz4780_cgu_init(struct device_node *np) pr_err("%s: failed to register CGU Clocks\n", __func__); return; } + + ingenic_cgu_register_syscore_ops(cgu); } CLK_OF_DECLARE(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init); diff --git a/drivers/clk/ingenic/pm.c b/drivers/clk/ingenic/pm.c new file mode 100644 index 000000000000..341752b640d2 --- /dev/null +++ b/drivers/clk/ingenic/pm.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net> + */ + +#include "cgu.h" +#include "pm.h" + +#include <linux/io.h> +#include <linux/syscore_ops.h> + +#define CGU_REG_LCR 0x04 + +#define LCR_LOW_POWER_MODE BIT(0) + +static void __iomem * __maybe_unused ingenic_cgu_base; + +static int __maybe_unused ingenic_cgu_pm_suspend(void) +{ + u32 val = readl(ingenic_cgu_base + CGU_REG_LCR); + + writel(val | LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR); + + return 0; +} + +static void __maybe_unused ingenic_cgu_pm_resume(void) +{ + u32 val = readl(ingenic_cgu_base + CGU_REG_LCR); + + writel(val & ~LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR); +} + +static struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = { + .suspend = ingenic_cgu_pm_suspend, + .resume = ingenic_cgu_pm_resume, +}; + +void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu) +{ + if (IS_ENABLED(CONFIG_PM_SLEEP)) { + ingenic_cgu_base = cgu->base; + register_syscore_ops(&ingenic_cgu_pm_ops); + } +} diff --git a/drivers/clk/ingenic/pm.h b/drivers/clk/ingenic/pm.h new file mode 100644 index 000000000000..fa7540407b6b --- /dev/null +++ b/drivers/clk/ingenic/pm.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net> + */ +#ifndef DRIVERS_CLK_INGENIC_PM_H +#define DRIVERS_CLK_INGENIC_PM_H + +struct ingenic_cgu; + +void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu); + +#endif /* DRIVERS_CLK_INGENIC_PM_H */ diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig index 0ca63014718a..38aeefb1e808 100644 --- a/drivers/clk/keystone/Kconfig +++ b/drivers/clk/keystone/Kconfig @@ -15,3 +15,14 @@ config TI_SCI_CLK This adds the clock driver support over TI System Control Interface. If you wish to use clock resources from the PMMC firmware, say Y. Otherwise, say N. + +config TI_SCI_CLK_PROBE_FROM_FW + bool "Probe available clocks from firmware" + depends on TI_SCI_CLK + default n + help + Forces the TI SCI clock driver to probe available clocks from the + firmware. By default, only the used clocks are probed from DT. + This is mostly only useful for debugging purposes, and will + increase the boot time of the device. If you want the clocks probed + from firmware, say Y. Otherwise, say N. 
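The Kconfig help above contrasts two enumeration strategies, and the sci-clk.c changes that follow implement them: either walk the firmware's whole clock ID space, or register only the clocks that the device tree actually references. As a rough illustration of the trade-off (completeness versus boot time), here is a minimal, self-contained userspace C sketch; the constants and helper names (FW_NUM_CLKS, dt_referenced, register_clk, PROBE_FROM_FW) are invented for the example and are not the driver's real API, which probes dev_id/clk_id pairs through TI SCI calls with gap detection.

#include <stdio.h>
#include <stddef.h>

/* Mock "firmware": it exposes a large, mostly unused clock ID space. */
#define FW_NUM_CLKS 512

/* Mock "device tree": only the clock IDs some consumer actually references. */
static const int dt_referenced[] = { 3, 47, 48, 130 };

/* Hypothetical stand-in for registering one clock with the framework. */
static int register_clk(int clk_id)
{
	(void)clk_id;
	return 0;	/* pretend registration always succeeds */
}

static int scan_from_fw(void)
{
	int id, n = 0;

	/* Probe every ID the firmware exposes: thorough, but slow at boot. */
	for (id = 0; id < FW_NUM_CLKS; id++)
		n += !register_clk(id);
	return n;
}

static int scan_from_dt(void)
{
	size_t i;
	int n = 0;

	/* Register only what consumers reference: the default, faster path. */
	for (i = 0; i < sizeof(dt_referenced) / sizeof(dt_referenced[0]); i++)
		n += !register_clk(dt_referenced[i]);
	return n;
}

int main(void)
{
	/* Compile-time switch mirroring CONFIG_TI_SCI_CLK_PROBE_FROM_FW. */
#ifdef PROBE_FROM_FW
	printf("%d clocks registered (firmware scan)\n", scan_from_fw());
#else
	printf("%d clocks registered (DT scan)\n", scan_from_dt());
#endif
	return 0;
}

Building this sketch with -DPROBE_FROM_FW selects the exhaustive path, analogous to setting CONFIG_TI_SCI_CLK_PROBE_FROM_FW=y; the default build registers only the DT-referenced IDs, which is the behaviour the help text describes as the normal case.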
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 4cb70bed89a9..7edf8c8432b6 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/soc/ti/ti_sci_protocol.h> #include <linux/bsearch.h> +#include <linux/list_sort.h> #define SCI_CLK_SSC_ENABLE BIT(0) #define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1) @@ -52,14 +53,16 @@ struct sci_clk_provider { * @num_parents: Number of parents for this clock * @provider: Master clock provider * @flags: Flags for the clock + * @node: Link for handling clocks probed via DT */ struct sci_clk { struct clk_hw hw; u16 dev_id; - u8 clk_id; - u8 num_parents; + u32 clk_id; + u32 num_parents; struct sci_clk_provider *provider; u8 flags; + struct list_head node; }; #define to_sci_clk(_hw) container_of(_hw, struct sci_clk, hw) @@ -218,11 +221,11 @@ static int sci_clk_set_rate(struct clk_hw *hw, unsigned long rate, static u8 sci_clk_get_parent(struct clk_hw *hw) { struct sci_clk *clk = to_sci_clk(hw); - u8 parent_id; + u32 parent_id = 0; int ret; ret = clk->provider->ops->get_parent(clk->provider->sci, clk->dev_id, - clk->clk_id, &parent_id); + clk->clk_id, (void *)&parent_id); if (ret) { dev_err(clk->provider->dev, "get-parent failed for dev=%d, clk=%d, ret=%d\n", @@ -230,7 +233,9 @@ static u8 sci_clk_get_parent(struct clk_hw *hw) return 0; } - return parent_id - clk->clk_id - 1; + parent_id = parent_id - clk->clk_id - 1; + + return (u8)parent_id; } /** @@ -280,8 +285,8 @@ static int _sci_clk_build(struct sci_clk_provider *provider, int i; int ret = 0; - name = kasprintf(GFP_KERNEL, "%s:%d:%d", dev_name(provider->dev), - sci_clk->dev_id, sci_clk->clk_id); + name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id, + sci_clk->clk_id); init.name = name; @@ -306,8 +311,7 @@ static int _sci_clk_build(struct sci_clk_provider *provider, for (i = 0; i < sci_clk->num_parents; i++) { char *parent_name; - parent_name = kasprintf(GFP_KERNEL, "%s:%d:%d", - dev_name(provider->dev), + parent_name = kasprintf(GFP_KERNEL, "clk:%d:%d", sci_clk->dev_id, sci_clk->clk_id + 1 + i); if (!parent_name) { @@ -404,22 +408,9 @@ static const struct of_device_id ti_sci_clk_of_match[] = { }; MODULE_DEVICE_TABLE(of, ti_sci_clk_of_match); -/** - * ti_sci_clk_probe - Probe function for the TI SCI clock driver - * @pdev: platform device pointer to be probed - * - * Probes the TI SCI clock device. Allocates a new clock provider - * and registers this to the common clock framework. Also applies - * any required flags to the identified clocks via clock lists - * supplied from DT. Returns 0 for success, negative error value - * for failure. 
- */ -static int ti_sci_clk_probe(struct platform_device *pdev) +#ifdef CONFIG_TI_SCI_CLK_PROBE_FROM_FW +static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider) { - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct sci_clk_provider *provider; - const struct ti_sci_handle *handle; int ret; int num_clks = 0; struct sci_clk **clks = NULL; @@ -428,24 +419,14 @@ static int ti_sci_clk_probe(struct platform_device *pdev) int max_clks = 0; int clk_id = 0; int dev_id = 0; - u8 num_parents; + u32 num_parents = 0; int gap_size = 0; - - handle = devm_ti_sci_get_handle(dev); - if (IS_ERR(handle)) - return PTR_ERR(handle); - - provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL); - if (!provider) - return -ENOMEM; - - provider->sci = handle; - provider->ops = &handle->ops.clk_ops; - provider->dev = dev; + struct device *dev = provider->dev; while (1) { ret = provider->ops->get_num_parents(provider->sci, dev_id, - clk_id, &num_parents); + clk_id, + (void *)&num_parents); if (ret) { gap_size++; if (!clk_id) { @@ -502,6 +483,188 @@ static int ti_sci_clk_probe(struct platform_device *pdev) devm_kfree(dev, clks); + return 0; +} + +#else + +static int _cmp_sci_clk_list(void *priv, struct list_head *a, + struct list_head *b) +{ + struct sci_clk *ca = container_of(a, struct sci_clk, node); + struct sci_clk *cb = container_of(b, struct sci_clk, node); + + return _cmp_sci_clk(ca, &cb); +} + +static int ti_sci_scan_clocks_from_dt(struct sci_clk_provider *provider) +{ + struct device *dev = provider->dev; + struct device_node *np = NULL; + int ret; + int index; + struct of_phandle_args args; + struct list_head clks; + struct sci_clk *sci_clk, *prev; + int num_clks = 0; + int num_parents; + int clk_id; + const char * const clk_names[] = { + "clocks", "assigned-clocks", "assigned-clock-parents", NULL + }; + const char * const *clk_name; + + INIT_LIST_HEAD(&clks); + + clk_name = clk_names; + + while (*clk_name) { + np = of_find_node_with_property(np, *clk_name); + if (!np) { + clk_name++; + break; + } + + if (!of_device_is_available(np)) + continue; + + index = 0; + + do { + ret = of_parse_phandle_with_args(np, *clk_name, + "#clock-cells", index, + &args); + if (ret) + break; + + if (args.args_count == 2 && args.np == dev->of_node) { + sci_clk = devm_kzalloc(dev, sizeof(*sci_clk), + GFP_KERNEL); + if (!sci_clk) + return -ENOMEM; + + sci_clk->dev_id = args.args[0]; + sci_clk->clk_id = args.args[1]; + sci_clk->provider = provider; + provider->ops->get_num_parents(provider->sci, + sci_clk->dev_id, + sci_clk->clk_id, + (void *)&sci_clk->num_parents); + list_add_tail(&sci_clk->node, &clks); + + num_clks++; + + num_parents = sci_clk->num_parents; + if (num_parents == 1) + num_parents = 0; + + /* + * Linux kernel has inherent limitation + * of 255 clock parents at the moment. + * Right now, it is not expected that + * any mux clock from sci-clk driver + * would exceed that limit either, but + * the ABI basically provides that + * possibility. Print out a warning if + * this happens for any clock. 
+ */ + if (num_parents >= 255) { + dev_warn(dev, "too many parents for dev=%d, clk=%d (%d), cropping to 255.\n", + sci_clk->dev_id, + sci_clk->clk_id, num_parents); + num_parents = 255; + } + + clk_id = args.args[1] + 1; + + while (num_parents--) { + sci_clk = devm_kzalloc(dev, + sizeof(*sci_clk), + GFP_KERNEL); + if (!sci_clk) + return -ENOMEM; + sci_clk->dev_id = args.args[0]; + sci_clk->clk_id = clk_id++; + sci_clk->provider = provider; + list_add_tail(&sci_clk->node, &clks); + + num_clks++; + } + } + + index++; + } while (args.np); + } + + list_sort(NULL, &clks, _cmp_sci_clk_list); + + provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk), + GFP_KERNEL); + if (!provider->clocks) + return -ENOMEM; + + num_clks = 0; + prev = NULL; + + list_for_each_entry(sci_clk, &clks, node) { + if (prev && prev->dev_id == sci_clk->dev_id && + prev->clk_id == sci_clk->clk_id) + continue; + + provider->clocks[num_clks++] = sci_clk; + prev = sci_clk; + } + + provider->num_clocks = num_clks; + + return 0; +} +#endif + +/** + * ti_sci_clk_probe - Probe function for the TI SCI clock driver + * @pdev: platform device pointer to be probed + * + * Probes the TI SCI clock device. Allocates a new clock provider + * and registers this to the common clock framework. Also applies + * any required flags to the identified clocks via clock lists + * supplied from DT. Returns 0 for success, negative error value + * for failure. + */ +static int ti_sci_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct sci_clk_provider *provider; + const struct ti_sci_handle *handle; + int ret; + + handle = devm_ti_sci_get_handle(dev); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL); + if (!provider) + return -ENOMEM; + + provider->sci = handle; + provider->ops = &handle->ops.clk_ops; + provider->dev = dev; + +#ifdef CONFIG_TI_SCI_CLK_PROBE_FROM_FW + ret = ti_sci_scan_clocks_from_fw(provider); + if (ret) { + dev_err(dev, "scan clocks from FW failed: %d\n", ret); + return ret; + } +#else + ret = ti_sci_scan_clocks_from_dt(provider); + if (ret) { + dev_err(dev, "scan clocks from DT failed: %d\n", ret); + return ret; + } +#endif + ret = ti_sci_init_clocks(provider); if (ret) { pr_err("ti-sci-init-clocks failed.\n"); diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index f797f09c6425..ce3d9b300bab 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -300,4 +300,10 @@ config COMMON_CLK_MT8516 help This driver supports MediaTek MT8516 clocks. +config COMMON_CLK_MT8516_AUDSYS + bool "Clock driver for MediaTek MT8516 audsys" + depends on COMMON_CLK_MT8516 + help + This driver supports MediaTek MT8516 audsys clocks. 
+ endmenu diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index f74937b35f68..672de0099eef 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -45,3 +45,4 @@ obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o +obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 9d8651033ae9..1aa5f4059251 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -395,14 +395,6 @@ static const char * const atb_parents[] = { "syspll_d5" }; -static const char * const sspm_parents[] = { - "clk26m", - "univpll_d2_d4", - "syspll_d2_d2", - "univpll_d2_d2", - "syspll_d3" -}; - static const char * const dpi0_parents[] = { "clk26m", "tvdpll_d2", @@ -606,9 +598,6 @@ static const struct mtk_mux top_muxes[] = { MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel", atb_parents, 0xa0, 0xa4, 0xa8, 0, 2, 7, 0x004, 24), - MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSPM, "sspm_sel", - sspm_parents, 0xa0, - 0xa4, 0xa8, 8, 3, 15, 0x004, 25), MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel", dpi0_parents, 0xa0, 0xa4, 0xa8, 16, 4, 23, 0x004, 26), @@ -947,12 +936,8 @@ static const struct mtk_gate infra_clks[] = { "fufs_sel", 13), GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk", "axi_sel", 14), - GATE_INFRA2(CLK_INFRA_SSPM, "infra_sspm", - "sspm_sel", 15), GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist", "axi_sel", 16), - GATE_INFRA2(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk", - "axi_sel", 17), GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5", "i2c_sel", 18), GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter", @@ -986,10 +971,6 @@ static const struct mtk_gate infra_clks[] = { "msdc50_0_sel", 1), GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self", "msdc50_0_sel", 2), - GATE_INFRA3(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", - "f_f26m_ck", 3), - GATE_INFRA3(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", - "f_f26m_ck", 4), GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5), GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c new file mode 100644 index 000000000000..6ab3a06dc9d5 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8516-aud.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: James Liao <jamesjj.liao@mediatek.com> + * Fabien Parent <fparent@baylibre.com> + */ + +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8516-clk.h> + +static const struct mtk_gate_regs aud_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x0, + .sta_ofs = 0x0, +}; + +#define GATE_AUD(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &aud_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_no_setclr, \ + } + +static const struct mtk_gate aud_clks[] __initconst = { + GATE_AUD(CLK_AUD_AFE, "aud_afe", "clk26m_ck", 2), + GATE_AUD(CLK_AUD_I2S, "aud_i2s", "i2s_infra_bck", 6), + GATE_AUD(CLK_AUD_22M, "aud_22m", "rg_aud_engen1", 8), + GATE_AUD(CLK_AUD_24M, "aud_24m", "rg_aud_engen2", 9), + GATE_AUD(CLK_AUD_INTDIR, "aud_intdir", "rg_aud_spdif_in", 15), + GATE_AUD(CLK_AUD_APLL2_TUNER, "aud_apll2_tuner", "rg_aud_engen2", 18), + GATE_AUD(CLK_AUD_APLL_TUNER, "aud_apll_tuner", "rg_aud_engen1", 19), + GATE_AUD(CLK_AUD_HDMI, "aud_hdmi", "apll12_div4", 20), + GATE_AUD(CLK_AUD_SPDF, "aud_spdf", "apll12_div6", 21), + GATE_AUD(CLK_AUD_ADC, "aud_adc", "aud_afe", 24), + GATE_AUD(CLK_AUD_DAC, "aud_dac", "aud_afe", 25), + GATE_AUD(CLK_AUD_DAC_PREDIS, "aud_dac_predis", "aud_afe", 26), + GATE_AUD(CLK_AUD_TML, "aud_tml", "aud_afe", 27), +}; + +static void __init mtk_audsys_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + + clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK); + + mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + +} +CLK_OF_DECLARE(mtk_audsys, "mediatek,mt8516-audsys", mtk_audsys_init); diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c index 26fe43cc9ea2..9d4261ecc760 100644 --- a/drivers/clk/mediatek/clk-mt8516.c +++ b/drivers/clk/mediatek/clk-mt8516.c @@ -231,11 +231,6 @@ static const char * const nfi1x_pad_parents[] __initconst = { "nfi1x_ck" }; -static const char * const ddrphycfg_parents[] __initconst = { - "clk26m_ck", - "mainpll_d16" -}; - static const char * const usb_78m_parents[] __initconst = { "clk_null", "clk26m_ck", diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 7a8ef80e5f2c..3ddd0efc9ee0 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c @@ -469,11 +469,6 @@ static struct clk_regmap axg_mpll0_div = { .shift = 16, .width = 9, }, - .ssen = { - .reg_off = HHI_MPLL_CNTL, - .shift = 25, - .width = 1, - }, .misc = { .reg_off = HHI_PLL_TOP_MISC, .shift = 0, @@ -568,6 +563,11 @@ static struct clk_regmap axg_mpll2_div = { .shift = 16, .width = 9, }, + .ssen = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, .misc = { .reg_off = HHI_PLL_TOP_MISC, .shift = 2, diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index f76850d99e59..2d39a8bc367c 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c @@ -115,21 +115,12 @@ static int mpll_set_rate(struct clk_hw *hw, else __acquire(mpll->lock); - /* Enable and set the fractional part */ + /* Set the fractional part */ meson_parm_write(clk->map, &mpll->sdm, sdm); - meson_parm_write(clk->map, &mpll->sdm_en, 1); - - /* Set additional fractional part enable if required */ - if 
(MESON_PARM_APPLICABLE(&mpll->ssen)) - meson_parm_write(clk->map, &mpll->ssen, 1); /* Set the integer divider part */ meson_parm_write(clk->map, &mpll->n2, n2); - /* Set the magic misc bit if required */ - if (MESON_PARM_APPLICABLE(&mpll->misc)) - meson_parm_write(clk->map, &mpll->misc, 1); - if (mpll->lock) spin_unlock_irqrestore(mpll->lock, flags); else @@ -138,6 +129,30 @@ static int mpll_set_rate(struct clk_hw *hw, return 0; } +static void mpll_init(struct clk_hw *hw) +{ + struct clk_regmap *clk = to_clk_regmap(hw); + struct meson_clk_mpll_data *mpll = meson_clk_mpll_data(clk); + + if (mpll->init_count) + regmap_multi_reg_write(clk->map, mpll->init_regs, + mpll->init_count); + + /* Enable the fractional part */ + meson_parm_write(clk->map, &mpll->sdm_en, 1); + + /* Set spread spectrum if possible */ + if (MESON_PARM_APPLICABLE(&mpll->ssen)) { + unsigned int ss = + mpll->flags & CLK_MESON_MPLL_SPREAD_SPECTRUM ? 1 : 0; + meson_parm_write(clk->map, &mpll->ssen, ss); + } + + /* Set the magic misc bit if required */ + if (MESON_PARM_APPLICABLE(&mpll->misc)) + meson_parm_write(clk->map, &mpll->misc, 1); +} + const struct clk_ops meson_clk_mpll_ro_ops = { .recalc_rate = mpll_recalc_rate, .round_rate = mpll_round_rate, @@ -148,6 +163,7 @@ const struct clk_ops meson_clk_mpll_ops = { .recalc_rate = mpll_recalc_rate, .round_rate = mpll_round_rate, .set_rate = mpll_set_rate, + .init = mpll_init, }; EXPORT_SYMBOL_GPL(meson_clk_mpll_ops); diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h index cf79340006dd..a991d568c43a 100644 --- a/drivers/clk/meson/clk-mpll.h +++ b/drivers/clk/meson/clk-mpll.h @@ -18,11 +18,14 @@ struct meson_clk_mpll_data { struct parm n2; struct parm ssen; struct parm misc; + const struct reg_sequence *init_regs; + unsigned int init_count; spinlock_t *lock; u8 flags; }; #define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0) +#define CLK_MESON_MPLL_SPREAD_SPECTRUM BIT(1) extern const struct clk_ops meson_clk_mpll_ro_ops; extern const struct clk_ops meson_clk_mpll_ops; diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 206fafd299ea..db1c4ed9d54e 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -150,6 +150,57 @@ static struct clk_regmap g12a_sys_pll = { }, }; +static struct clk_regmap g12b_sys1_pll_dco = { + .data = &(struct meson_clk_pll_data){ + .en = { + .reg_off = HHI_SYS1_PLL_CNTL0, + .shift = 28, + .width = 1, + }, + .m = { + .reg_off = HHI_SYS1_PLL_CNTL0, + .shift = 0, + .width = 8, + }, + .n = { + .reg_off = HHI_SYS1_PLL_CNTL0, + .shift = 10, + .width = 5, + }, + .l = { + .reg_off = HHI_SYS1_PLL_CNTL0, + .shift = 31, + .width = 1, + }, + .rst = { + .reg_off = HHI_SYS1_PLL_CNTL0, + .shift = 29, + .width = 1, + }, + }, + .hw.init = &(struct clk_init_data){ + .name = "sys1_pll_dco", + .ops = &meson_clk_pll_ro_ops, + .parent_names = (const char *[]){ IN_PREFIX "xtal" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12b_sys1_pll = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS1_PLL_CNTL0, + .shift = 16, + .width = 3, + .flags = CLK_DIVIDER_POWER_OF_TWO, + }, + .hw.init = &(struct clk_init_data){ + .name = "sys1_pll", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "sys1_pll_dco" }, + .num_parents = 1, + }, +}; + static struct clk_regmap g12a_sys_pll_div16_en = { .data = &(struct clk_regmap_gate_data){ .offset = HHI_SYS_CPU_CLK_CNTL1, @@ -167,6 +218,23 @@ static struct clk_regmap g12a_sys_pll_div16_en = { }, }; +static struct clk_regmap g12b_sys1_pll_div16_en = { + .data = 
&(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data) { + .name = "sys1_pll_div16_en", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "sys1_pll" }, + .num_parents = 1, + /* + * This clock is used to debug the sys_pll range + * Linux should not change it at runtime + */ + }, +}; + static struct clk_fixed_factor g12a_sys_pll_div16 = { .mult = 1, .div = 16, @@ -178,6 +246,17 @@ static struct clk_fixed_factor g12a_sys_pll_div16 = { }, }; +static struct clk_fixed_factor g12b_sys1_pll_div16 = { + .mult = 1, + .div = 16, + .hw.init = &(struct clk_init_data){ + .name = "sys1_pll_div16", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "sys1_pll_div16_en" }, + .num_parents = 1, + }, +}; + /* Datasheet names this field as "premux0" */ static struct clk_regmap g12a_cpu_clk_premux0 = { .data = &(struct clk_regmap_mux_data){ @@ -306,6 +385,150 @@ static struct clk_regmap g12a_cpu_clk = { }, }; +/* Datasheet names this field as "Final_mux_sel" */ +static struct clk_regmap g12b_cpu_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPU_CLK_CNTL0, + .mask = 0x1, + .shift = 11, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpu_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpu_clk_dyn", + "sys1_pll" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "premux0" */ +static struct clk_regmap g12b_cpub_clk_premux0 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .mask = 0x3, + .shift = 0, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn0_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ IN_PREFIX "xtal", + "fclk_div2", + "fclk_div3" }, + .num_parents = 3, + }, +}; + +/* Datasheet names this field as "mux0_divn_tcnt" */ +static struct clk_regmap g12b_cpub_clk_mux0_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .shift = 4, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn0_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_dyn0_sel" }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "postmux0" */ +static struct clk_regmap g12b_cpub_clk_postmux0 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .mask = 0x1, + .shift = 2, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn0", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_dyn0_sel", + "cpub_clk_dyn0_div" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "premux1" */ +static struct clk_regmap g12b_cpub_clk_premux1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .mask = 0x3, + .shift = 16, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn1_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ IN_PREFIX "xtal", + "fclk_div2", + "fclk_div3" }, + .num_parents = 3, + }, +}; + +/* Datasheet names this field as "Mux1_divn_tcnt" */ +static struct clk_regmap g12b_cpub_clk_mux1_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .shift = 20, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn1_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_dyn1_sel" }, + .num_parents = 1, + }, +}; + +/* Datasheet names this field as "postmux1" 
*/ +static struct clk_regmap g12b_cpub_clk_postmux1 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .mask = 0x1, + .shift = 18, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn1", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_dyn1_sel", + "cpub_clk_dyn1_div" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Final_dyn_mux_sel" */ +static struct clk_regmap g12b_cpub_clk_dyn = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .mask = 0x1, + .shift = 10, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_dyn", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_dyn0", + "cpub_clk_dyn1" }, + .num_parents = 2, + }, +}; + +/* Datasheet names this field as "Final_mux_sel" */ +static struct clk_regmap g12b_cpub_clk = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL, + .mask = 0x1, + .shift = 11, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_dyn", + "sys_pll" }, + .num_parents = 2, + }, +}; + static struct clk_regmap g12a_cpu_clk_div16_en = { .data = &(struct clk_regmap_gate_data){ .offset = HHI_SYS_CPU_CLK_CNTL1, @@ -323,6 +546,23 @@ static struct clk_regmap g12a_cpu_clk_div16_en = { }, }; +static struct clk_regmap g12b_cpub_clk_div16_en = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .bit_idx = 1, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpub_clk_div16_en", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + /* + * This clock is used to debug the cpu_clk range + * Linux should not change it at runtime + */ + }, +}; + static struct clk_fixed_factor g12a_cpu_clk_div16 = { .mult = 1, .div = 16, @@ -334,6 +574,17 @@ static struct clk_fixed_factor g12a_cpu_clk_div16 = { }, }; +static struct clk_fixed_factor g12b_cpub_clk_div16 = { + .mult = 1, + .div = 16, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div16", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk_div16_en" }, + .num_parents = 1, + }, +}; + static struct clk_regmap g12a_cpu_clk_apb_div = { .data = &(struct clk_regmap_div_data){ .offset = HHI_SYS_CPU_CLK_CNTL1, @@ -462,6 +713,240 @@ static struct clk_regmap g12a_cpu_clk_trace = { }, }; +static struct clk_fixed_factor g12b_cpub_clk_div2 = { + .mult = 1, + .div = 2, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div2", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12b_cpub_clk_div3 = { + .mult = 1, + .div = 3, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div3", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12b_cpub_clk_div4 = { + .mult = 1, + .div = 4, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div4", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12b_cpub_clk_div5 = { + .mult = 1, + .div = 5, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div5", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12b_cpub_clk_div6 = { + 
.mult = 1, + .div = 6, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div6", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12b_cpub_clk_div7 = { + .mult = 1, + .div = 7, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div7", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static struct clk_fixed_factor g12b_cpub_clk_div8 = { + .mult = 1, + .div = 8, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_div8", + .ops = &clk_fixed_factor_ops, + .parent_names = (const char *[]){ "cpub_clk" }, + .num_parents = 1, + }, +}; + +static u32 mux_table_cpub[] = { 1, 2, 3, 4, 5, 6, 7 }; +static struct clk_regmap g12b_cpub_clk_apb_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .mask = 7, + .shift = 3, + .table = mux_table_cpub, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_apb_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_div2", + "cpub_clk_div3", + "cpub_clk_div4", + "cpub_clk_div5", + "cpub_clk_div6", + "cpub_clk_div7", + "cpub_clk_div8" }, + .num_parents = 7, + }, +}; + +static struct clk_regmap g12b_cpub_clk_apb = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .bit_idx = 16, + .flags = CLK_GATE_SET_TO_DISABLE, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpub_clk_apb", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_apb_sel" }, + .num_parents = 1, + /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_regmap g12b_cpub_clk_atb_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .mask = 7, + .shift = 6, + .table = mux_table_cpub, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_atb_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_div2", + "cpub_clk_div3", + "cpub_clk_div4", + "cpub_clk_div5", + "cpub_clk_div6", + "cpub_clk_div7", + "cpub_clk_div8" }, + .num_parents = 7, + }, +}; + +static struct clk_regmap g12b_cpub_clk_atb = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .bit_idx = 17, + .flags = CLK_GATE_SET_TO_DISABLE, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpub_clk_atb", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_atb_sel" }, + .num_parents = 1, + /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_regmap g12b_cpub_clk_axi_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .mask = 7, + .shift = 9, + .table = mux_table_cpub, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_axi_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_div2", + "cpub_clk_div3", + "cpub_clk_div4", + "cpub_clk_div5", + "cpub_clk_div6", + "cpub_clk_div7", + "cpub_clk_div8" }, + .num_parents = 7, + }, +}; + +static struct clk_regmap g12b_cpub_clk_axi = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .bit_idx = 18, + .flags = CLK_GATE_SET_TO_DISABLE, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpub_clk_axi", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_axi_sel" }, + .num_parents = 1, + /* + * 
This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + +static struct clk_regmap g12b_cpub_clk_trace_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .mask = 7, + .shift = 20, + .table = mux_table_cpub, + }, + .hw.init = &(struct clk_init_data){ + .name = "cpub_clk_trace_sel", + .ops = &clk_regmap_mux_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_div2", + "cpub_clk_div3", + "cpub_clk_div4", + "cpub_clk_div5", + "cpub_clk_div6", + "cpub_clk_div7", + "cpub_clk_div8" }, + .num_parents = 7, + }, +}; + +static struct clk_regmap g12b_cpub_clk_trace = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SYS_CPUB_CLK_CNTL1, + .bit_idx = 23, + .flags = CLK_GATE_SET_TO_DISABLE, + }, + .hw.init = &(struct clk_init_data) { + .name = "cpub_clk_trace", + .ops = &clk_regmap_gate_ro_ops, + .parent_names = (const char *[]){ "cpub_clk_trace_sel" }, + .num_parents = 1, + /* + * This clock is set by the ROM monitor code, + * Linux should not change it at runtime + */ + }, +}; + static const struct pll_mult_range g12a_gp0_pll_mult_range = { .min = 55, .max = 255, @@ -865,6 +1350,16 @@ static struct clk_regmap g12a_fclk_div3 = { .ops = &clk_regmap_gate_ops, .parent_names = (const char *[]){ "fclk_div3_div" }, .num_parents = 1, + /* + * This clock is used by the resident firmware and is required + * by the platform to operate correctly. + * Until the following condition are met, we need this clock to + * be marked as critical: + * a) Mark the clock used by a firmware resource, if possible + * b) CCF has a clock hand-off mechanism to make the sure the + * clock stays on until the proper driver comes along + */ + .flags = CLK_IS_CRITICAL, }, }; @@ -1001,6 +1496,10 @@ static struct clk_fixed_factor g12a_mpll_prediv = { }, }; +static const struct reg_sequence g12a_mpll0_init_regs[] = { + { .reg = HHI_MPLL_CNTL2, .def = 0x40000033 }, +}; + static struct clk_regmap g12a_mpll0_div = { .data = &(struct meson_clk_mpll_data){ .sdm = { @@ -1024,6 +1523,8 @@ static struct clk_regmap g12a_mpll0_div = { .width = 1, }, .lock = &meson_clk_lock, + .init_regs = g12a_mpll0_init_regs, + .init_count = ARRAY_SIZE(g12a_mpll0_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "mpll0_div", @@ -1047,6 +1548,10 @@ static struct clk_regmap g12a_mpll0 = { }, }; +static const struct reg_sequence g12a_mpll1_init_regs[] = { + { .reg = HHI_MPLL_CNTL4, .def = 0x40000033 }, +}; + static struct clk_regmap g12a_mpll1_div = { .data = &(struct meson_clk_mpll_data){ .sdm = { @@ -1070,6 +1575,8 @@ static struct clk_regmap g12a_mpll1_div = { .width = 1, }, .lock = &meson_clk_lock, + .init_regs = g12a_mpll1_init_regs, + .init_count = ARRAY_SIZE(g12a_mpll1_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "mpll1_div", @@ -1093,6 +1600,10 @@ static struct clk_regmap g12a_mpll1 = { }, }; +static const struct reg_sequence g12a_mpll2_init_regs[] = { + { .reg = HHI_MPLL_CNTL6, .def = 0x40000033 }, +}; + static struct clk_regmap g12a_mpll2_div = { .data = &(struct meson_clk_mpll_data){ .sdm = { @@ -1116,6 +1627,8 @@ static struct clk_regmap g12a_mpll2_div = { .width = 1, }, .lock = &meson_clk_lock, + .init_regs = g12a_mpll2_init_regs, + .init_count = ARRAY_SIZE(g12a_mpll2_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "mpll2_div", @@ -1139,6 +1652,10 @@ static struct clk_regmap g12a_mpll2 = { }, }; +static const struct reg_sequence g12a_mpll3_init_regs[] = { + { .reg = HHI_MPLL_CNTL8, .def = 0x40000033 }, +}; + static struct clk_regmap 
g12a_mpll3_div = { .data = &(struct meson_clk_mpll_data){ .sdm = { @@ -1162,6 +1679,8 @@ static struct clk_regmap g12a_mpll3_div = { .width = 1, }, .lock = &meson_clk_lock, + .init_regs = g12a_mpll3_init_regs, + .init_count = ARRAY_SIZE(g12a_mpll3_init_regs), }, .hw.init = &(struct clk_init_data){ .name = "mpll3_div", @@ -2480,6 +2999,33 @@ static struct clk_regmap g12a_mali = { }, }; +static struct clk_regmap g12a_ts_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_TS_CLK_CNTL, + .shift = 0, + .width = 8, + }, + .hw.init = &(struct clk_init_data){ + .name = "ts_div", + .ops = &clk_regmap_divider_ro_ops, + .parent_names = (const char *[]){ "xtal" }, + .num_parents = 1, + }, +}; + +static struct clk_regmap g12a_ts = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_TS_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data){ + .name = "ts", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "ts_div" }, + .num_parents = 1, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0); static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1); @@ -2769,6 +3315,257 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = { [CLKID_VDEC_HEVCF_SEL] = &g12a_vdec_hevcf_sel.hw, [CLKID_VDEC_HEVCF_DIV] = &g12a_vdec_hevcf_div.hw, [CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw, + [CLKID_TS_DIV] = &g12a_ts_div.hw, + [CLKID_TS] = &g12a_ts.hw, + [NR_CLKS] = NULL, + }, + .num = NR_CLKS, +}; + +static struct clk_hw_onecell_data g12b_hw_onecell_data = { + .hws = { + [CLKID_SYS_PLL] = &g12a_sys_pll.hw, + [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw, + [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw, + [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw, + [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw, + [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw, + [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw, + [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw, + [CLKID_GP0_PLL] = &g12a_gp0_pll.hw, + [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw, + [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw, + [CLKID_CLK81] = &g12a_clk81.hw, + [CLKID_MPLL0] = &g12a_mpll0.hw, + [CLKID_MPLL1] = &g12a_mpll1.hw, + [CLKID_MPLL2] = &g12a_mpll2.hw, + [CLKID_MPLL3] = &g12a_mpll3.hw, + [CLKID_DDR] = &g12a_ddr.hw, + [CLKID_DOS] = &g12a_dos.hw, + [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw, + [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw, + [CLKID_ETH_PHY] = &g12a_eth_phy.hw, + [CLKID_ISA] = &g12a_isa.hw, + [CLKID_PL301] = &g12a_pl301.hw, + [CLKID_PERIPHS] = &g12a_periphs.hw, + [CLKID_SPICC0] = &g12a_spicc_0.hw, + [CLKID_I2C] = &g12a_i2c.hw, + [CLKID_SANA] = &g12a_sana.hw, + [CLKID_SD] = &g12a_sd.hw, + [CLKID_RNG0] = &g12a_rng0.hw, + [CLKID_UART0] = &g12a_uart0.hw, + [CLKID_SPICC1] = &g12a_spicc_1.hw, + [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw, + [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw, + [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw, + [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw, + [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw, + [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw, + [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw, + [CLKID_AUDIO] = &g12a_audio.hw, + [CLKID_ETH] = &g12a_eth_core.hw, + [CLKID_DEMUX] = &g12a_demux.hw, + [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw, + [CLKID_ADC] = &g12a_adc.hw, + [CLKID_UART1] = &g12a_uart1.hw, + [CLKID_G2D] = &g12a_g2d.hw, + [CLKID_RESET] = &g12a_reset.hw, + [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw, + [CLKID_PARSER] = &g12a_parser.hw, + [CLKID_USB] = &g12a_usb_general.hw, + [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw, + [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw, + [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw, + [CLKID_AHB_CTRL_BUS] = 
&g12a_ahb_ctrl_bus.hw, + [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw, + [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw, + [CLKID_BT656] = &g12a_bt656.hw, + [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw, + [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw, + [CLKID_UART2] = &g12a_uart2.hw, + [CLKID_VPU_INTR] = &g12a_vpu_intr.hw, + [CLKID_GIC] = &g12a_gic.hw, + [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw, + [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw, + [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw, + [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw, + [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw, + [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw, + [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw, + [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw, + [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw, + [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw, + [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw, + [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw, + [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw, + [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw, + [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw, + [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw, + [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw, + [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw, + [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw, + [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw, + [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw, + [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw, + [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw, + [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw, + [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw, + [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw, + [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw, + [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw, + [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw, + [CLKID_DAC_CLK] = &g12a_dac_clk.hw, + [CLKID_AOCLK] = &g12a_aoclk_gate.hw, + [CLKID_IEC958] = &g12a_iec958_gate.hw, + [CLKID_ENC480P] = &g12a_enc480p.hw, + [CLKID_RNG1] = &g12a_rng1.hw, + [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw, + [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw, + [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw, + [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw, + [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw, + [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw, + [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw, + [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw, + [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw, + [CLKID_DMA] = &g12a_dma.hw, + [CLKID_EFUSE] = &g12a_efuse.hw, + [CLKID_ROM_BOOT] = &g12a_rom_boot.hw, + [CLKID_RESET_SEC] = &g12a_reset_sec.hw, + [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw, + [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw, + [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw, + [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw, + [CLKID_VPU_0] = &g12a_vpu_0.hw, + [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw, + [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw, + [CLKID_VPU_1] = &g12a_vpu_1.hw, + [CLKID_VPU] = &g12a_vpu.hw, + [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw, + [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw, + [CLKID_VAPB_0] = &g12a_vapb_0.hw, + [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw, + [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw, + [CLKID_VAPB_1] = &g12a_vapb_1.hw, + [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw, + [CLKID_VAPB] = &g12a_vapb.hw, + [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw, + [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw, + [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw, + [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw, + [CLKID_VID_PLL] = &g12a_vid_pll_div.hw, + [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw, + [CLKID_VID_PLL_DIV] = &g12a_vid_pll.hw, + [CLKID_VCLK_SEL] 
= &g12a_vclk_sel.hw, + [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw, + [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw, + [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw, + [CLKID_VCLK_DIV] = &g12a_vclk_div.hw, + [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw, + [CLKID_VCLK] = &g12a_vclk.hw, + [CLKID_VCLK2] = &g12a_vclk2.hw, + [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw, + [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw, + [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw, + [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw, + [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw, + [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw, + [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw, + [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw, + [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw, + [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw, + [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw, + [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw, + [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw, + [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw, + [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw, + [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw, + [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw, + [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw, + [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw, + [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw, + [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw, + [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw, + [CLKID_CTS_ENCI] = &g12a_cts_enci.hw, + [CLKID_CTS_ENCP] = &g12a_cts_encp.hw, + [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw, + [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw, + [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw, + [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw, + [CLKID_HDMI] = &g12a_hdmi.hw, + [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw, + [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw, + [CLKID_MALI_0] = &g12a_mali_0.hw, + [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw, + [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw, + [CLKID_MALI_1] = &g12a_mali_1.hw, + [CLKID_MALI] = &g12a_mali.hw, + [CLKID_MPLL_50M_DIV] = &g12a_mpll_50m_div.hw, + [CLKID_MPLL_50M] = &g12a_mpll_50m.hw, + [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw, + [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw, + [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw, + [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw, + [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw, + [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw, + [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw, + [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw, + [CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw, + [CLKID_CPU_CLK] = &g12b_cpu_clk.hw, + [CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw, + [CLKID_CPU_CLK_DIV16] = &g12a_cpu_clk_div16.hw, + [CLKID_CPU_CLK_APB_DIV] = &g12a_cpu_clk_apb_div.hw, + [CLKID_CPU_CLK_APB] = &g12a_cpu_clk_apb.hw, + [CLKID_CPU_CLK_ATB_DIV] = &g12a_cpu_clk_atb_div.hw, + [CLKID_CPU_CLK_ATB] = &g12a_cpu_clk_atb.hw, + [CLKID_CPU_CLK_AXI_DIV] = &g12a_cpu_clk_axi_div.hw, + [CLKID_CPU_CLK_AXI] = &g12a_cpu_clk_axi.hw, + [CLKID_CPU_CLK_TRACE_DIV] = &g12a_cpu_clk_trace_div.hw, + [CLKID_CPU_CLK_TRACE] = &g12a_cpu_clk_trace.hw, + [CLKID_PCIE_PLL_DCO] = &g12a_pcie_pll_dco.hw, + [CLKID_PCIE_PLL_DCO_DIV2] = &g12a_pcie_pll_dco_div2.hw, + [CLKID_PCIE_PLL_OD] = &g12a_pcie_pll_od.hw, + [CLKID_PCIE_PLL] = &g12a_pcie_pll.hw, + [CLKID_VDEC_1_SEL] = &g12a_vdec_1_sel.hw, + [CLKID_VDEC_1_DIV] = &g12a_vdec_1_div.hw, + [CLKID_VDEC_1] = &g12a_vdec_1.hw, + [CLKID_VDEC_HEVC_SEL] = &g12a_vdec_hevc_sel.hw, + [CLKID_VDEC_HEVC_DIV] = &g12a_vdec_hevc_div.hw, + [CLKID_VDEC_HEVC] = &g12a_vdec_hevc.hw, + [CLKID_VDEC_HEVCF_SEL] = &g12a_vdec_hevcf_sel.hw, + [CLKID_VDEC_HEVCF_DIV] = &g12a_vdec_hevcf_div.hw, + 
[CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw, + [CLKID_TS_DIV] = &g12a_ts_div.hw, + [CLKID_TS] = &g12a_ts.hw, + [CLKID_SYS1_PLL_DCO] = &g12b_sys1_pll_dco.hw, + [CLKID_SYS1_PLL] = &g12b_sys1_pll.hw, + [CLKID_SYS1_PLL_DIV16_EN] = &g12b_sys1_pll_div16_en.hw, + [CLKID_SYS1_PLL_DIV16] = &g12b_sys1_pll_div16.hw, + [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_premux0.hw, + [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_mux0_div.hw, + [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_postmux0.hw, + [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_premux1.hw, + [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_mux1_div.hw, + [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_postmux1.hw, + [CLKID_CPUB_CLK_DYN] = &g12b_cpub_clk_dyn.hw, + [CLKID_CPUB_CLK] = &g12b_cpub_clk.hw, + [CLKID_CPUB_CLK_DIV16_EN] = &g12b_cpub_clk_div16_en.hw, + [CLKID_CPUB_CLK_DIV16] = &g12b_cpub_clk_div16.hw, + [CLKID_CPUB_CLK_DIV2] = &g12b_cpub_clk_div2.hw, + [CLKID_CPUB_CLK_DIV3] = &g12b_cpub_clk_div3.hw, + [CLKID_CPUB_CLK_DIV4] = &g12b_cpub_clk_div4.hw, + [CLKID_CPUB_CLK_DIV5] = &g12b_cpub_clk_div5.hw, + [CLKID_CPUB_CLK_DIV6] = &g12b_cpub_clk_div6.hw, + [CLKID_CPUB_CLK_DIV7] = &g12b_cpub_clk_div7.hw, + [CLKID_CPUB_CLK_DIV8] = &g12b_cpub_clk_div8.hw, + [CLKID_CPUB_CLK_APB_SEL] = &g12b_cpub_clk_apb_sel.hw, + [CLKID_CPUB_CLK_APB] = &g12b_cpub_clk_apb.hw, + [CLKID_CPUB_CLK_ATB_SEL] = &g12b_cpub_clk_atb_sel.hw, + [CLKID_CPUB_CLK_ATB] = &g12b_cpub_clk_atb.hw, + [CLKID_CPUB_CLK_AXI_SEL] = &g12b_cpub_clk_axi_sel.hw, + [CLKID_CPUB_CLK_AXI] = &g12b_cpub_clk_axi.hw, + [CLKID_CPUB_CLK_TRACE_SEL] = &g12b_cpub_clk_trace_sel.hw, + [CLKID_CPUB_CLK_TRACE] = &g12b_cpub_clk_trace.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -2966,16 +3763,52 @@ static struct clk_regmap *const g12a_clk_regmaps[] = { &g12a_vdec_hevcf_sel, &g12a_vdec_hevcf_div, &g12a_vdec_hevcf, + &g12a_ts_div, + &g12a_ts, + &g12b_cpu_clk, + &g12b_sys1_pll_dco, + &g12b_sys1_pll, + &g12b_sys1_pll_div16_en, + &g12b_cpub_clk_premux0, + &g12b_cpub_clk_mux0_div, + &g12b_cpub_clk_postmux0, + &g12b_cpub_clk_premux1, + &g12b_cpub_clk_mux1_div, + &g12b_cpub_clk_postmux1, + &g12b_cpub_clk_dyn, + &g12b_cpub_clk, + &g12b_cpub_clk_div16_en, + &g12b_cpub_clk_apb_sel, + &g12b_cpub_clk_apb, + &g12b_cpub_clk_atb_sel, + &g12b_cpub_clk_atb, + &g12b_cpub_clk_axi_sel, + &g12b_cpub_clk_axi, + &g12b_cpub_clk_trace_sel, + &g12b_cpub_clk_trace, +}; + +static const struct reg_sequence g12a_init_regs[] = { + { .reg = HHI_MPLL_CNTL0, .def = 0x00000543 }, }; static const struct meson_eeclkc_data g12a_clkc_data = { .regmap_clks = g12a_clk_regmaps, .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps), - .hw_onecell_data = &g12a_hw_onecell_data + .hw_onecell_data = &g12a_hw_onecell_data, + .init_regs = g12a_init_regs, + .init_count = ARRAY_SIZE(g12a_init_regs), +}; + +static const struct meson_eeclkc_data g12b_clkc_data = { + .regmap_clks = g12a_clk_regmaps, + .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps), + .hw_onecell_data = &g12b_hw_onecell_data }; static const struct of_device_id clkc_match_table[] = { { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data }, + { .compatible = "amlogic,g12b-clkc", .data = &g12b_clkc_data }, {} }; diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h index bcc05cd9882f..c8aed31fbe17 100644 --- a/drivers/clk/meson/g12a.h +++ b/drivers/clk/meson/g12a.h @@ -69,6 +69,8 @@ #define HHI_VDEC4_CLK_CNTL 0x1EC #define HHI_HDCP22_CLK_CNTL 0x1F0 #define HHI_VAPBCLK_CNTL 0x1F4 +#define HHI_SYS_CPUB_CLK_CNTL1 0x200 +#define HHI_SYS_CPUB_CLK_CNTL 0x208 #define HHI_VPU_CLKB_CNTL 0x20C #define HHI_GEN_CLK_CNTL 0x228 #define 
HHI_VDIN_MEAS_CLK_CNTL 0x250 @@ -102,6 +104,13 @@ #define HHI_HDMI_PLL_CNTL5 0x334 #define HHI_HDMI_PLL_CNTL6 0x338 #define HHI_SPICC_CLK_CNTL 0x3dc +#define HHI_SYS1_PLL_CNTL0 0x380 +#define HHI_SYS1_PLL_CNTL1 0x384 +#define HHI_SYS1_PLL_CNTL2 0x388 +#define HHI_SYS1_PLL_CNTL3 0x38c +#define HHI_SYS1_PLL_CNTL4 0x390 +#define HHI_SYS1_PLL_CNTL5 0x394 +#define HHI_SYS1_PLL_CNTL6 0x398 /* * CLKID index values @@ -195,8 +204,38 @@ #define CLKID_VDEC_HEVC_DIV 206 #define CLKID_VDEC_HEVCF_SEL 208 #define CLKID_VDEC_HEVCF_DIV 209 +#define CLKID_TS_DIV 211 +#define CLKID_SYS1_PLL_DCO 213 +#define CLKID_SYS1_PLL 214 +#define CLKID_SYS1_PLL_DIV16_EN 215 +#define CLKID_SYS1_PLL_DIV16 216 +#define CLKID_CPUB_CLK_DYN0_SEL 217 +#define CLKID_CPUB_CLK_DYN0_DIV 218 +#define CLKID_CPUB_CLK_DYN0 219 +#define CLKID_CPUB_CLK_DYN1_SEL 220 +#define CLKID_CPUB_CLK_DYN1_DIV 221 +#define CLKID_CPUB_CLK_DYN1 222 +#define CLKID_CPUB_CLK_DYN 223 +#define CLKID_CPUB_CLK 224 +#define CLKID_CPUB_CLK_DIV16_EN 225 +#define CLKID_CPUB_CLK_DIV16 226 +#define CLKID_CPUB_CLK_DIV2 227 +#define CLKID_CPUB_CLK_DIV3 228 +#define CLKID_CPUB_CLK_DIV4 229 +#define CLKID_CPUB_CLK_DIV5 230 +#define CLKID_CPUB_CLK_DIV6 231 +#define CLKID_CPUB_CLK_DIV7 232 +#define CLKID_CPUB_CLK_DIV8 233 +#define CLKID_CPUB_CLK_APB_SEL 234 +#define CLKID_CPUB_CLK_APB 235 +#define CLKID_CPUB_CLK_ATB_SEL 236 +#define CLKID_CPUB_CLK_ATB 237 +#define CLKID_CPUB_CLK_AXI_SEL 238 +#define CLKID_CPUB_CLK_AXI 239 +#define CLKID_CPUB_CLK_TRACE_SEL 240 +#define CLKID_CPUB_CLK_TRACE 241 -#define NR_CLKS 211 +#define NR_CLKS 242 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/g12a-clkc.h> diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 29ffb4fde714..dab16d9b1af8 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -679,11 +679,6 @@ static struct clk_regmap gxbb_mpll0_div = { .shift = 16, .width = 9, }, - .ssen = { - .reg_off = HHI_MPLL_CNTL, - .shift = 25, - .width = 1, - }, .lock = &meson_clk_lock, }, .hw.init = &(struct clk_init_data){ diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c index 37a34c9c3885..6ba2094be257 100644 --- a/drivers/clk/meson/meson-eeclk.c +++ b/drivers/clk/meson/meson-eeclk.c @@ -34,6 +34,9 @@ int meson_eeclkc_probe(struct platform_device *pdev) return PTR_ERR(map); } + if (data->init_count) + regmap_multi_reg_write(map, data->init_regs, data->init_count); + input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0); if (IS_ERR(input)) { ret = PTR_ERR(input); diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h index 1b809b1419fe..9ab5d6fa7ccb 100644 --- a/drivers/clk/meson/meson-eeclk.h +++ b/drivers/clk/meson/meson-eeclk.h @@ -17,6 +17,8 @@ struct platform_device; struct meson_eeclkc_data { struct clk_regmap *const *regmap_clks; unsigned int regmap_clk_num; + const struct reg_sequence *init_regs; + unsigned int init_count; struct clk_hw_onecell_data *hw_onecell_data; }; diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 62cd3a7f1f65..537219fa573e 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -2153,6 +2153,132 @@ static struct clk_regmap meson8b_vdec_hevc = { }, }; +/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */ +static const char * const meson8b_cts_amclk_parent_names[] = { + "mpll0", "mpll1", "mpll2" +}; + +static u32 meson8b_cts_amclk_mux_table[] = { 1, 2, 3 }; + +static struct clk_regmap 
meson8b_cts_amclk_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_AUD_CLK_CNTL, + .mask = 0x3, + .shift = 9, + .table = meson8b_cts_amclk_mux_table, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "cts_amclk_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = meson8b_cts_amclk_parent_names, + .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_names), + }, +}; + +static struct clk_regmap meson8b_cts_amclk_div = { + .data = &(struct clk_regmap_div_data) { + .offset = HHI_AUD_CLK_CNTL, + .shift = 0, + .width = 8, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data){ + .name = "cts_amclk_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "cts_amclk_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_cts_amclk = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_AUD_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data){ + .name = "cts_amclk", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "cts_amclk_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */ +static const char * const meson8b_cts_mclk_i958_parent_names[] = { + "mpll0", "mpll1", "mpll2" +}; + +static u32 meson8b_cts_mclk_i958_mux_table[] = { 1, 2, 3 }; + +static struct clk_regmap meson8b_cts_mclk_i958_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_AUD_CLK_CNTL2, + .mask = 0x3, + .shift = 25, + .table = meson8b_cts_mclk_i958_mux_table, + .flags = CLK_MUX_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data) { + .name = "cts_mclk_i958_sel", + .ops = &clk_regmap_mux_ops, + .parent_names = meson8b_cts_mclk_i958_parent_names, + .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_names), + }, +}; + +static struct clk_regmap meson8b_cts_mclk_i958_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_AUD_CLK_CNTL2, + .shift = 16, + .width = 8, + .flags = CLK_DIVIDER_ROUND_CLOSEST, + }, + .hw.init = &(struct clk_init_data) { + .name = "cts_mclk_i958_div", + .ops = &clk_regmap_divider_ops, + .parent_names = (const char *[]){ "cts_mclk_i958_sel" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_cts_mclk_i958 = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_AUD_CLK_CNTL2, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data){ + .name = "cts_mclk_i958", + .ops = &clk_regmap_gate_ops, + .parent_names = (const char *[]){ "cts_mclk_i958_div" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap meson8b_cts_i958 = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_AUD_CLK_CNTL2, + .mask = 0x1, + .shift = 27, + }, + .hw.init = &(struct clk_init_data){ + .name = "cts_i958", + .ops = &clk_regmap_mux_ops, + .parent_names = (const char *[]){ "cts_amclk", + "cts_mclk_i958" }, + .num_parents = 2, + /* + * The parent is specific to origin of the audio data. Let the + * consumer choose the appropriate parent. 
+ */ + .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, + }, +}; + /* Everything Else (EE) domain gates */ static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0); @@ -2432,6 +2558,13 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = { [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw, [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw, [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw, + [CLKID_CTS_AMCLK_SEL] = &meson8b_cts_amclk_sel.hw, + [CLKID_CTS_AMCLK_DIV] = &meson8b_cts_amclk_div.hw, + [CLKID_CTS_AMCLK] = &meson8b_cts_amclk.hw, + [CLKID_CTS_MCLK_I958_SEL] = &meson8b_cts_mclk_i958_sel.hw, + [CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw, + [CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw, + [CLKID_CTS_I958] = &meson8b_cts_i958.hw, [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, @@ -2641,6 +2774,13 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw, [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw, [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw, + [CLKID_CTS_AMCLK_SEL] = &meson8b_cts_amclk_sel.hw, + [CLKID_CTS_AMCLK_DIV] = &meson8b_cts_amclk_div.hw, + [CLKID_CTS_AMCLK] = &meson8b_cts_amclk.hw, + [CLKID_CTS_MCLK_I958_SEL] = &meson8b_cts_mclk_i958_sel.hw, + [CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw, + [CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw, + [CLKID_CTS_I958] = &meson8b_cts_i958.hw, [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, @@ -2852,6 +2992,13 @@ static struct clk_hw_onecell_data meson8m2_hw_onecell_data = { [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw, [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw, [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw, + [CLKID_CTS_AMCLK_SEL] = &meson8b_cts_amclk_sel.hw, + [CLKID_CTS_AMCLK_DIV] = &meson8b_cts_amclk_div.hw, + [CLKID_CTS_AMCLK] = &meson8b_cts_amclk.hw, + [CLKID_CTS_MCLK_I958_SEL] = &meson8b_cts_mclk_i958_sel.hw, + [CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw, + [CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw, + [CLKID_CTS_I958] = &meson8b_cts_i958.hw, [CLK_NR_CLKS] = NULL, }, .num = CLK_NR_CLKS, @@ -3041,6 +3188,13 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { &meson8b_vdec_hevc_div, &meson8b_vdec_hevc_en, &meson8b_vdec_hevc, + &meson8b_cts_amclk, + &meson8b_cts_amclk_sel, + &meson8b_cts_amclk_div, + &meson8b_cts_mclk_i958_sel, + &meson8b_cts_mclk_i958_div, + &meson8b_cts_mclk_i958, + &meson8b_cts_i958, }; static const struct meson8b_clk_reset_line { diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index ed37196187e6..c889fbeec30f 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -30,7 +30,9 @@ #define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */ #define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */ #define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */ +#define HHI_AUD_CLK_CNTL 0x178 /* 0x5e offset in data sheet */ #define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */ +#define HHI_AUD_CLK_CNTL2 0x190 /* 0x64 offset in data sheet */ #define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */ #define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ #define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ @@ -171,8 +173,12 @@ #define CLKID_VDEC_HEVC_SEL 203 #define CLKID_VDEC_HEVC_DIV 204 #define CLKID_VDEC_HEVC_EN 205 +#define CLKID_CTS_AMCLK_SEL 207 +#define CLKID_CTS_AMCLK_DIV 208 +#define CLKID_CTS_MCLK_I958_SEL 210 +#define CLKID_CTS_MCLK_I958_DIV 211 -#define CLK_NR_CLKS 207 +#define CLK_NR_CLKS 
214 /* * include the CLKID and RESETID that have diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c index cb43d54735b0..90bf181f191a 100644 --- a/drivers/clk/mmp/clk-frac.c +++ b/drivers/clk/mmp/clk-frac.c @@ -78,11 +78,10 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate, struct mmp_clk_factor_masks *masks = factor->masks; int i; unsigned long val; - unsigned long prev_rate, rate = 0; + unsigned long rate = 0; unsigned long flags = 0; for (i = 0; i < factor->ftbl_cnt; i++) { - prev_rate = rate; rate = (((prate / 10000) * factor->ftbl[i].den) / (factor->ftbl[i].num * factor->masks->factor)) * 10000; if (rate > drate) diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index 35af3aa18f1c..47680237d0be 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c @@ -185,6 +185,11 @@ static void __init mv88f6180_get_clk_ratio( } } +static u32 __init mv98dx1135_get_tclk_freq(void __iomem *sar) +{ + return 166666667; +} + static const struct coreclk_soc_desc kirkwood_coreclks = { .get_tclk_freq = kirkwood_get_tclk_freq, .get_cpu_freq = kirkwood_get_cpu_freq, @@ -201,6 +206,14 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = { .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), }; +static const struct coreclk_soc_desc mv98dx1135_coreclks = { + .get_tclk_freq = mv98dx1135_get_tclk_freq, + .get_cpu_freq = kirkwood_get_cpu_freq, + .get_clk_ratio = kirkwood_get_clk_ratio, + .ratios = kirkwood_coreclk_ratios, + .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), +}; + /* * Clock Gating Control */ @@ -325,6 +338,8 @@ static void __init kirkwood_clk_init(struct device_node *np) if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock")) mvebu_coreclk_setup(np, &mv88f6180_coreclks); + else if (of_device_is_compatible(np, "marvell,mv98dx1135-core-clock")) + mvebu_coreclk_setup(np, &mv98dx1135_coreclks); else mvebu_coreclk_setup(np, &kirkwood_coreclks); @@ -339,3 +354,5 @@ CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", kirkwood_clk_init); CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock", kirkwood_clk_init); +CLK_OF_DECLARE(98dx1135_clk, "marvell,mv98dx1135-core-clock", + kirkwood_clk_init); diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index d2f39a972cad..d004cdaa0e39 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -130,22 +130,6 @@ static const char * const gcc_xo_gpll0_gpll4_gpll0_early_div[] = { "gpll0_early_div" }; -static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map[] = { - { P_XO, 0 }, - { P_GPLL0, 1 }, - { P_GPLL2, 2 }, - { P_GPLL3, 3 }, - { P_GPLL0_EARLY_DIV, 6 } -}; - -static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div[] = { - "xo", - "gpll0", - "gpll2", - "gpll3", - "gpll0_early_div" -}; - static const struct parent_map gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, @@ -184,26 +168,6 @@ static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early "gpll0_early_div" }; -static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map[] = { - { P_XO, 0 }, - { P_GPLL0, 1 }, - { P_GPLL2, 2 }, - { P_GPLL3, 3 }, - { P_GPLL1, 4 }, - { P_GPLL4, 5 }, - { P_GPLL0_EARLY_DIV, 6 } -}; - -static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div[] = { - "xo", - "gpll0", - "gpll2", - "gpll3", - "gpll1", - "gpll4", - "gpll0_early_div" -}; - static struct clk_fixed_factor xo = { 
.mult = 1, .div = 1, diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index a54807eb3b28..29cf464dd2c8 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -2766,6 +2766,13 @@ static const struct qcom_reset_map gcc_qcs404_resets[] = { [GCC_PCIE_0_PHY_BCR] = { 0x3e004 }, [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x3e038 }, [GCC_PCIEPHY_0_PHY_BCR] = { 0x3e03c }, + [GCC_PCIE_0_AXI_MASTER_STICKY_ARES] = { 0x3e040, 6}, + [GCC_PCIE_0_AHB_ARES] = { 0x3e040, 5 }, + [GCC_PCIE_0_AXI_SLAVE_ARES] = { 0x3e040, 4 }, + [GCC_PCIE_0_AXI_MASTER_ARES] = { 0x3e040, 3 }, + [GCC_PCIE_0_CORE_STICKY_ARES] = { 0x3e040, 2 }, + [GCC_PCIE_0_SLEEP_ARES] = { 0x3e040, 1 }, + [GCC_PCIE_0_PIPE_ARES] = { 0x3e040, 0 }, [GCC_EMAC_BCR] = { 0x4e000 }, }; diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 679bc7d8950a..a250f59708d8 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -141,7 +141,9 @@ static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status) udelay(1); } - return gdsc_poll_status(sc, status); + ret = gdsc_poll_status(sc, status); + WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n"); + return ret; } static inline int gdsc_deassert_reset(struct gdsc *sc) diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c index e98a9f5b3c90..5ca183e70166 100644 --- a/drivers/clk/renesas/clk-div6.c +++ b/drivers/clk/renesas/clk-div6.c @@ -30,8 +30,8 @@ * @div: divisor value (1-64) * @src_shift: Shift to access the register bits to select the parent clock * @src_width: Number of register bits to select the parent clock (may be 0) - * @parents: Array to map from valid parent clocks indices to hardware indices * @nb: Notifier block to save/restore clock state for system resume + * @parents: Array to map from valid parent clocks indices to hardware indices */ struct div6_clock { struct clk_hw hw; @@ -39,8 +39,8 @@ struct div6_clock { unsigned int div; u32 src_shift; u32 src_width; - u8 *parents; struct notifier_block nb; + u8 parents[]; }; #define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw) @@ -221,17 +221,10 @@ struct clk * __init cpg_div6_register(const char *name, struct clk *clk; unsigned int i; - clock = kzalloc(sizeof(*clock), GFP_KERNEL); + clock = kzalloc(struct_size(clock, parents, num_parents), GFP_KERNEL); if (!clock) return ERR_PTR(-ENOMEM); - clock->parents = kmalloc_array(num_parents, sizeof(*clock->parents), - GFP_KERNEL); - if (!clock->parents) { - clk = ERR_PTR(-ENOMEM); - goto free_clock; - } - clock->reg = reg; /* @@ -259,7 +252,7 @@ struct clk * __init cpg_div6_register(const char *name, pr_err("%s: invalid number of parents for DIV6 clock %s\n", __func__, name); clk = ERR_PTR(-EINVAL); - goto free_parents; + goto free_clock; } /* Filter out invalid parents */ @@ -282,7 +275,7 @@ struct clk * __init cpg_div6_register(const char *name, clk = clk_register(NULL, &clock->hw); if (IS_ERR(clk)) - goto free_parents; + goto free_clock; if (notifiers) { clock->nb.notifier_call = cpg_div6_clock_notifier_call; @@ -291,8 +284,6 @@ struct clk * __init cpg_div6_register(const char *name, return clk; -free_parents: - kfree(clock->parents); free_clock: kfree(clock); return clk; diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index 92ece221b0d4..2db9093546c6 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -30,11 +30,12 @@ /** * struct mstp_clock_group - MSTP gating clocks group * - * @data: clocks in this group + * 
@data: clock specifier translation for clocks in this group * @smstpcr: module stop control register * @mstpsr: module stop status register (optional) * @lock: protects writes to SMSTPCR * @width_8bit: registers are 8-bit, not 32-bit + * @clks: clocks in this group */ struct mstp_clock_group { struct clk_onecell_data data; @@ -42,6 +43,7 @@ struct mstp_clock_group { void __iomem *mstpsr; spinlock_t lock; bool width_8bit; + struct clk *clks[]; }; /** @@ -186,14 +188,13 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) struct clk **clks; unsigned int i; - group = kzalloc(sizeof(*group), GFP_KERNEL); - clks = kmalloc_array(MSTP_MAX_CLOCKS, sizeof(*clks), GFP_KERNEL); - if (group == NULL || clks == NULL) { + group = kzalloc(struct_size(group, clks, MSTP_MAX_CLOCKS), GFP_KERNEL); + if (group == NULL) { kfree(group); - kfree(clks); return; } + clks = group->clks; spin_lock_init(&group->lock); group->data.clks = clks; @@ -203,7 +204,6 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) if (group->smstpcr == NULL) { pr_err("%s: failed to remap SMSTPCR\n", __func__); kfree(group); - kfree(clks); return; } @@ -297,16 +297,12 @@ found: return PTR_ERR(clk); error = pm_clk_create(dev); - if (error) { - dev_err(dev, "pm_clk_create failed %d\n", error); + if (error) goto fail_put; - } error = pm_clk_add_clk(dev, clk); - if (error) { - dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, error); + if (error) goto fail_destroy; - } return 0; diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c index 76ed7d1bae36..e05bfa200480 100644 --- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c @@ -113,6 +113,11 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = { }; static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = { + DEF_MOD("tmu4", 121, R8A774A1_CLK_S0D6), + DEF_MOD("tmu3", 122, R8A774A1_CLK_S3D2), + DEF_MOD("tmu2", 123, R8A774A1_CLK_S3D2), + DEF_MOD("tmu1", 124, R8A774A1_CLK_S3D2), + DEF_MOD("tmu0", 125, R8A774A1_CLK_CP), DEF_MOD("fdp1-0", 119, R8A774A1_CLK_S0D1), DEF_MOD("scif5", 202, R8A774A1_CLK_S3D4), DEF_MOD("scif4", 203, R8A774A1_CLK_S3D4), diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index 9e9a6f2c31e8..fbc8c75f4314 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -138,6 +138,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("cmt2", 301, R8A7795_CLK_R), DEF_MOD("cmt1", 302, R8A7795_CLK_R), DEF_MOD("cmt0", 303, R8A7795_CLK_R), + DEF_MOD("tpu0", 304, R8A7795_CLK_S3D4), DEF_MOD("scif2", 310, R8A7795_CLK_S3D4), DEF_MOD("sdif3", 311, R8A7795_CLK_SD3), DEF_MOD("sdif2", 312, R8A7795_CLK_SD2), @@ -201,6 +202,10 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("ehci0", 703, R8A7795_CLK_S3D2), DEF_MOD("hsusb", 704, R8A7795_CLK_S3D2), DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D2), + DEF_MOD("cmm3", 708, R8A7795_CLK_S2D1), + DEF_MOD("cmm2", 709, R8A7795_CLK_S2D1), + DEF_MOD("cmm1", 710, R8A7795_CLK_S2D1), + DEF_MOD("cmm0", 711, R8A7795_CLK_S2D1), DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */ DEF_MOD("csi20", 714, R8A7795_CLK_CSI0), DEF_MOD("csi41", 715, R8A7795_CLK_CSI0), diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index d8e9af5d9ae9..90cc6a102602 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -134,6 +134,7 @@ static const 
struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("cmt2", 301, R8A7796_CLK_R), DEF_MOD("cmt1", 302, R8A7796_CLK_R), DEF_MOD("cmt0", 303, R8A7796_CLK_R), + DEF_MOD("tpu0", 304, R8A7796_CLK_S3D4), DEF_MOD("scif2", 310, R8A7796_CLK_S3D4), DEF_MOD("sdif3", 311, R8A7796_CLK_SD3), DEF_MOD("sdif2", 312, R8A7796_CLK_SD2), @@ -180,6 +181,9 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = { DEF_MOD("ehci1", 702, R8A7796_CLK_S3D2), DEF_MOD("ehci0", 703, R8A7796_CLK_S3D2), DEF_MOD("hsusb", 704, R8A7796_CLK_S3D2), + DEF_MOD("cmm2", 709, R8A7796_CLK_S2D1), + DEF_MOD("cmm1", 710, R8A7796_CLK_S2D1), + DEF_MOD("cmm0", 711, R8A7796_CLK_S2D1), DEF_MOD("csi20", 714, R8A7796_CLK_CSI0), DEF_MOD("csi40", 716, R8A7796_CLK_CSI0), DEF_MOD("du2", 722, R8A7796_CLK_S2D1), diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c index 8f87e314d949..b4e8c5b7d515 100644 --- a/drivers/clk/renesas/r8a77965-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c @@ -132,6 +132,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("cmt2", 301, R8A77965_CLK_R), DEF_MOD("cmt1", 302, R8A77965_CLK_R), DEF_MOD("cmt0", 303, R8A77965_CLK_R), + DEF_MOD("tpu0", 304, R8A77965_CLK_S3D4), DEF_MOD("scif2", 310, R8A77965_CLK_S3D4), DEF_MOD("sdif3", 311, R8A77965_CLK_SD3), DEF_MOD("sdif2", 312, R8A77965_CLK_SD2), @@ -179,6 +180,9 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("ehci1", 702, R8A77965_CLK_S3D2), DEF_MOD("ehci0", 703, R8A77965_CLK_S3D2), DEF_MOD("hsusb", 704, R8A77965_CLK_S3D2), + DEF_MOD("cmm3", 708, R8A77965_CLK_S2D1), + DEF_MOD("cmm1", 710, R8A77965_CLK_S2D1), + DEF_MOD("cmm0", 711, R8A77965_CLK_S2D1), DEF_MOD("csi20", 714, R8A77965_CLK_CSI0), DEF_MOD("csi40", 716, R8A77965_CLK_CSI0), DEF_MOD("du3", 721, R8A77965_CLK_S2D1), diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index 9570404baa58..ceabf55c21c2 100644 --- a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -183,6 +183,8 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("ehci0", 703, R8A77990_CLK_S3D2), DEF_MOD("hsusb", 704, R8A77990_CLK_S3D2), + DEF_MOD("cmm1", 710, R8A77990_CLK_S1D1), + DEF_MOD("cmm0", 711, R8A77990_CLK_S1D1), DEF_MOD("csi40", 716, R8A77990_CLK_CSI0), DEF_MOD("du1", 723, R8A77990_CLK_S1D1), DEF_MOD("du0", 724, R8A77990_CLK_S1D1), diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index 68707277b17b..962bb337f2e7 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -146,6 +146,8 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { DEF_MOD("vspbs", 627, R8A77995_CLK_S0D1), DEF_MOD("ehci0", 703, R8A77995_CLK_S3D2), DEF_MOD("hsusb", 704, R8A77995_CLK_S3D2), + DEF_MOD("cmm1", 710, R8A77995_CLK_S1D1), + DEF_MOD("cmm0", 711, R8A77995_CLK_S1D1), DEF_MOD("du1", 723, R8A77995_CLK_S1D1), DEF_MOD("du0", 724, R8A77995_CLK_S1D1), DEF_MOD("lvds", 727, R8A77995_CLK_S2D1), diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index 7d042183aa37..b33e1383efe3 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -17,6 +17,8 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_domain.h> #include <linux/slab.h> #include <linux/spinlock.h> 
#include <dt-bindings/clock/r9a06g032-sysctrl.h> @@ -29,6 +31,7 @@ struct r9a06g032_gate { /* This is used to describe a clock for instantiation */ struct r9a06g032_clkdesc { const char *name; + uint32_t managed: 1; uint32_t type: 3; uint32_t index: 8; uint32_t source : 8; /* source index + 1 (0 == none) */ @@ -61,7 +64,11 @@ struct r9a06g032_clkdesc { #define D_GATE(_idx, _n, _src, ...) \ { .type = K_GATE, .index = R9A06G032_##_idx, \ .source = 1 + R9A06G032_##_src, .name = _n, \ - .gate = I_GATE(__VA_ARGS__), } + .gate = I_GATE(__VA_ARGS__) } +#define D_MODULE(_idx, _n, _src, ...) \ + { .type = K_GATE, .index = R9A06G032_##_idx, \ + .source = 1 + R9A06G032_##_src, .name = _n, \ + .managed = 1, .gate = I_GATE(__VA_ARGS__) } #define D_ROOT(_idx, _n, _mul, _div) \ { .type = K_FFC, .index = R9A06G032_##_idx, .name = _n, \ .div = _div, .mul = _mul } @@ -122,7 +129,7 @@ enum { K_GATE = 0, K_FFC, K_DIV, K_BITSEL, K_DUALGATE }; #define R9A06G032_CLOCK_COUNT (R9A06G032_UART_GROUP_34567 + 1) -static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = { +static const struct r9a06g032_clkdesc r9a06g032_clocks[] = { D_ROOT(CLKOUT, "clkout", 25, 1), D_ROOT(CLK_PLL_USB, "clk_pll_usb", 12, 10), D_FFC(CLKOUT_D10, "clkout_d10", CLKOUT, 10), @@ -171,7 +178,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = { D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0), D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0), D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0), - D_GATE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, 0xe6, 0, 0, 0, 0, 0, 0), + D_MODULE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, 0xe6, 0, 0, 0, 0, 0, 0), D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0), D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0), D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0), @@ -188,17 +195,17 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = { D_GATE(CLK_SPI5, "clk_spi5", DIV_P4_PG, 0x822, 0x823, 0, 0, 0, 0, 0), D_GATE(CLK_SWITCH, "clk_switch", DIV_SWITCH, 0x982, 0x983, 0, 0, 0, 0, 0), D_DIV(DIV_MOTOR, "div_motor", CLKOUT_D5, 84, 2, 8), - D_GATE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, 0x400, 0x401, 0, 0x402, 0, 0x440, 0x441), - D_GATE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, 0x740, 0x741, 0x742, 0, 0xae0, 0, 0), - D_GATE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, 0x420, 0x422, 0, 0x421, 0, 0x460, 0x461), - D_GATE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, 0x8c3, 0x8c4, 0x8c5, 0, 0xb41, 0, 0), - D_GATE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, 0x8c6, 0x8c7, 0x8c8, 0, 0xb42, 0, 0), - D_GATE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, 0x8c9, 0x8ca, 0x8cb, 0, 0xb43, 0, 0), - D_GATE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, 0x743, 0x744, 0x745, 0, 0xae1, 0, 0), - D_GATE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, 0x746, 0x747, 0x748, 0, 0xae2, 0, 0), - D_GATE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, 0xe3, 0, 0, 0xe4, 0, 0x102, 0x103), - D_GATE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, 0xe0, 0xe1, 0, 0xe2, 0, 0x100, 0x101), - D_GATE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, 0xe5, 0, 0, 0, 0, 0, 0), + D_MODULE(HCLK_ECAT125, "hclk_ecat125", CLKOUT_D8, 0x400, 0x401, 0, 0x402, 0, 0x440, 0x441), + D_MODULE(HCLK_PINCONFIG, "hclk_pinconfig", CLKOUT_D40, 0x740, 0x741, 0x742, 0, 0xae0, 0, 0), + D_MODULE(HCLK_SERCOS, "hclk_sercos", CLKOUT_D10, 0x420, 0x422, 0, 0x421, 0, 0x460, 0x461), + D_MODULE(HCLK_SGPIO2, "hclk_sgpio2", DIV_P5_PG, 0x8c3, 0x8c4, 0x8c5, 
0, 0xb41, 0, 0), + D_MODULE(HCLK_SGPIO3, "hclk_sgpio3", DIV_P5_PG, 0x8c6, 0x8c7, 0x8c8, 0, 0xb42, 0, 0), + D_MODULE(HCLK_SGPIO4, "hclk_sgpio4", DIV_P5_PG, 0x8c9, 0x8ca, 0x8cb, 0, 0xb43, 0, 0), + D_MODULE(HCLK_TIMER0, "hclk_timer0", CLKOUT_D40, 0x743, 0x744, 0x745, 0, 0xae1, 0, 0), + D_MODULE(HCLK_TIMER1, "hclk_timer1", CLKOUT_D40, 0x746, 0x747, 0x748, 0, 0xae2, 0, 0), + D_MODULE(HCLK_USBF, "hclk_usbf", CLKOUT_D8, 0xe3, 0, 0, 0xe4, 0, 0x102, 0x103), + D_MODULE(HCLK_USBH, "hclk_usbh", CLKOUT_D8, 0xe0, 0xe1, 0, 0xe2, 0, 0x100, 0x101), + D_MODULE(HCLK_USBPM, "hclk_usbpm", CLKOUT_D8, 0xe5, 0, 0, 0, 0, 0, 0), D_GATE(CLK_48_PG_F, "clk_48_pg_f", CLK_48, 0x78c, 0x78d, 0, 0x78e, 0, 0xb04, 0xb05), D_GATE(CLK_48_PG4, "clk_48_pg4", CLK_48, 0x789, 0x78a, 0x78b, 0, 0xb03, 0, 0), D_FFC(CLK_DDRPHY_PLLCLK_D4, "clk_ddrphy_pllclk_d4", CLK_DDRPHY_PLLCLK, 4), @@ -208,13 +215,13 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = { D_FFC(CLK_REF_SYNC_D8, "clk_ref_sync_d8", CLK_REF_SYNC, 8), D_FFC(CLK_SERCOS100_D2, "clk_sercos100_d2", CLK_SERCOS100, 2), D_DIV(DIV_CA7, "div_ca7", CLK_REF_SYNC, 57, 1, 4, 1, 2, 4), - D_GATE(HCLK_CAN0, "hclk_can0", CLK_48, 0x783, 0x784, 0x785, 0, 0xb01, 0, 0), - D_GATE(HCLK_CAN1, "hclk_can1", CLK_48, 0x786, 0x787, 0x788, 0, 0xb02, 0, 0), - D_GATE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, 0x1ef, 0x1f0, 0x1f1, 0, 0, 0, 0), - D_GATE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, 0x1ec, 0x1ed, 0x1ee, 0, 0, 0, 0), - D_GATE(HCLK_RSV, "hclk_rsv", CLK_48, 0x780, 0x781, 0x782, 0, 0xb00, 0, 0), - D_GATE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, 0x1e0, 0x1e1, 0x1e2, 0, 0, 0, 0), - D_GATE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, 0x1e3, 0x1e4, 0x1e5, 0, 0, 0, 0), + D_MODULE(HCLK_CAN0, "hclk_can0", CLK_48, 0x783, 0x784, 0x785, 0, 0xb01, 0, 0), + D_MODULE(HCLK_CAN1, "hclk_can1", CLK_48, 0x786, 0x787, 0x788, 0, 0xb02, 0, 0), + D_MODULE(HCLK_DELTASIGMA, "hclk_deltasigma", DIV_MOTOR, 0x1ef, 0x1f0, 0x1f1, 0, 0, 0, 0), + D_MODULE(HCLK_PWMPTO, "hclk_pwmpto", DIV_MOTOR, 0x1ec, 0x1ed, 0x1ee, 0, 0, 0, 0), + D_MODULE(HCLK_RSV, "hclk_rsv", CLK_48, 0x780, 0x781, 0x782, 0, 0xb00, 0, 0), + D_MODULE(HCLK_SGPIO0, "hclk_sgpio0", DIV_MOTOR, 0x1e0, 0x1e1, 0x1e2, 0, 0, 0, 0), + D_MODULE(HCLK_SGPIO1, "hclk_sgpio1", DIV_MOTOR, 0x1e3, 0x1e4, 0x1e5, 0, 0, 0, 0), D_DIV(RTOS_MDC, "rtos_mdc", CLK_REF_SYNC, 100, 80, 640, 80, 160, 320, 640), D_GATE(CLK_CM3, "clk_cm3", CLK_REF_SYNC_D4, 0xba0, 0xba1, 0, 0xba2, 0, 0xbc0, 0xbc1), D_GATE(CLK_DDRC, "clk_ddrc", CLK_DDRPHY_PLLCLK_D4, 0x323, 0x324, 0, 0, 0, 0, 0), @@ -222,53 +229,53 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = { D_GATE(CLK_HSR50, "clk_hsr50", CLK_HSR100_D2, 0x484, 0x485, 0, 0, 0, 0, 0), D_GATE(CLK_HW_RTOS, "clk_hw_rtos", CLK_REF_SYNC_D4, 0xc60, 0xc61, 0, 0, 0, 0, 0), D_GATE(CLK_SERCOS50, "clk_sercos50", CLK_SERCOS100_D2, 0x424, 0x423, 0, 0, 0, 0, 0), - D_GATE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, 0x1af, 0x1b0, 0x1b1, 0, 0, 0, 0), - D_GATE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, 0xc20, 0xc21, 0xc22, 0, 0, 0, 0), - D_GATE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, 0x123, 0x124, 0x125, 0, 0x142, 0, 0), - D_GATE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, 0x120, 0x121, 0, 0x122, 0, 0x140, 0x141), - D_GATE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, 0x320, 0x322, 0, 0x321, 0, 0x3a0, 0x3a1), - D_GATE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, 0x260, 0x261, 0x262, 0x263, 0x2c0, 0x2c1, 0x2c2), - D_GATE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, 0x264, 0x265, 0x266, 0x267, 0x2c3, 0x2c4, 0x2c5), - 
D_GATE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, 0x360, 0x361, 0x362, 0x363, 0x3c0, 0x3c1, 0x3c2), - D_GATE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, 0x380, 0x381, 0x382, 0x383, 0x3e0, 0x3e1, 0x3e2), - D_GATE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, 0x212, 0x213, 0x214, 0, 0, 0, 0), - D_GATE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, 0x215, 0x216, 0x217, 0, 0, 0, 0), - D_GATE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, 0x229, 0x22a, 0x22b, 0, 0, 0, 0), - D_GATE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, 0x480, 0x482, 0, 0x481, 0, 0x4c0, 0x4c1), - D_GATE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, 0x1a9, 0x1aa, 0x1ab, 0, 0, 0, 0), - D_GATE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, 0x1ac, 0x1ad, 0x1ae, 0, 0, 0, 0), - D_GATE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, 0x7a0, 0x7a1, 0x7a2, 0, 0xb20, 0, 0), - D_GATE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, 0x164, 0x165, 0x166, 0, 0x183, 0, 0), - D_GATE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, 0x160, 0x161, 0x162, 0x163, 0x180, 0x181, 0x182), - D_GATE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, 0x280, 0x281, 0x282, 0x283, 0x2e0, 0x2e1, 0x2e2), - D_GATE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, 0x7ac, 0x7ad, 0, 0x7ae, 0, 0xb24, 0xb25), - D_GATE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, 0x22c, 0x22d, 0x22e, 0, 0, 0, 0), - D_GATE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, 0x22f, 0x230, 0x231, 0, 0, 0, 0), - D_GATE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, 0x7a6, 0x7a7, 0x7a8, 0, 0xb22, 0, 0), - D_GATE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, 0x7a9, 0x7aa, 0x7ab, 0, 0xb23, 0, 0), - D_GATE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302), - D_GATE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2), - D_GATE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0), - D_GATE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0), - D_GATE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82), - D_GATE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662), - D_GATE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0), - D_GATE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, 0x200, 0x201, 0x202, 0, 0, 0, 0), - D_GATE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, 0x203, 0x204, 0x205, 0, 0, 0, 0), - D_GATE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, 0x206, 0x207, 0x208, 0, 0, 0, 0), - D_GATE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, 0x209, 0x20a, 0x20b, 0, 0, 0, 0), - D_GATE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, 0x20c, 0x20d, 0x20e, 0, 0, 0, 0), - D_GATE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, 0x20f, 0x210, 0x211, 0, 0, 0, 0), - D_GATE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, 0x980, 0, 0x981, 0, 0, 0, 0), - D_GATE(HCLK_SWITCH_RG, "hclk_switch_rg", CLK_REF_SYNC_D4, 0xc40, 0xc41, 0xc42, 0, 0, 0, 0), - D_GATE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, 0x1a0, 0x1a1, 0x1a2, 0, 0, 0, 0), - D_GATE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, 0x1a3, 0x1a4, 0x1a5, 0, 0, 0, 0), - D_GATE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, 0x1a6, 0x1a7, 0x1a8, 0, 0, 0, 0), - D_GATE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, 0x218, 0x219, 0x21a, 0, 0, 0, 0), - D_GATE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, 0x21b, 0x21c, 0x21d, 0, 0, 0, 0), - D_GATE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, 0x220, 0x221, 0x222, 0, 0, 0, 0), - D_GATE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, 0x223, 0x224, 0x225, 0, 0, 0, 0), - D_GATE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, 0x226, 0x227, 0x228, 0, 0, 0, 0), 
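/*
 * Illustrative aside, not part of the patch above: the D_GATE() entries
 * removed here are re-added below as D_MODULE(), which expands to the same
 * K_GATE descriptor but also sets the new "managed" bit, so the clock
 * domain attach code added further down can tell plain gates apart from
 * module clocks that should be handed to pm_clk. A reduced, hypothetical
 * model of the descriptor and the two macro flavours (struct and entry
 * names here are invented for illustration only):
 *
 *	struct clkdesc_model {
 *		const char *name;
 *		uint32_t managed:1;	// 1 => added to the PM clock domain
 *		uint32_t type:3;	// K_GATE for both variants
 *		uint32_t index:8;
 *		uint32_t source:8;	// source index + 1 (0 == none)
 *	};
 *
 *	// D_GATE(CLK_QSPI0, "clk_qspi0", ...)   => { .type = K_GATE, .managed = 0, ... }
 *	// D_MODULE(HCLK_UART0, "hclk_uart0", ...) => { .type = K_GATE, .managed = 1, ... }
 */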
+ D_MODULE(HCLK_ADC, "hclk_adc", CLK_REF_SYNC_D8, 0x1af, 0x1b0, 0x1b1, 0, 0, 0, 0), + D_MODULE(HCLK_CM3, "hclk_cm3", CLK_REF_SYNC_D4, 0xc20, 0xc21, 0xc22, 0, 0, 0, 0), + D_MODULE(HCLK_CRYPTO_EIP150, "hclk_crypto_eip150", CLK_REF_SYNC_D4, 0x123, 0x124, 0x125, 0, 0x142, 0, 0), + D_MODULE(HCLK_CRYPTO_EIP93, "hclk_crypto_eip93", CLK_REF_SYNC_D4, 0x120, 0x121, 0, 0x122, 0, 0x140, 0x141), + D_MODULE(HCLK_DDRC, "hclk_ddrc", CLK_REF_SYNC_D4, 0x320, 0x322, 0, 0x321, 0, 0x3a0, 0x3a1), + D_MODULE(HCLK_DMA0, "hclk_dma0", CLK_REF_SYNC_D4, 0x260, 0x261, 0x262, 0x263, 0x2c0, 0x2c1, 0x2c2), + D_MODULE(HCLK_DMA1, "hclk_dma1", CLK_REF_SYNC_D4, 0x264, 0x265, 0x266, 0x267, 0x2c3, 0x2c4, 0x2c5), + D_MODULE(HCLK_GMAC0, "hclk_gmac0", CLK_REF_SYNC_D4, 0x360, 0x361, 0x362, 0x363, 0x3c0, 0x3c1, 0x3c2), + D_MODULE(HCLK_GMAC1, "hclk_gmac1", CLK_REF_SYNC_D4, 0x380, 0x381, 0x382, 0x383, 0x3e0, 0x3e1, 0x3e2), + D_MODULE(HCLK_GPIO0, "hclk_gpio0", CLK_REF_SYNC_D4, 0x212, 0x213, 0x214, 0, 0, 0, 0), + D_MODULE(HCLK_GPIO1, "hclk_gpio1", CLK_REF_SYNC_D4, 0x215, 0x216, 0x217, 0, 0, 0, 0), + D_MODULE(HCLK_GPIO2, "hclk_gpio2", CLK_REF_SYNC_D4, 0x229, 0x22a, 0x22b, 0, 0, 0, 0), + D_MODULE(HCLK_HSR, "hclk_hsr", CLK_HSR100_D2, 0x480, 0x482, 0, 0x481, 0, 0x4c0, 0x4c1), + D_MODULE(HCLK_I2C0, "hclk_i2c0", CLK_REF_SYNC_D8, 0x1a9, 0x1aa, 0x1ab, 0, 0, 0, 0), + D_MODULE(HCLK_I2C1, "hclk_i2c1", CLK_REF_SYNC_D8, 0x1ac, 0x1ad, 0x1ae, 0, 0, 0, 0), + D_MODULE(HCLK_LCD, "hclk_lcd", CLK_REF_SYNC_D4, 0x7a0, 0x7a1, 0x7a2, 0, 0xb20, 0, 0), + D_MODULE(HCLK_MSEBI_M, "hclk_msebi_m", CLK_REF_SYNC_D4, 0x164, 0x165, 0x166, 0, 0x183, 0, 0), + D_MODULE(HCLK_MSEBI_S, "hclk_msebi_s", CLK_REF_SYNC_D4, 0x160, 0x161, 0x162, 0x163, 0x180, 0x181, 0x182), + D_MODULE(HCLK_NAND, "hclk_nand", CLK_REF_SYNC_D4, 0x280, 0x281, 0x282, 0x283, 0x2e0, 0x2e1, 0x2e2), + D_MODULE(HCLK_PG_I, "hclk_pg_i", CLK_REF_SYNC_D4, 0x7ac, 0x7ad, 0, 0x7ae, 0, 0xb24, 0xb25), + D_MODULE(HCLK_PG19, "hclk_pg19", CLK_REF_SYNC_D4, 0x22c, 0x22d, 0x22e, 0, 0, 0, 0), + D_MODULE(HCLK_PG20, "hclk_pg20", CLK_REF_SYNC_D4, 0x22f, 0x230, 0x231, 0, 0, 0, 0), + D_MODULE(HCLK_PG3, "hclk_pg3", CLK_REF_SYNC_D4, 0x7a6, 0x7a7, 0x7a8, 0, 0xb22, 0, 0), + D_MODULE(HCLK_PG4, "hclk_pg4", CLK_REF_SYNC_D4, 0x7a9, 0x7aa, 0x7ab, 0, 0xb23, 0, 0), + D_MODULE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302), + D_MODULE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2), + D_MODULE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0), + D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0), + D_MODULE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82), + D_MODULE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662), + D_MODULE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0), + D_MODULE(HCLK_SPI0, "hclk_spi0", CLK_REF_SYNC_D4, 0x200, 0x201, 0x202, 0, 0, 0, 0), + D_MODULE(HCLK_SPI1, "hclk_spi1", CLK_REF_SYNC_D4, 0x203, 0x204, 0x205, 0, 0, 0, 0), + D_MODULE(HCLK_SPI2, "hclk_spi2", CLK_REF_SYNC_D4, 0x206, 0x207, 0x208, 0, 0, 0, 0), + D_MODULE(HCLK_SPI3, "hclk_spi3", CLK_REF_SYNC_D4, 0x209, 0x20a, 0x20b, 0, 0, 0, 0), + D_MODULE(HCLK_SPI4, "hclk_spi4", CLK_REF_SYNC_D4, 0x20c, 0x20d, 0x20e, 0, 0, 0, 0), + D_MODULE(HCLK_SPI5, "hclk_spi5", CLK_REF_SYNC_D4, 0x20f, 0x210, 0x211, 0, 0, 0, 0), + D_MODULE(HCLK_SWITCH, "hclk_switch", CLK_REF_SYNC_D4, 0x980, 0, 0x981, 0, 0, 0, 0), + D_MODULE(HCLK_SWITCH_RG, 
"hclk_switch_rg", CLK_REF_SYNC_D4, 0xc40, 0xc41, 0xc42, 0, 0, 0, 0), + D_MODULE(HCLK_UART0, "hclk_uart0", CLK_REF_SYNC_D8, 0x1a0, 0x1a1, 0x1a2, 0, 0, 0, 0), + D_MODULE(HCLK_UART1, "hclk_uart1", CLK_REF_SYNC_D8, 0x1a3, 0x1a4, 0x1a5, 0, 0, 0, 0), + D_MODULE(HCLK_UART2, "hclk_uart2", CLK_REF_SYNC_D8, 0x1a6, 0x1a7, 0x1a8, 0, 0, 0, 0), + D_MODULE(HCLK_UART3, "hclk_uart3", CLK_REF_SYNC_D4, 0x218, 0x219, 0x21a, 0, 0, 0, 0), + D_MODULE(HCLK_UART4, "hclk_uart4", CLK_REF_SYNC_D4, 0x21b, 0x21c, 0x21d, 0, 0, 0, 0), + D_MODULE(HCLK_UART5, "hclk_uart5", CLK_REF_SYNC_D4, 0x220, 0x221, 0x222, 0, 0, 0, 0), + D_MODULE(HCLK_UART6, "hclk_uart6", CLK_REF_SYNC_D4, 0x223, 0x224, 0x225, 0, 0, 0, 0), + D_MODULE(HCLK_UART7, "hclk_uart7", CLK_REF_SYNC_D4, 0x226, 0x227, 0x228, 0, 0, 0, 0), /* * These are not hardware clocks, but are needed to handle the special * case where we have a 'selector bit' that doesn't just change the @@ -345,6 +352,84 @@ struct r9a06g032_clk_gate { #define to_r9a06g032_gate(_hw) container_of(_hw, struct r9a06g032_clk_gate, hw) +static int create_add_module_clock(struct of_phandle_args *clkspec, + struct device *dev) +{ + struct clk *clk; + int error; + + clk = of_clk_get_from_provider(clkspec); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + error = pm_clk_create(dev); + if (error) { + clk_put(clk); + return error; + } + + error = pm_clk_add_clk(dev, clk); + if (error) { + pm_clk_destroy(dev); + clk_put(clk); + } + + return error; +} + +static int r9a06g032_attach_dev(struct generic_pm_domain *pd, + struct device *dev) +{ + struct device_node *np = dev->of_node; + struct of_phandle_args clkspec; + int i = 0; + int error; + int index; + + while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, + &clkspec)) { + if (clkspec.np != pd->dev.of_node) + continue; + + index = clkspec.args[0]; + if (index < R9A06G032_CLOCK_COUNT && + r9a06g032_clocks[index].managed) { + error = create_add_module_clock(&clkspec, dev); + of_node_put(clkspec.np); + if (error) + return error; + } + i++; + } + + return 0; +} + +static void r9a06g032_detach_dev(struct generic_pm_domain *unused, struct device *dev) +{ + if (!pm_clk_no_clocks(dev)) + pm_clk_destroy(dev); +} + +static int r9a06g032_add_clk_domain(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct generic_pm_domain *pd; + + pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + pd->name = np->name; + pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + pd->attach_dev = r9a06g032_attach_dev; + pd->detach_dev = r9a06g032_detach_dev; + pm_genpd_init(pd, &pm_domain_always_on_gov, false); + + of_genpd_add_provider_simple(np, pd); + return 0; +} + static void r9a06g032_clk_gate_set(struct r9a06g032_priv *clocks, struct r9a06g032_gate *g, int on) @@ -871,8 +956,12 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev) if (error) return error; - return devm_add_action_or_reset(dev, + error = devm_add_action_or_reset(dev, r9a06g032_clocks_del_clk_provider, np); + if (error) + return error; + + return r9a06g032_add_clk_domain(dev); } static const struct of_device_id r9a06g032_match[] = { diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 0201809bbd37..52bbb9ce3807 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -112,14 +112,15 @@ static const u16 srcr[] = { * @dev: CPG/MSSR device * @base: CPG/MSSR register block base address * @rmw_lock: protects RMW register accesses - * @clks: Array 
containing all Core and Module Clocks + * @np: Device node in DT for this CPG/MSSR module * @num_core_clks: Number of Core Clocks in clks[] * @num_mod_clks: Number of Module Clocks in clks[] * @last_dt_core_clk: ID of the last Core Clock exported to DT + * @stbyctrl: This device has Standby Control Registers * @notifiers: Notifier chain to save/restore clock state for system resume * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control * @smstpcr_saved[].val: Saved values of SMSTPCR[] - * @stbyctrl: This device has Standby Control Registers + * @clks: Array containing all Core and Module Clocks */ struct cpg_mssr_priv { #ifdef CONFIG_RESET_CONTROLLER @@ -130,7 +131,6 @@ struct cpg_mssr_priv { spinlock_t rmw_lock; struct device_node *np; - struct clk **clks; unsigned int num_core_clks; unsigned int num_mod_clks; unsigned int last_dt_core_clk; @@ -141,6 +141,8 @@ struct cpg_mssr_priv { u32 mask; u32 val; } smstpcr_saved[ARRAY_SIZE(smstpcr)]; + + struct clk *clks[]; }; static struct cpg_mssr_priv *cpg_mssr_priv; @@ -447,9 +449,8 @@ fail: struct cpg_mssr_clk_domain { struct generic_pm_domain genpd; - struct device_node *np; unsigned int num_core_pm_clks; - unsigned int core_pm_clks[0]; + unsigned int core_pm_clks[]; }; static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain; @@ -459,7 +460,7 @@ static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec, { unsigned int i; - if (clkspec->np != pd->np || clkspec->args_count != 2) + if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2) return false; switch (clkspec->args[0]) { @@ -510,16 +511,12 @@ found: return PTR_ERR(clk); error = pm_clk_create(dev); - if (error) { - dev_err(dev, "pm_clk_create failed %d\n", error); + if (error) goto fail_put; - } error = pm_clk_add_clk(dev, clk); - if (error) { - dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, error); + if (error) goto fail_destroy; - } return 0; @@ -549,7 +546,6 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev, if (!pd) return -ENOMEM; - pd->np = np; pd->num_core_pm_clks = num_core_pm_clks; memcpy(pd->core_pm_clks, core_pm_clks, pm_size); @@ -896,7 +892,6 @@ static int __init cpg_mssr_common_init(struct device *dev, const struct cpg_mssr_info *info) { struct cpg_mssr_priv *priv; - struct clk **clks = NULL; unsigned int nclks, i; int error; @@ -906,7 +901,8 @@ static int __init cpg_mssr_common_init(struct device *dev, return error; } - priv = kzalloc(sizeof(*priv), GFP_KERNEL); + nclks = info->num_total_core_clks + info->num_hw_mod_clks; + priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -920,15 +916,7 @@ static int __init cpg_mssr_common_init(struct device *dev, goto out_err; } - nclks = info->num_total_core_clks + info->num_hw_mod_clks; - clks = kmalloc_array(nclks, sizeof(*clks), GFP_KERNEL); - if (!clks) { - error = -ENOMEM; - goto out_err; - } - cpg_mssr_priv = priv; - priv->clks = clks; priv->num_core_clks = info->num_total_core_clks; priv->num_mod_clks = info->num_hw_mod_clks; priv->last_dt_core_clk = info->last_dt_core_clk; @@ -936,7 +924,7 @@ static int __init cpg_mssr_common_init(struct device *dev, priv->stbyctrl = info->stbyctrl; for (i = 0; i < nclks; i++) - clks[i] = ERR_PTR(-ENOENT); + priv->clks[i] = ERR_PTR(-ENOENT); error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv); if (error) @@ -945,7 +933,6 @@ static int __init cpg_mssr_common_init(struct device *dev, return 0; out_err: - kfree(clks); if (priv->base) iounmap(priv->base); kfree(priv); diff --git 
a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index c61f4d3e52e2..4abe7ff31f53 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -46,29 +46,27 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw, static int rockchip_mmc_get_phase(struct clk_hw *hw) { struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw); - unsigned long rate = clk_get_rate(hw->clk); + unsigned long rate = clk_hw_get_rate(hw); u32 raw_value; u16 degrees; u32 delay_num = 0; /* See the comment for rockchip_mmc_set_phase below */ - if (!rate) { - pr_err("%s: invalid clk rate\n", __func__); + if (!rate) return -EINVAL; - } raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90; if (raw_value & ROCKCHIP_MMC_DELAY_SEL) { - /* degrees/delaynum * 10000 */ + /* degrees/delaynum * 1000000 */ unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) * - 36 * (rate / 1000000); + 36 * (rate / 10000); delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK); delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET; - degrees += DIV_ROUND_CLOSEST(delay_num * factor, 10000); + degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000); } return degrees % 360; @@ -77,7 +75,7 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees) { struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw); - unsigned long rate = clk_get_rate(hw->clk); + unsigned long rate = clk_hw_get_rate(hw); u8 nineties, remainder; u8 delay_num; u32 raw_value; diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c index bb39a799bdb5..3a501896b280 100644 --- a/drivers/clk/rockchip/clk-px30.c +++ b/drivers/clk/rockchip/clk-px30.c @@ -794,6 +794,9 @@ static struct rockchip_clk_branch px30_clk_branches[] __initdata = { GATE(ACLK_GIC, "aclk_gic", "aclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 12, GFLAGS), GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_pre", 0, PX30_CLKGATE_CON(13), 15, GFLAGS), + /* aclk_dmac is controlled by sgrf_soc_con1[11]. */ + SGRF_GATE(ACLK_DMAC, "aclk_dmac", "aclk_bus_pre"), + GATE(0, "hclk_bus_niu", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 9, GFLAGS), GATE(0, "hclk_rom", "hclk_bus_pre", CLK_IGNORE_UNUSED, PX30_CLKGATE_CON(13), 14, GFLAGS), GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_pre", 0, PX30_CLKGATE_CON(14), 1, GFLAGS), @@ -957,7 +960,6 @@ static void __init px30_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; - struct clk *clk; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -972,14 +974,6 @@ static void __init px30_clk_init(struct device_node *np) return; } - /* aclk_dmac is controlled by sgrf_soc_con1[11]. 
*/ - clk = clk_register_fixed_factor(NULL, "aclk_dmac", "aclk_bus_pre", 0, 1, 1); - if (IS_ERR(clk)) - pr_warn("%s: could not register clock aclk_dmac: %ld\n", - __func__, PTR_ERR(clk)); - else - rockchip_clk_add_lookup(ctx, clk, ACLK_DMAC); - rockchip_clk_register_plls(ctx, px30_pll_clks, ARRAY_SIZE(px30_pll_clks), PX30_GRF_SOC_STATUS0); diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index bdb126321e56..d17cfb7a3ff4 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -101,6 +101,7 @@ static struct rockchip_cpuclk_rate_table rk3228_cpuclk_rates[] __initdata = { RK3228_CPUCLK_RATE(1608000000, 1, 7), RK3228_CPUCLK_RATE(1512000000, 1, 7), RK3228_CPUCLK_RATE(1488000000, 1, 5), + RK3228_CPUCLK_RATE(1464000000, 1, 5), RK3228_CPUCLK_RATE(1416000000, 1, 5), RK3228_CPUCLK_RATE(1392000000, 1, 5), RK3228_CPUCLK_RATE(1296000000, 1, 5), @@ -246,7 +247,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = { RK2928_CLKGATE_CON(4), 0, GFLAGS), /* PD_MISC */ - MUX(0, "hdmiphy", mux_hdmiphy_p, CLK_SET_RATE_PARENT, + MUX(SCLK_HDMI_PHY, "hdmiphy", mux_hdmiphy_p, CLK_SET_RATE_PARENT, RK2928_MISC_CON, 13, 1, MFLAGS), MUX(0, "usb480m_phy", mux_usb480m_phy_p, CLK_SET_RATE_PARENT, RK2928_MISC_CON, 14, 1, MFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 057629685ea1..cc2a177bbdbf 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -113,7 +113,6 @@ static struct rockchip_pll_rate_table rk3288_pll_rates[] = { RK3066_PLL_RATE( 160000000, 1, 80, 12), RK3066_PLL_RATE( 157500000, 1, 105, 16), RK3066_PLL_RATE( 126000000, 1, 84, 16), - RK3066_PLL_RATE( 48000000, 1, 64, 32), { /* sentinel */ }, }; @@ -767,6 +766,9 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 11, GFLAGS), GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS), + /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */ + SGRF_GATE(PCLK_WDT, "pclk_wdt", "pclk_pd_alive"), + /* pclk_pd_pmu gates */ GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 0, GFLAGS), GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 1, GFLAGS), @@ -915,7 +917,6 @@ static struct syscore_ops rk3288_clk_syscore_ops = { static void __init rk3288_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; - struct clk *clk; rk3288_cru_base = of_iomap(np, 0); if (!rk3288_cru_base) { @@ -930,14 +931,6 @@ static void __init rk3288_clk_init(struct device_node *np) return; } - /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. 
*/ - clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1); - if (IS_ERR(clk)) - pr_warn("%s: could not register clock pclk_wdt: %ld\n", - __func__, PTR_ERR(clk)); - else - rockchip_clk_add_lookup(ctx, clk, PCLK_WDT); - rockchip_clk_register_plls(ctx, rk3288_pll_clks, ARRAY_SIZE(rk3288_pll_clks), RK3288_GRF_SOC_STATUS1); diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 076b9777a955..c186a1985bf4 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -791,6 +791,9 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { GATE(PCLK_SARADC, "pclk_saradc", "pclk_bus", 0, RK3328_CLKGATE_CON(17), 15, GFLAGS), GATE(0, "pclk_pmu", "pclk_bus", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(28), 3, GFLAGS), + /* Watchdog pclk is controlled from the secure GRF */ + SGRF_GATE(PCLK_WDT, "pclk_wdt", "pclk_bus"), + GATE(PCLK_USB3PHY_OTG, "pclk_usb3phy_otg", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(28), 1, GFLAGS), GATE(PCLK_USB3PHY_PIPE, "pclk_usb3phy_pipe", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(28), 2, GFLAGS), GATE(PCLK_USB3_GRF, "pclk_usb3_grf", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 2, GFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c index 43b022d9393b..55443349439b 100644 --- a/drivers/clk/rockchip/clk-rk3368.c +++ b/drivers/clk/rockchip/clk-rk3368.c @@ -811,6 +811,9 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = { GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS), GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS), + /* Watchdog pclk is controlled by sgrf_soc_con3[7]. */ + SGRF_GATE(PCLK_WDT, "pclk_wdt", "pclk_pd_alive"), + /* * pclk_vio gates * pclk_vio comes from the exactly same source as hclk_vio @@ -862,7 +865,6 @@ static void __init rk3368_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; - struct clk *clk; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -877,14 +879,6 @@ static void __init rk3368_clk_init(struct device_node *np) return; } - /* Watchdog pclk is controlled by sgrf_soc_con3[7]. */ - clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1); - if (IS_ERR(clk)) - pr_warn("%s: could not register clock pclk_wdt: %ld\n", - __func__, PTR_ERR(clk)); - else - rockchip_clk_add_lookup(ctx, clk, PCLK_WDT); - rockchip_clk_register_plls(ctx, rk3368_pll_clks, ARRAY_SIZE(rk3368_pll_clks), RK3368_GRF_SOC_STATUS0); diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c index a7ff71313278..ce1d2446f142 100644 --- a/drivers/clk/rockchip/clk-rk3399.c +++ b/drivers/clk/rockchip/clk-rk3399.c @@ -1295,6 +1295,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = { GATE(PCLK_PMU_INTR_ARB, "pclk_pmu_intr_arb", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(31), 9, GFLAGS), GATE(PCLK_SGRF, "pclk_sgrf", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(31), 10, GFLAGS), + /* Watchdog pclk is controlled by RK3399 SECURE_GRF_SOC_CON3[8]. 
*/ + SGRF_GATE(PCLK_WDT, "pclk_wdt", "pclk_alive"), + GATE(SCLK_MIPIDPHY_REF, "clk_mipidphy_ref", "xin24m", 0, RK3399_CLKGATE_CON(11), 14, GFLAGS), GATE(SCLK_DPHY_PLL, "clk_dphy_pll", "clk_mipidphy_ref", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 0, GFLAGS), @@ -1522,7 +1525,6 @@ static void __init rk3399_clk_init(struct device_node *np) { struct rockchip_clk_provider *ctx; void __iomem *reg_base; - struct clk *clk; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -1537,14 +1539,6 @@ static void __init rk3399_clk_init(struct device_node *np) return; } - /* Watchdog pclk is controlled by RK3399 SECURE_GRF_SOC_CON3[8]. */ - clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_alive", 0, 1, 1); - if (IS_ERR(clk)) - pr_warn("%s: could not register clock pclk_wdt: %ld\n", - __func__, PTR_ERR(clk)); - else - rockchip_clk_add_lookup(ctx, clk, PCLK_WDT); - rockchip_clk_register_plls(ctx, rk3399_pll_clks, ARRAY_SIZE(rk3399_pll_clks), -1); diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index adb66cc94929..b811597a3d38 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -811,6 +811,10 @@ struct rockchip_clk_branch { .gate_offset = -1, \ } +/* SGRF clocks are only accessible from secure mode, so not controllable */ +#define SGRF_GATE(_id, cname, pname) \ + FACTOR(_id, cname, pname, 0, 1, 1) + struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np, void __iomem *base, unsigned long nr_clks); void rockchip_clk_of_add_provider(struct device_node *np, diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 982eb02bafda..51564fc23c63 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -958,6 +958,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { /* list of gate clocks supported in exynos4x12 soc */ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { + GATE(CLK_ASYNC_G3D, "async_g3d", "aclk200", GATE_IP_LEFTBUS, 6, 0, 0), GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0), GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0), GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0), diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 12d800fd9528..01bca5a498b2 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -131,6 +131,8 @@ #define SRC_CDREX 0x20200 #define DIV_CDREX0 0x20500 #define DIV_CDREX1 0x20504 +#define GATE_BUS_CDREX0 0x20700 +#define GATE_BUS_CDREX1 0x20704 #define KPLL_LOCK 0x28000 #define KPLL_CON0 0x28100 #define SRC_KFC 0x28200 @@ -245,6 +247,8 @@ static const unsigned long exynos5x_clk_regs[] __initconst = { DIV_CDREX1, SRC_KFC, DIV_KFC0, + GATE_BUS_CDREX0, + GATE_BUS_CDREX1, }; static const unsigned long exynos5800_clk_regs[] __initconst = { @@ -422,6 +426,9 @@ PNAME(mout_group13_5800_p) = { "dout_osc_div", "mout_sw_aclkfl1_550_cam" }; PNAME(mout_group14_5800_p) = { "dout_aclk550_cam", "dout_sclk_sw" }; PNAME(mout_group15_5800_p) = { "dout_osc_div", "mout_sw_aclk550_cam" }; PNAME(mout_group16_5800_p) = { "dout_osc_div", "mout_mau_epll_clk" }; +PNAME(mout_mx_mspll_ccore_phy_p) = { "sclk_bpll", "mout_sclk_dpll", + "mout_sclk_mpll", "ff_dout_spll2", + "mout_sclk_spll", "mout_sclk_epll"}; /* fixed rate clocks generated outside the soc */ static struct samsung_fixed_rate_clock @@ -447,7 +454,7 @@ static const struct samsung_fixed_factor_clock static const struct 
samsung_fixed_factor_clock exynos5800_fixed_factor_clks[] __initconst = { FFACTOR(0, "ff_dout_epll2", "mout_sclk_epll", 1, 2, 0), - FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0), + FFACTOR(CLK_FF_DOUT_SPLL2, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0), }; static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = { @@ -469,11 +476,14 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = { MUX(0, "mout_aclk300_disp1", mout_group5_5800_p, SRC_TOP2, 24, 2), MUX(0, "mout_aclk300_gscl", mout_group5_5800_p, SRC_TOP2, 28, 2), + MUX(CLK_MOUT_MX_MSPLL_CCORE_PHY, "mout_mx_mspll_ccore_phy", + mout_mx_mspll_ccore_phy_p, SRC_TOP7, 0, 3), + MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore", - mout_mx_mspll_ccore_p, SRC_TOP7, 16, 2), + mout_mx_mspll_ccore_p, SRC_TOP7, 16, 3), MUX_F(CLK_MOUT_MAU_EPLL, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, SRC_TOP7, 20, 2, CLK_SET_RATE_PARENT, 0), - MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1), + MUX(CLK_SCLK_BPLL, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1), MUX(0, "mout_epll2", mout_epll2_5800_p, SRC_TOP7, 28, 1), MUX(0, "mout_aclk550_cam", mout_group3_5800_p, SRC_TOP8, 16, 3), @@ -645,7 +655,7 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { MUX(0, "mout_sclk_mpll", mout_mpll_p, SRC_TOP6, 0, 1), MUX(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1), - MUX(0, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1), + MUX(CLK_MOUT_SCLK_SPLL, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1), MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1), MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1), MUX_F(CLK_MOUT_EPLL, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1, @@ -803,8 +813,21 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = { "mout_aclk400_disp1", DIV_TOP2, 4, 3), /* CDREX Block */ - DIV(CLK_DOUT_PCLK_CDREX, "dout_pclk_cdrex", "dout_aclk_cdrex1", - DIV_CDREX0, 28, 3), + /* + * The three clocks below are controlled using the same register and + * bits. They are put into one because there is a need of + * synchronization between the BUS and DREXs (two external memory + * interfaces). + * They are put here to show this HW assumption and for clock + * information summary completeness. 
+ */ + DIV_F(CLK_DOUT_PCLK_CDREX, "dout_pclk_cdrex", "dout_aclk_cdrex1", + DIV_CDREX0, 28, 3, CLK_GET_RATE_NOCACHE, 0), + DIV_F(CLK_DOUT_PCLK_DREX0, "dout_pclk_drex0", "dout_cclk_drex0", + DIV_CDREX0, 28, 3, CLK_GET_RATE_NOCACHE, 0), + DIV_F(CLK_DOUT_PCLK_DREX1, "dout_pclk_drex1", "dout_cclk_drex0", + DIV_CDREX0, 28, 3, CLK_GET_RATE_NOCACHE, 0), + DIV_F(CLK_DOUT_SCLK_CDREX, "dout_sclk_cdrex", "mout_mclk_cdrex", DIV_CDREX0, 24, 3, CLK_SET_RATE_PARENT, 0), DIV(CLK_DOUT_ACLK_CDREX1, "dout_aclk_cdrex1", "dout_clk2x_phy0", @@ -1167,6 +1190,32 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = { GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0), GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0), + + /* CDREX */ + GATE(CLK_CLKM_PHY0, "clkm_phy0", "dout_sclk_cdrex", + GATE_BUS_CDREX0, 0, 0, 0), + GATE(CLK_CLKM_PHY1, "clkm_phy1", "dout_sclk_cdrex", + GATE_BUS_CDREX0, 1, 0, 0), + GATE(0, "mx_mspll_ccore_phy", "mout_mx_mspll_ccore_phy", + SRC_MASK_TOP7, 0, CLK_IGNORE_UNUSED, 0), + + GATE(CLK_ACLK_PPMU_DREX1_1, "aclk_ppmu_drex1_1", "dout_aclk_cdrex1", + GATE_BUS_CDREX1, 12, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ACLK_PPMU_DREX1_0, "aclk_ppmu_drex1_0", "dout_aclk_cdrex1", + GATE_BUS_CDREX1, 13, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ACLK_PPMU_DREX0_1, "aclk_ppmu_drex0_1", "dout_aclk_cdrex1", + GATE_BUS_CDREX1, 14, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ACLK_PPMU_DREX0_0, "aclk_ppmu_drex0_0", "dout_aclk_cdrex1", + GATE_BUS_CDREX1, 15, CLK_IGNORE_UNUSED, 0), + + GATE(CLK_PCLK_PPMU_DREX1_1, "pclk_ppmu_drex1_1", "dout_pclk_cdrex", + GATE_BUS_CDREX1, 26, CLK_IGNORE_UNUSED, 0), + GATE(CLK_PCLK_PPMU_DREX1_0, "pclk_ppmu_drex1_0", "dout_pclk_cdrex", + GATE_BUS_CDREX1, 27, CLK_IGNORE_UNUSED, 0), + GATE(CLK_PCLK_PPMU_DREX0_1, "pclk_ppmu_drex0_1", "dout_pclk_cdrex", + GATE_BUS_CDREX1, 28, CLK_IGNORE_UNUSED, 0), + GATE(CLK_PCLK_PPMU_DREX0_0, "pclk_ppmu_drex0_0", "dout_pclk_cdrex", + GATE_BUS_CDREX1, 29, CLK_IGNORE_UNUSED, 0), }; static const struct samsung_div_clock exynos5x_disp_div_clks[] __initconst = { @@ -1282,6 +1331,17 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini PLL_35XX_RATE(24 * MHZ, 200000000, 200, 3, 3), }; +static const struct samsung_pll_rate_table exynos5422_bpll_rate_table[] = { + PLL_35XX_RATE(24 * MHZ, 825000000, 275, 4, 1), + PLL_35XX_RATE(24 * MHZ, 728000000, 182, 3, 1), + PLL_35XX_RATE(24 * MHZ, 633000000, 211, 4, 1), + PLL_35XX_RATE(24 * MHZ, 543000000, 181, 2, 2), + PLL_35XX_RATE(24 * MHZ, 413000000, 413, 6, 2), + PLL_35XX_RATE(24 * MHZ, 275000000, 275, 3, 3), + PLL_35XX_RATE(24 * MHZ, 206000000, 206, 3, 3), + PLL_35XX_RATE(24 * MHZ, 165000000, 110, 2, 3), +}; + static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = { PLL_36XX_RATE(24 * MHZ, 600000000U, 100, 2, 1, 0), PLL_36XX_RATE(24 * MHZ, 400000000U, 200, 3, 2, 0), @@ -1424,9 +1484,13 @@ static void __init exynos5x_clk_init(struct device_node *np, exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl; exynos5x_plls[epll].rate_table = exynos5420_epll_24mhz_tbl; exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl; - exynos5x_plls[bpll].rate_table = exynos5420_pll2550x_24mhz_tbl; } + if (soc == EXYNOS5420) + exynos5x_plls[bpll].rate_table = exynos5420_pll2550x_24mhz_tbl; + else + exynos5x_plls[bpll].rate_table = exynos5422_bpll_rate_table; + samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls), reg_base); samsung_clk_register_fixed_rate(ctx, exynos5x_fixed_rate_clks, diff --git a/drivers/clk/samsung/clk-exynos5433.c 
b/drivers/clk/samsung/clk-exynos5433.c index 945d5f2ad733..7824c2ba3d8e 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -5587,8 +5587,8 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev) data->nr_clk_save = info->nr_clk_regs; data->clk_suspend = info->suspend_regs; data->nr_clk_suspend = info->nr_suspend_regs; - data->nr_pclks = of_count_phandle_with_args(dev->of_node, "clocks", - "#clock-cells"); + data->nr_pclks = of_clk_get_parent_count(dev->of_node); + if (data->nr_pclks > 0) { data->pclks = devm_kcalloc(dev, sizeof(struct clk *), data->nr_pclks, GFP_KERNEL); diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5bed36e12951..993f3a73c71e 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -161,8 +161,12 @@ static const struct stratix10_gate_clock s10_gate_clks[] = { 8, 0, 0, 0, 0, 0, 0}, { STRATIX10_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0xA4, 9, 0, 0, 0, 0, 0, 0}, - { STRATIX10_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0xA4, + { STRATIX10_NAND_X_CLK, "nand_x_clk", "l4_mp_clk", NULL, 1, 0, 0xA4, 10, 0, 0, 0, 0, 0, 0}, + { STRATIX10_NAND_CLK, "nand_clk", "nand_x_clk", NULL, 1, 0, 0xA4, + 10, 0, 0, 0, 0, 0, 4}, + { STRATIX10_NAND_ECC_CLK, "nand_ecc_clk", "nand_x_clk", NULL, 1, 0, 0xA4, + 10, 0, 0, 0, 0, 0, 4}, }; static int s10_clk_register_c_perip(const struct stratix10_perip_c_clock *clks, diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c index e038b0447206..a5bdca1de5d0 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c @@ -42,6 +42,7 @@ int sprd_clk_regmap_init(struct platform_device *pdev, void __iomem *base; struct device_node *node = pdev->dev.of_node; struct regmap *regmap; + struct resource *res; if (of_find_property(node, "sprd,syscon", NULL)) { regmap = syscon_regmap_lookup_by_phandle(node, "sprd,syscon"); @@ -50,10 +51,14 @@ int sprd_clk_regmap_init(struct platform_device *pdev, return PTR_ERR(regmap); } } else { - base = of_iomap(node, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + regmap = devm_regmap_init_mmio(&pdev->dev, base, &sprdclk_regmap_config); - if (IS_ERR_OR_NULL(regmap)) { + if (IS_ERR(regmap)) { pr_err("failed to init regmap\n"); return PTR_ERR(regmap); } diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c index 9980ab55271b..f76305b4bc8d 100644 --- a/drivers/clk/sprd/sc9860-clk.c +++ b/drivers/clk/sprd/sc9860-clk.c @@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev) { const struct of_device_id *match; const struct sprd_clk_desc *desc; + int ret; match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node); if (!match) { @@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev) } desc = match->data; - sprd_clk_regmap_init(pdev, desc); + ret = sprd_clk_regmap_init(pdev, desc); + if (ret) + return ret; return sprd_clk_probe(&pdev->dev, desc->hw_clks); } diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c index df43952e403e..f32366d9336e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c @@ -160,8 +160,9 @@ static struct ccu_nk pll_periph_base_clk = { }, }; -static CLK_FIXED_FACTOR(pll_periph_clk, "pll-periph", "pll-periph-base", - 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph_clk, "pll-periph", + 
&pll_periph_base_clk.common.hw, + 2, 1, CLK_SET_RATE_PARENT); /* Not documented on A10 */ static struct ccu_div pll_periph_sata_clk = { @@ -1028,19 +1029,29 @@ static struct ccu_common *sun4i_sun7i_ccu_clks[] = { &out_b_clk.common }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* Post-divider for pll-audio is hardcoded to 1 */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", - "pll-video0", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", - "pll-video1", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video0_2x_clk, "pll-video0-2x", + &pll_video0_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video1_2x_clk, "pll-video1-2x", + &pll_video1_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); static struct clk_hw_onecell_data sun4i_a10_hw_clks = { diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index 1786ee8fe8bb..49bd7a4c015c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -597,23 +597,34 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); /* Fixed Factor clocks */ -static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0); + +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; /* We hardcode the divider to 1 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", - "pll-periph0", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x", - "pll-periph1", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", - "pll-video0", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + 
clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph0_2x_clk, "pll-periph0-2x", + &pll_periph0_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_periph1_2x_clk, "pll-periph1-2x", + &pll_periph1_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_video0_2x_clk, "pll-video0-2x", + &pll_video0_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); static struct ccu_common *sun50i_a64_ccu_clks[] = { &pll_cpux_clk.common, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 27554eaf6929..45a1ed3fe674 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -49,7 +49,7 @@ static struct ccu_div ar100_clk = { }, }; -static CLK_FIXED_FACTOR(r_ahb_clk, "r-ahb", "ar100", 1, 1, 0); +static CLK_FIXED_FACTOR_HW(r_ahb_clk, "r-ahb", &ar100_clk.common.hw, 1, 1, 0); static struct ccu_div r_apb1_clk = { .div = _SUNXI_CCU_DIV(0, 2), @@ -104,7 +104,7 @@ static SUNXI_CCU_GATE(r_apb2_i2c_clk, "r-apb2-i2c", "r-apb2", static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1", 0x1cc, BIT(0), 0); static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1", - 0x1cc, BIT(0), 0); + 0x1ec, BIT(0), 0); /* Information of IR(RX) mod clock is gathered from BSP source code */ static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" }; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index 9d3f98962779..aebef4af9861 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -622,8 +622,9 @@ static SUNXI_CCU_GATE(bus_xhci_clk, "bus-xhci", "ahb3", 0xa8c, BIT(5), 0); static SUNXI_CCU_GATE(bus_ehci3_clk, "bus-ehci3", "ahb3", 0xa8c, BIT(7), 0); static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb3", 0xa8c, BIT(8), 0); -static CLK_FIXED_FACTOR(pcie_ref_100m_clk, "pcie-ref-100M", - "pll-periph0-4x", 24, 1, 0); +static struct clk_fixed_factor pll_periph0_4x_clk; +static CLK_FIXED_FACTOR_HW(pcie_ref_100m_clk, "pcie-ref-100M", + &pll_periph0_4x_clk.hw, 24, 1, 0); static SUNXI_CCU_GATE(pcie_ref_clk, "pcie-ref", "pcie-ref-100M", 0xab0, BIT(31), 0); static SUNXI_CCU_GATE(pcie_ref_out_clk, "pcie-ref-out", "pcie-ref", @@ -745,34 +746,52 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdcp_clk, "hdcp", hdcp_parents, 0xc40, static SUNXI_CCU_GATE(bus_hdcp_clk, "bus-hdcp", "ahb3", 0xc4c, BIT(0), 0); /* Fixed factor clocks */ -static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0); + +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; /* * The divider of pll-audio is fixed to 8 now, as pll-audio-4x has a * fixed post-divider 2. 
*/ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 8, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); - -static CLK_FIXED_FACTOR(pll_periph0_4x_clk, "pll-periph0-4x", - "pll-periph0", 1, 4, 0); -static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", - "pll-periph0", 1, 2, 0); - -static CLK_FIXED_FACTOR(pll_periph1_4x_clk, "pll-periph1-4x", - "pll-periph1", 1, 4, 0); -static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x", - "pll-periph1", 1, 2, 0); - -static CLK_FIXED_FACTOR(pll_video0_4x_clk, "pll-video0-4x", - "pll-video0", 1, 4, CLK_SET_RATE_PARENT); - -static CLK_FIXED_FACTOR(pll_video1_4x_clk, "pll-video1-4x", - "pll-video1", 1, 4, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 8, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); + +static const struct clk_hw *pll_periph0_parents[] = { + &pll_periph0_clk.common.hw +}; +static CLK_FIXED_FACTOR_HWS(pll_periph0_4x_clk, "pll-periph0-4x", + pll_periph0_parents, + 1, 4, 0); +static CLK_FIXED_FACTOR_HWS(pll_periph0_2x_clk, "pll-periph0-2x", + pll_periph0_parents, + 1, 2, 0); + +static const struct clk_hw *pll_periph1_parents[] = { + &pll_periph1_clk.common.hw +}; +static CLK_FIXED_FACTOR_HWS(pll_periph1_4x_clk, "pll-periph1-4x", + pll_periph1_parents, + 1, 4, 0); +static CLK_FIXED_FACTOR_HWS(pll_periph1_2x_clk, "pll-periph1-2x", + pll_periph1_parents, + 1, 2, 0); + +static CLK_FIXED_FACTOR_HW(pll_video0_4x_clk, "pll-video0-4x", + &pll_video0_clk.common.hw, + 1, 4, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video1_4x_clk, "pll-video1-4x", + &pll_video1_clk.common.hw, + 1, 4, CLK_SET_RATE_PARENT); static struct ccu_common *sun50i_h6_ccu_clks[] = { &pll_cpux_clk.common, diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index b71ed0f6f785..b78e9b507c1c 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -603,19 +603,29 @@ static struct ccu_common *sun5i_a10s_ccu_clks[] = { &iep_clk.common, }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* We hardcode the divider to 1 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", - "pll-video0", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", - "pll-video1", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, 
CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video0_2x_clk, "pll-video0-2x", + &pll_video0_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video1_2x_clk, "pll-video1-2x", + &pll_video1_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); static struct clk_hw_onecell_data sun5i_a10s_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 2ff7b082df28..9b40d53266a3 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -955,21 +955,32 @@ static struct ccu_common *sun6i_a31_ccu_clks[] = { &out_c_clk.common, }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* We hardcode the divider to 1 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph_2x_clk, "pll-periph-2x", - "pll-periph", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", - "pll-video0", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", - "pll-video1", 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph_2x_clk, "pll-periph-2x", + &pll_periph_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_video0_2x_clk, "pll-video0-2x", + &pll_video0_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video1_2x_clk, "pll-video1-2x", + &pll_video1_clk.common.hw, + 1, 2, CLK_SET_RATE_PARENT); static struct clk_hw_onecell_data sun6i_a31_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c index 14ced502788a..103aa504f6c8 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c @@ -543,19 +543,29 @@ static struct ccu_common *sun8i_a23_ccu_clks[] = { &ats_clk.common, }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* We hardcode the divider to 1 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph_2x_clk, "pll-periph-2x", - "pll-periph", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_video_2x_clk, "pll-video-2x", - "pll-video", 1, 2, 0); +static 
CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph_2x_clk, "pll-periph-2x", + &pll_periph_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_video_2x_clk, "pll-video-2x", + &pll_video_clk.common.hw, + 1, 2, 0); static struct clk_hw_onecell_data sun8i_a23_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 61fb41f4903c..91838cd11037 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -580,19 +580,29 @@ static struct ccu_common *sun8i_a33_ccu_clks[] = { &ats_clk.common, }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* We hardcode the divider to 1 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph_2x_clk, "pll-periph-2x", - "pll-periph", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_video_2x_clk, "pll-video-2x", - "pll-video", 1, 2, 0); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph_2x_clk, "pll-periph-2x", + &pll_periph_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_video_2x_clk, "pll-video-2x", + &pll_video_clk.common.hw, + 1, 2, 0); static struct clk_hw_onecell_data sun8i_a33_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c index 9601504905b2..6b636362379e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c @@ -717,17 +717,26 @@ static struct ccu_common *sun50i_h5_ccu_clks[] = { &gpu_clk.common, }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* We hardcode the divider to 1 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", - "pll-periph0", 1, 2, 0); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 1, 1, 
CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph0_2x_clk, "pll-periph0-2x", + &pll_periph0_clk.common.hw, + 1, 2, 0); static struct clk_hw_onecell_data sun8i_h3_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c index b5be11e5de0d..4646fdc61053 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c @@ -17,10 +17,13 @@ #include "ccu-sun8i-r.h" -static const char * const ar100_parents[] = { "osc32k", "osc24M", - "pll-periph0", "iosc" }; -static const char * const a83t_ar100_parents[] = { "osc16M-d512", "osc24M", - "pll-periph0", "iosc" }; +static const struct clk_parent_data ar100_parents[] = { + { .fw_name = "losc" }, + { .fw_name = "hosc" }, + { .fw_name = "pll-periph" }, + { .fw_name = "iosc" }, +}; + static const struct ccu_mux_var_prediv ar100_predivs[] = { { .index = 2, .shift = 8, .width = 5 }, }; @@ -39,64 +42,49 @@ static struct ccu_div ar100_clk = { .common = { .reg = 0x00, .features = CCU_FEATURE_VARIABLE_PREDIV, - .hw.init = CLK_HW_INIT_PARENTS("ar100", - ar100_parents, - &ccu_div_ops, - 0), - }, -}; - -static struct ccu_div a83t_ar100_clk = { - .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO), - - .mux = { - .shift = 16, - .width = 2, - - .var_predivs = ar100_predivs, - .n_var_predivs = ARRAY_SIZE(ar100_predivs), - }, - - .common = { - .reg = 0x00, - .features = CCU_FEATURE_VARIABLE_PREDIV, - .hw.init = CLK_HW_INIT_PARENTS("ar100", - a83t_ar100_parents, - &ccu_div_ops, - 0), + .hw.init = CLK_HW_INIT_PARENTS_DATA("ar100", + ar100_parents, + &ccu_div_ops, + 0), }, }; -static CLK_FIXED_FACTOR(ahb0_clk, "ahb0", "ar100", 1, 1, 0); +static CLK_FIXED_FACTOR_HW(ahb0_clk, "ahb0", &ar100_clk.common.hw, 1, 1, 0); static struct ccu_div apb0_clk = { .div = _SUNXI_CCU_DIV_FLAGS(0, 2, CLK_DIVIDER_POWER_OF_TWO), .common = { .reg = 0x0c, - .hw.init = CLK_HW_INIT("apb0", - "ahb0", - &ccu_div_ops, - 0), + .hw.init = CLK_HW_INIT_HW("apb0", + &ahb0_clk.hw, + &ccu_div_ops, + 0), }, }; static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0); -static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0", - 0x28, BIT(0), 0); -static SUNXI_CCU_GATE(apb0_ir_clk, "apb0-ir", "apb0", - 0x28, BIT(1), 0); -static SUNXI_CCU_GATE(apb0_timer_clk, "apb0-timer", "apb0", - 0x28, BIT(2), 0); -static SUNXI_CCU_GATE(apb0_rsb_clk, "apb0-rsb", "apb0", - 0x28, BIT(3), 0); -static SUNXI_CCU_GATE(apb0_uart_clk, "apb0-uart", "apb0", - 0x28, BIT(4), 0); -static SUNXI_CCU_GATE(apb0_i2c_clk, "apb0-i2c", "apb0", - 0x28, BIT(6), 0); -static SUNXI_CCU_GATE(apb0_twd_clk, "apb0-twd", "apb0", - 0x28, BIT(7), 0); +/* + * Define the parent as an array that can be reused to save space + * instead of having compound literals for each gate. Also have it + * non-const so we can change it on the A83T. 
+ */ +static const struct clk_hw *apb0_gate_parent[] = { &apb0_clk.common.hw }; +static SUNXI_CCU_GATE_HWS(apb0_pio_clk, "apb0-pio", + apb0_gate_parent, 0x28, BIT(0), 0); +static SUNXI_CCU_GATE_HWS(apb0_ir_clk, "apb0-ir", + apb0_gate_parent, 0x28, BIT(1), 0); +static SUNXI_CCU_GATE_HWS(apb0_timer_clk, "apb0-timer", + apb0_gate_parent, 0x28, BIT(2), 0); +static SUNXI_CCU_GATE_HWS(apb0_rsb_clk, "apb0-rsb", + apb0_gate_parent, 0x28, BIT(3), 0); +static SUNXI_CCU_GATE_HWS(apb0_uart_clk, "apb0-uart", + apb0_gate_parent, 0x28, BIT(4), 0); +static SUNXI_CCU_GATE_HWS(apb0_i2c_clk, "apb0-i2c", + apb0_gate_parent, 0x28, BIT(6), 0); +static SUNXI_CCU_GATE_HWS(apb0_twd_clk, "apb0-twd", + apb0_gate_parent, 0x28, BIT(7), 0); static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" }; static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir", @@ -107,7 +95,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir", BIT(31), /* gate */ 0); -static const char *const a83t_r_mod0_parents[] = { "osc16M", "osc24M" }; +static const struct clk_parent_data a83t_r_mod0_parents[] = { + { .fw_name = "iosc" }, + { .fw_name = "hosc" }, +}; static const struct ccu_mux_fixed_prediv a83t_ir_predivs[] = { { .index = 0, .div = 16 }, }; @@ -127,15 +118,15 @@ static struct ccu_mp a83t_ir_clk = { .common = { .reg = 0x54, .features = CCU_FEATURE_VARIABLE_PREDIV, - .hw.init = CLK_HW_INIT_PARENTS("ir", - a83t_r_mod0_parents, - &ccu_mp_ops, - 0), + .hw.init = CLK_HW_INIT_PARENTS_DATA("ir", + a83t_r_mod0_parents, + &ccu_mp_ops, + 0), }, }; static struct ccu_common *sun8i_a83t_r_ccu_clks[] = { - &a83t_ar100_clk.common, + &ar100_clk.common, &a83t_apb0_clk.common, &apb0_pio_clk.common, &apb0_ir_clk.common, @@ -174,7 +165,7 @@ static struct ccu_common *sun50i_a64_r_ccu_clks[] = { static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = { .hws = { - [CLK_AR100] = &a83t_ar100_clk.common.hw, + [CLK_AR100] = &ar100_clk.common.hw, [CLK_AHB0] = &ahb0_clk.hw, [CLK_APB0] = &a83t_apb0_clk.common.hw, [CLK_APB0_PIO] = &apb0_pio_clk.common.hw, @@ -291,6 +282,9 @@ static void __init sunxi_r_ccu_init(struct device_node *node, static void __init sun8i_a83t_r_ccu_setup(struct device_node *node) { + /* Fix apb0 bus gate parents here */ + apb0_gate_parent[0] = &a83t_apb0_clk.common.hw; + sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc); } CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c index 540f5f7454fc..897490800102 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c @@ -944,25 +944,37 @@ static struct ccu_common *sun8i_r40_ccu_clks[] = { }; /* Fixed Factor clocks */ -static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(osc12M_clk, "osc12M", "hosc", 2, 1, 0); + +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; /* We hardcode the divider to 4 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", - "pll-periph0", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_periph1_2x_clk, "pll-periph1-2x", - 
"pll-periph1", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_video0_2x_clk, "pll-video0-2x", - "pll-video0", 1, 2, 0); -static CLK_FIXED_FACTOR(pll_video1_2x_clk, "pll-video1-2x", - "pll-video1", 1, 2, 0); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph0_2x_clk, "pll-periph0-2x", + &pll_periph0_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_periph1_2x_clk, "pll-periph1-2x", + &pll_periph1_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_video0_2x_clk, "pll-video0-2x", + &pll_video0_clk.common.hw, + 1, 2, 0); +static CLK_FIXED_FACTOR_HW(pll_video1_2x_clk, "pll-video1-2x", + &pll_video1_clk.common.hw, + 1, 2, 0); static struct clk_hw_onecell_data sun8i_r40_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index cbbf06d42c2c..9b3939fc7faa 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -429,17 +429,26 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = { &mipi_csi_clk.common, }; +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + /* We hardcode the divider to 4 for now */ -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_periph0_2x_clk, "pll-periph0-2x", - "pll-periph0", 1, 2, 0); +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_periph0_2x_clk, "pll-periph0-2x", + &pll_periph0_clk.common.hw, + 1, 2, 0); static struct clk_hw_onecell_data sun8i_v3s_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c index 2f82cd855b0f..4b4a507d04ed 100644 --- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c +++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c @@ -14,18 +14,26 @@ #include "ccu-sun9i-a80-usb.h" -static SUNXI_CCU_GATE(bus_hci0_clk, "bus-hci0", "bus-usb", 0x0, BIT(1), 0); -static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M", 0x0, BIT(2), 0); -static SUNXI_CCU_GATE(bus_hci1_clk, "bus-hci1", "bus-usb", 0x0, BIT(3), 0); -static SUNXI_CCU_GATE(bus_hci2_clk, "bus-hci2", "bus-usb", 0x0, BIT(5), 0); -static SUNXI_CCU_GATE(usb_ohci2_clk, "usb-ohci2", "osc24M", 0x0, BIT(6), 0); - -static SUNXI_CCU_GATE(usb0_phy_clk, "usb0-phy", "osc24M", 0x4, BIT(1), 0); -static SUNXI_CCU_GATE(usb1_hsic_clk, "usb1-hsic", 
"osc24M", 0x4, BIT(2), 0); -static SUNXI_CCU_GATE(usb1_phy_clk, "usb1-phy", "osc24M", 0x4, BIT(3), 0); -static SUNXI_CCU_GATE(usb2_hsic_clk, "usb2-hsic", "osc24M", 0x4, BIT(4), 0); -static SUNXI_CCU_GATE(usb2_phy_clk, "usb2-phy", "osc24M", 0x4, BIT(5), 0); -static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "osc24M", 0x4, BIT(10), 0); +static const struct clk_parent_data clk_parent_hosc[] = { + { .fw_name = "hosc" }, +}; + +static const struct clk_parent_data clk_parent_bus[] = { + { .fw_name = "bus" }, +}; + +static SUNXI_CCU_GATE_DATA(bus_hci0_clk, "bus-hci0", clk_parent_bus, 0x0, BIT(1), 0); +static SUNXI_CCU_GATE_DATA(usb_ohci0_clk, "usb-ohci0", clk_parent_hosc, 0x0, BIT(2), 0); +static SUNXI_CCU_GATE_DATA(bus_hci1_clk, "bus-hci1", clk_parent_bus, 0x0, BIT(3), 0); +static SUNXI_CCU_GATE_DATA(bus_hci2_clk, "bus-hci2", clk_parent_bus, 0x0, BIT(5), 0); +static SUNXI_CCU_GATE_DATA(usb_ohci2_clk, "usb-ohci2", clk_parent_hosc, 0x0, BIT(6), 0); + +static SUNXI_CCU_GATE_DATA(usb0_phy_clk, "usb0-phy", clk_parent_hosc, 0x4, BIT(1), 0); +static SUNXI_CCU_GATE_DATA(usb1_hsic_clk, "usb1-hsic", clk_parent_hosc, 0x4, BIT(2), 0); +static SUNXI_CCU_GATE_DATA(usb1_phy_clk, "usb1-phy", clk_parent_hosc, 0x4, BIT(3), 0); +static SUNXI_CCU_GATE_DATA(usb2_hsic_clk, "usb2-hsic", clk_parent_hosc, 0x4, BIT(4), 0); +static SUNXI_CCU_GATE_DATA(usb2_phy_clk, "usb2-phy", clk_parent_hosc, 0x4, BIT(5), 0); +static SUNXI_CCU_GATE_DATA(usb_hsic_clk, "usb-hsic", clk_parent_hosc, 0x4, BIT(10), 0); static struct ccu_common *sun9i_a80_usb_clks[] = { &bus_hci0_clk.common, diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c index e748b8a6f3c5..7ecc3a5a5b5e 100644 --- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c +++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c @@ -374,16 +374,25 @@ static struct ccu_common *suniv_ccu_clks[] = { &avs_clk.common, }; -static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", - "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x", - "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x", - "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x", - "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT); -static CLK_FIXED_FACTOR(pll_video_2x_clk, "pll-video-2x", - "pll-video", 1, 2, 0); +static const struct clk_hw *clk_parent_pll_audio[] = { + &pll_audio_base_clk.common.hw +}; + +static CLK_FIXED_FACTOR_HWS(pll_audio_clk, "pll-audio", + clk_parent_pll_audio, + 4, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_2x_clk, "pll-audio-2x", + clk_parent_pll_audio, + 2, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_4x_clk, "pll-audio-4x", + clk_parent_pll_audio, + 1, 1, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HWS(pll_audio_8x_clk, "pll-audio-8x", + clk_parent_pll_audio, + 1, 2, CLK_SET_RATE_PARENT); +static CLK_FIXED_FACTOR_HW(pll_video_2x_clk, "pll-video-2x", + &pll_video_clk.common.hw, + 1, 2, 0); static struct clk_hw_onecell_data suniv_hw_clks = { .hws = { diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index c173778c8a78..7fe3ac980e5f 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -101,7 +101,7 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg, if (!hw) continue; - ret = clk_hw_register(NULL, hw); + ret = of_clk_hw_register(node, hw); if (ret) { pr_err("Couldn't register clock %d - %s\n", i, 
clk_hw_get_name(hw)); diff --git a/drivers/clk/sunxi-ng/ccu_gate.h b/drivers/clk/sunxi-ng/ccu_gate.h index da8100e8846d..c386689a952b 100644 --- a/drivers/clk/sunxi-ng/ccu_gate.h +++ b/drivers/clk/sunxi-ng/ccu_gate.h @@ -28,6 +28,59 @@ struct ccu_gate { } \ } +#define SUNXI_CCU_GATE_HW(_struct, _name, _parent, _reg, _gate, _flags) \ + struct ccu_gate _struct = { \ + .enable = _gate, \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_HW(_name, \ + _parent, \ + &ccu_gate_ops, \ + _flags), \ + } \ + } + +#define SUNXI_CCU_GATE_FW(_struct, _name, _parent, _reg, _gate, _flags) \ + struct ccu_gate _struct = { \ + .enable = _gate, \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_FW_NAME(_name, \ + _parent, \ + &ccu_gate_ops, \ + _flags), \ + } \ + } + +/* + * The following two macros allow the re-use of the data structure + * holding the parent info. + */ +#define SUNXI_CCU_GATE_HWS(_struct, _name, _parent, _reg, _gate, _flags) \ + struct ccu_gate _struct = { \ + .enable = _gate, \ + .common = { \ + .reg = _reg, \ + .hw.init = CLK_HW_INIT_HWS(_name, \ + _parent, \ + &ccu_gate_ops, \ + _flags), \ + } \ + } + +#define SUNXI_CCU_GATE_DATA(_struct, _name, _data, _reg, _gate, _flags) \ + struct ccu_gate _struct = { \ + .enable = _gate, \ + .common = { \ + .reg = _reg, \ + .hw.init = \ + CLK_HW_INIT_PARENTS_DATA(_name, \ + _data, \ + &ccu_gate_ops, \ + _flags), \ + } \ + } + static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw) { struct ccu_common *common = hw_to_ccu_common(hw); diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 623fda5e911f..d3a43381a792 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -980,6 +980,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node, if (endp) { derived_name = kstrndup(clk_name, endp - clk_name, GFP_KERNEL); + if (!derived_name) + return NULL; factors.name = derived_name; } else { factors.name = clk_name; diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index ac1d27a8c650..df172d5772d7 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -984,8 +984,6 @@ static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) pllre->params->defaults_set = true; if (val & PLL_ENABLE) { - pr_warn("PLL_RE already enabled. Postponing set full defaults\n"); - /* * PLL is ON: check if defaults already set, then set those * that can be updated in flight. @@ -1005,13 +1003,20 @@ static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) _pll_misc_chk_default(clk_base, pllre->params, 0, val, ~mask & PLLRE_MISC0_WRITE_MASK); - /* Enable lock detect */ + /* The PLL doesn't work if it's in IDDQ. */ val = readl_relaxed(clk_base + pllre->params->ext_misc_reg[0]); + if (val & PLLRE_MISC0_IDDQ) + pr_warn("unexpected IDDQ bit set for enabled clock\n"); + + /* Enable lock detect */ val &= ~mask; val |= PLLRE_MISC0_DEFAULT_VALUE & mask; writel_relaxed(val, clk_base + pllre->params->ext_misc_reg[0]); udelay(1); + if (!pllre->params->defaults_set) + pr_warn("PLL_RE already enabled. 
Postponing set full defaults\n"); + return; } @@ -2204,9 +2209,9 @@ static struct div_nmp pllu_nmp = { }; static struct tegra_clk_pll_freq_table pll_u_freq_table[] = { - { 12000000, 480000000, 40, 1, 0, 0 }, - { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */ - { 38400000, 480000000, 25, 2, 0, 0 }, + { 12000000, 480000000, 40, 1, 1, 0 }, + { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */ + { 38400000, 480000000, 25, 2, 1, 0 }, { 0, 0, 0, 0, 0, 0 }, }; @@ -3332,7 +3337,7 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_DFLL_SOC, TEGRA210_CLK_PLL_P, 51000000, 1 }, { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, - { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, + { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 }, { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, @@ -3357,7 +3362,6 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 }, { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 }, { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 }, - { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 }, { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 }, { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 }, diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 4786e0ebc2e8..6cb863c13648 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -425,91 +425,6 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, return 0; } -static const struct clk_div_table * -_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width) -{ - const struct clk_div_table *table = NULL; - - ti_clk_parse_divider_data(setup->dividers, setup->num_dividers, - setup->max_div, setup->flags, width, - &table); - - return table; -} - -struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) -{ - struct clk_omap_divider *div; - struct clk_omap_reg *reg; - int ret; - - if (!setup) - return NULL; - - div = kzalloc(sizeof(*div), GFP_KERNEL); - if (!div) - return ERR_PTR(-ENOMEM); - - reg = (struct clk_omap_reg *)&div->reg; - reg->index = setup->module; - reg->offset = setup->reg; - - if (setup->flags & CLKF_INDEX_STARTS_AT_ONE) - div->flags |= CLK_DIVIDER_ONE_BASED; - - if (setup->flags & CLKF_INDEX_POWER_OF_TWO) - div->flags |= CLK_DIVIDER_POWER_OF_TWO; - - div->table = _get_div_table_from_setup(setup, &div->width); - if (IS_ERR(div->table)) { - ret = PTR_ERR(div->table); - kfree(div); - return ERR_PTR(ret); - } - - - div->shift = setup->bit_shift; - div->latch = -EINVAL; - - return &div->hw; -} - -struct clk *ti_clk_register_divider(struct ti_clk *setup) -{ - struct ti_clk_divider *div = setup->data; - struct clk_omap_reg reg = { - .index = div->module, - .offset = div->reg, - }; - u8 width; - u32 flags = 0; - u8 div_flags = 0; - const struct clk_div_table *table; - struct clk *clk; - - if (div->flags & CLKF_INDEX_STARTS_AT_ONE) - div_flags |= CLK_DIVIDER_ONE_BASED; - - if (div->flags & CLKF_INDEX_POWER_OF_TWO) - div_flags |= CLK_DIVIDER_POWER_OF_TWO; - - if (div->flags & CLKF_SET_RATE_PARENT) - flags |= CLK_SET_RATE_PARENT; - - table = _get_div_table_from_setup(div, &width); - if 
(IS_ERR(table)) - return (struct clk *)table; - - clk = _register_divider(NULL, setup->name, div->parent, - flags, &reg, div->bit_shift, - width, -EINVAL, div_flags, table); - - if (IS_ERR(clk)) - kfree(table); - - return clk; -} - static struct clk_div_table * __init ti_clk_get_div_table(struct device_node *node) { diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index 504c0e91cdc7..42389558418c 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -131,36 +131,6 @@ static struct clk *_register_gate(struct device *dev, const char *name, return clk; } -struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup) -{ - struct clk_hw_omap *gate; - struct clk_omap_reg *reg; - const struct clk_hw_omap_ops *ops = &clkhwops_wait; - - if (!setup) - return NULL; - - gate = kzalloc(sizeof(*gate), GFP_KERNEL); - if (!gate) - return ERR_PTR(-ENOMEM); - - reg = (struct clk_omap_reg *)&gate->enable_reg; - reg->index = setup->module; - reg->offset = setup->reg; - - gate->enable_bit = setup->bit_shift; - - if (setup->flags & CLKF_NO_WAIT) - ops = NULL; - - if (setup->flags & CLKF_INTERFACE) - ops = &clkhwops_iclk_wait; - - gate->ops = ops; - - return &gate->hw; -} - static void __init _of_ti_gate_clk_setup(struct device_node *node, const struct clk_ops *ops, const struct clk_hw_omap_ops *hw_ops) diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index b7f9a4f068bf..0069e7cf3ebc 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -164,37 +164,6 @@ static struct clk *_register_mux(struct device *dev, const char *name, return clk; } -struct clk *ti_clk_register_mux(struct ti_clk *setup) -{ - struct ti_clk_mux *mux; - u32 flags; - u8 mux_flags = 0; - struct clk_omap_reg reg; - u32 mask; - - mux = setup->data; - flags = CLK_SET_RATE_NO_REPARENT; - - mask = mux->num_parents; - if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE)) - mask--; - - mask = (1 << fls(mask)) - 1; - reg.index = mux->module; - reg.offset = mux->reg; - reg.ptr = NULL; - - if (mux->flags & CLKF_INDEX_STARTS_AT_ONE) - mux_flags |= CLK_MUX_INDEX_ONE; - - if (mux->flags & CLKF_SET_RATE_PARENT) - flags |= CLK_SET_RATE_PARENT; - - return _register_mux(NULL, setup->name, mux->parents, mux->num_parents, - flags, &reg, mux->bit_shift, mask, -EINVAL, - mux_flags, NULL); -} - /** * of_mux_clk_setup - Setup function for simple mux rate clock * @node: DT node for the clock diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h index c915889d1769..6ccca3b890d6 100644 --- a/drivers/dax/dax-private.h +++ b/drivers/dax/dax-private.h @@ -43,6 +43,7 @@ struct dax_region { * @target_node: effective numa node if dev_dax memory range is onlined * @dev - device core * @pgmap - pgmap for memmap setup / lifetime (driver owned) + * @dax_kmem_res: physical address range of hotadded DAX memory */ struct dev_dax { struct dax_region *region; @@ -50,6 +51,7 @@ struct dev_dax { int target_node; struct device dev; struct dev_pagemap pgmap; + struct resource *dax_kmem_res; }; static inline struct dev_dax *to_dev_dax(struct device *dev) diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c index a02318c6d28a..3d0a7e702c94 100644 --- a/drivers/dax/kmem.c +++ b/drivers/dax/kmem.c @@ -66,23 +66,59 @@ int dev_dax_kmem_probe(struct device *dev) new_res->name = dev_name(dev); rc = add_memory(numa_node, new_res->start, resource_size(new_res)); - if (rc) + if (rc) { + release_resource(new_res); + kfree(new_res); return rc; + } + dev_dax->dax_kmem_res = new_res; return 0; } +#ifdef CONFIG_MEMORY_HOTREMOVE +static int
dev_dax_kmem_remove(struct device *dev) +{ + struct dev_dax *dev_dax = to_dev_dax(dev); + struct resource *res = dev_dax->dax_kmem_res; + resource_size_t kmem_start = res->start; + resource_size_t kmem_size = resource_size(res); + int rc; + + /* + * We have one shot for removing memory, if some memory blocks were not + * offline prior to calling this function remove_memory() will fail, and + * there is no way to hotremove this memory until reboot because device + * unbind will succeed even if we return failure. + */ + rc = remove_memory(dev_dax->target_node, kmem_start, kmem_size); + if (rc) { + dev_err(dev, + "DAX region %pR cannot be hotremoved until the next reboot\n", + res); + return rc; + } + + /* Release and free dax resources */ + release_resource(res); + kfree(res); + dev_dax->dax_kmem_res = NULL; + + return 0; +} +#else static int dev_dax_kmem_remove(struct device *dev) { /* - * Purposely leak the request_mem_region() for the device-dax - * range and return '0' to ->remove() attempts. The removal of - * the device from the driver always succeeds, but the region - * is permanently pinned as reserved by the unreleased + * Without hotremove purposely leak the request_mem_region() for the + * device-dax range and return '0' to ->remove() attempts. The removal + * of the device from the driver always succeeds, but the region is + * permanently pinned as reserved by the unreleased * request_mem_region(). */ return 0; } +#endif /* CONFIG_MEMORY_HOTREMOVE */ static struct dax_device_driver device_dax_kmem_driver = { .drv = { diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 703275cc29de..03fa0c58cef3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -103,6 +103,7 @@ config AXI_DMAC depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST select DMA_ENGINE select DMA_VIRTUAL_CHANNELS + select REGMAP_MMIO help Enable support for the Analog Devices AXI-DMAC peripheral. 
This DMA controller is often used in Analog Device's reference designs for FPGA @@ -584,7 +585,7 @@ config TEGRA20_APB_DMA config TEGRA210_ADMA tristate "NVIDIA Tegra210 ADMA support" - depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK + depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help @@ -666,6 +667,8 @@ source "drivers/dma/qcom/Kconfig" source "drivers/dma/dw/Kconfig" +source "drivers/dma/dw-edma/Kconfig" + source "drivers/dma/hsu/Kconfig" source "drivers/dma/sh/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 6126e1c3a875..5bddf6f8790f 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ obj-$(CONFIG_DW_DMAC_CORE) += dw/ +obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 464725dcad00..9adc7a2fa3d3 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2508,9 +2508,8 @@ DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs); static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { /* Expose a simple debugfs interface to view all clocks */ - (void) debugfs_create_file(dev_name(&pl08x->adev->dev), - S_IFREG | S_IRUGO, NULL, pl08x, - &pl08x_debugfs_fops); + debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, + NULL, pl08x, &pl08x_debugfs_fops); } #else diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 627ef3e5b312..b58ac720d9a1 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1568,11 +1568,14 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) struct at_xdmac_desc *desc; struct dma_async_tx_descriptor *txd; - desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); - txd = &desc->tx_dma_desc; + if (!list_empty(&atchan->xfers_list)) { + desc = list_first_entry(&atchan->xfers_list, + struct at_xdmac_desc, xfer_node); + txd = &desc->tx_dma_desc; - if (txd->flags & DMA_PREP_INTERRUPT) - dmaengine_desc_get_callback_invoke(txd, NULL); + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); + } } static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index fa81d0177765..275e90fa829d 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c @@ -164,7 +164,6 @@ struct sba_device { struct list_head reqs_free_list; /* DebugFS directory entries */ struct dentry *root; - struct dentry *stats; }; /* ====== Command helper routines ===== */ @@ -1716,17 +1715,11 @@ static int sba_probe(struct platform_device *pdev) /* Create debugfs root entry */ sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); - if (IS_ERR_OR_NULL(sba->root)) { - dev_err(sba->dev, "failed to create debugfs root entry\n"); - sba->root = NULL; - goto skip_debugfs; - } /* Create debugfs stats entry */ - sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, - sba_debugfs_stats_show); - if (IS_ERR_OR_NULL(sba->stats)) - dev_err(sba->dev, "failed to create debugfs stats file\n"); + debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, + sba_debugfs_stats_show); + skip_debugfs: /* Register DMA device with Linux async framework */ diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c 
index 547786ac342b..e51d836afcc7 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1378,10 +1378,8 @@ static int __init init_coh901318_debugfs(void) dma_dentry = debugfs_create_dir("dma", NULL); - (void) debugfs_create_file("status", - S_IFREG | S_IRUGO, - dma_dentry, NULL, - &coh901318_debugfs_status_operations); + debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL, + &coh901318_debugfs_status_operations); return 0; } diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 8a3f1043917b..a0ee404b736e 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -2,7 +2,7 @@ /* * Driver for the Analog Devices AXI-DMAC core * - * Copyright 2013-2015 Analog Devices Inc. + * Copyright 2013-2019 Analog Devices Inc. * Author: Lars-Peter Clausen <lars@metafoo.de> */ @@ -18,7 +18,9 @@ #include <linux/of.h> #include <linux/of_dma.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/slab.h> +#include <linux/fpga/adi-axi-common.h> #include <dt-bindings/dma/axi-dmac.h> @@ -62,6 +64,8 @@ #define AXI_DMAC_REG_STATUS 0x430 #define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434 #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438 +#define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c +#define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450 #define AXI_DMAC_CTRL_ENABLE BIT(0) #define AXI_DMAC_CTRL_PAUSE BIT(1) @@ -70,6 +74,10 @@ #define AXI_DMAC_IRQ_EOT BIT(1) #define AXI_DMAC_FLAG_CYCLIC BIT(0) +#define AXI_DMAC_FLAG_LAST BIT(1) +#define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2) + +#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31) /* The maximum ID allocated by the hardware is 31 */ #define AXI_DMAC_SG_UNUSED 32U @@ -82,12 +90,14 @@ struct axi_dmac_sg { unsigned int dest_stride; unsigned int src_stride; unsigned int id; + unsigned int partial_len; bool schedule_when_free; }; struct axi_dmac_desc { struct virt_dma_desc vdesc; bool cyclic; + bool have_partial_xfer; unsigned int num_submitted; unsigned int num_completed; @@ -108,8 +118,10 @@ struct axi_dmac_chan { unsigned int dest_type; unsigned int max_length; - unsigned int align_mask; + unsigned int address_align_mask; + unsigned int length_align_mask; + bool hw_partial_xfer; bool hw_cyclic; bool hw_2d; }; @@ -167,14 +179,14 @@ static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len) { if (len == 0) return false; - if ((len & chan->align_mask) != 0) /* Not aligned */ + if ((len & chan->length_align_mask) != 0) /* Not aligned */ return false; return true; } static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr) { - if ((addr & chan->align_mask) != 0) /* Not aligned */ + if ((addr & chan->address_align_mask) != 0) /* Not aligned */ return false; return true; } @@ -210,11 +222,13 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) } desc->num_submitted++; - if (desc->num_submitted == desc->num_sgs) { + if (desc->num_submitted == desc->num_sgs || + desc->have_partial_xfer) { if (desc->cyclic) desc->num_submitted = 0; /* Start again */ else chan->next_desc = NULL; + flags |= AXI_DMAC_FLAG_LAST; } else { chan->next_desc = desc; } @@ -240,6 +254,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) desc->num_sgs == 1) flags |= AXI_DMAC_FLAG_CYCLIC; + if (chan->hw_partial_xfer) + flags |= AXI_DMAC_FLAG_PARTIAL_REPORT; + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1); axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags); @@ -252,6 +269,83 @@ static struct axi_dmac_desc 
*axi_dmac_active_desc(struct axi_dmac_chan *chan) struct axi_dmac_desc, vdesc.node); } +static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan, + struct axi_dmac_sg *sg) +{ + if (chan->hw_2d) + return sg->x_len * sg->y_len; + else + return sg->x_len; +} + +static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan) +{ + struct axi_dmac *dmac = chan_to_axi_dmac(chan); + struct axi_dmac_desc *desc; + struct axi_dmac_sg *sg; + u32 xfer_done, len, id, i; + bool found_sg; + + do { + len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN); + id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID); + + found_sg = false; + list_for_each_entry(desc, &chan->active_descs, vdesc.node) { + for (i = 0; i < desc->num_sgs; i++) { + sg = &desc->sg[i]; + if (sg->id == AXI_DMAC_SG_UNUSED) + continue; + if (sg->id == id) { + desc->have_partial_xfer = true; + sg->partial_len = len; + found_sg = true; + break; + } + } + if (found_sg) + break; + } + + if (found_sg) { + dev_dbg(dmac->dma_dev.dev, + "Found partial segment id=%u, len=%u\n", + id, len); + } else { + dev_warn(dmac->dma_dev.dev, + "Not found partial segment id=%u, len=%u\n", + id, len); + } + + /* Check if we have any more partial transfers */ + xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); + xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE); + + } while (!xfer_done); +} + +static void axi_dmac_compute_residue(struct axi_dmac_chan *chan, + struct axi_dmac_desc *active) +{ + struct dmaengine_result *rslt = &active->vdesc.tx_result; + unsigned int start = active->num_completed - 1; + struct axi_dmac_sg *sg; + unsigned int i, total; + + rslt->result = DMA_TRANS_NOERROR; + rslt->residue = 0; + + /* + * We get here if the last completed segment is partial, which + * means we can compute the residue from that segment onwards + */ + for (i = start; i < active->num_sgs; i++) { + sg = &active->sg[i]; + total = axi_dmac_total_sg_bytes(chan, sg); + rslt->residue += (total - sg->partial_len); + } +} + static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, unsigned int completed_transfers) { @@ -263,6 +357,10 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, if (!active) return false; + if (chan->hw_partial_xfer && + (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE)) + axi_dmac_dequeue_partial_xfers(chan); + do { sg = &active->sg[active->num_completed]; if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */ @@ -276,10 +374,14 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, start_next = true; } + if (sg->partial_len) + axi_dmac_compute_residue(chan, active); + if (active->cyclic) vchan_cyclic_callback(&active->vdesc); - if (active->num_completed == active->num_sgs) { + if (active->num_completed == active->num_sgs || + sg->partial_len) { if (active->cyclic) { active->num_completed = 0; /* wrap around */ } else { @@ -391,7 +493,7 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan, num_segments = DIV_ROUND_UP(period_len, chan->max_length); segment_size = DIV_ROUND_UP(period_len, num_segments); /* Take care of alignment */ - segment_size = ((segment_size - 1) | chan->align_mask) + 1; + segment_size = ((segment_size - 1) | chan->length_align_mask) + 1; for (i = 0; i < num_periods; i++) { len = period_len; @@ -561,6 +663,9 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved( desc->sg[0].y_len = 1; } + if (flags & DMA_CYCLIC) + desc->cyclic = true; + return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); } @@ -574,6 
+679,44 @@ static void axi_dmac_desc_free(struct virt_dma_desc *vdesc) kfree(container_of(vdesc, struct axi_dmac_desc, vdesc)); } +static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg) +{ + switch (reg) { + case AXI_DMAC_REG_IRQ_MASK: + case AXI_DMAC_REG_IRQ_SOURCE: + case AXI_DMAC_REG_IRQ_PENDING: + case AXI_DMAC_REG_CTRL: + case AXI_DMAC_REG_TRANSFER_ID: + case AXI_DMAC_REG_START_TRANSFER: + case AXI_DMAC_REG_FLAGS: + case AXI_DMAC_REG_DEST_ADDRESS: + case AXI_DMAC_REG_SRC_ADDRESS: + case AXI_DMAC_REG_X_LENGTH: + case AXI_DMAC_REG_Y_LENGTH: + case AXI_DMAC_REG_DEST_STRIDE: + case AXI_DMAC_REG_SRC_STRIDE: + case AXI_DMAC_REG_TRANSFER_DONE: + case AXI_DMAC_REG_ACTIVE_TRANSFER_ID: + case AXI_DMAC_REG_STATUS: + case AXI_DMAC_REG_CURRENT_SRC_ADDR: + case AXI_DMAC_REG_CURRENT_DEST_ADDR: + case AXI_DMAC_REG_PARTIAL_XFER_LEN: + case AXI_DMAC_REG_PARTIAL_XFER_ID: + return true; + default: + return false; + } +} + +static const struct regmap_config axi_dmac_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID, + .readable_reg = axi_dmac_regmap_rdwr, + .writeable_reg = axi_dmac_regmap_rdwr, +}; + /* * The configuration stored in the devicetree matches the configuration * parameters of the peripheral instance and allows the driver to know which @@ -617,7 +760,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, return ret; chan->dest_width = val / 8; - chan->align_mask = max(chan->dest_width, chan->src_width) - 1; + chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1; if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) chan->direction = DMA_MEM_TO_MEM; @@ -631,9 +774,12 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, return 0; } -static void axi_dmac_detect_caps(struct axi_dmac *dmac) +static int axi_dmac_detect_caps(struct axi_dmac *dmac) { struct axi_dmac_chan *chan = &dmac->chan; + unsigned int version; + + version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC) @@ -647,6 +793,35 @@ static void axi_dmac_detect_caps(struct axi_dmac *dmac) chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); if (chan->max_length != UINT_MAX) chan->max_length++; + + axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff); + if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 && + chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) { + dev_err(dmac->dma_dev.dev, + "Destination memory-mapped interface not supported."); + return -ENODEV; + } + + axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff); + if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 && + chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) { + dev_err(dmac->dma_dev.dev, + "Source memory-mapped interface not supported."); + return -ENODEV; + } + + if (version >= ADI_AXI_PCORE_VER(4, 2, 'a')) + chan->hw_partial_xfer = true; + + if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) { + axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00); + chan->length_align_mask = + axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); + } else { + chan->length_align_mask = chan->address_align_mask; + } + + return 0; } static int axi_dmac_probe(struct platform_device *pdev) @@ -722,7 +897,11 @@ static int axi_dmac_probe(struct platform_device *pdev) if (ret < 0) return ret; - axi_dmac_detect_caps(dmac); + ret = axi_dmac_detect_caps(dmac); + if (ret) + goto err_clk_disable; + + dma_dev->copy_align = 
(dmac->chan.address_align_mask + 1); axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); @@ -742,6 +921,8 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); + devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + return 0; err_unregister_of: diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 6b8c4c458e8a..7fe9309a876b 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -156,7 +156,6 @@ struct jz4780_dma_dev { }; struct jz4780_dma_filter_data { - struct device_node *of_node; uint32_t transfer_type; int channel; }; @@ -772,8 +771,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param) struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); struct jz4780_dma_filter_data *data = param; - if (jzdma->dma_device.dev->of_node != data->of_node) - return false; if (data->channel > -1) { if (data->channel != jzchan->id) @@ -797,7 +794,6 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 2) return NULL; - data.of_node = ofdma->of_node; data.transfer_type = dma_spec->args[0]; data.channel = dma_spec->args[1]; @@ -822,7 +818,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, return dma_get_slave_channel( &jzdma->chan[data.channel].vchan.chan); } else { - return dma_request_channel(mask, jz4780_dma_filter_fn, &data); + return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data, + ofdma->of_node); } } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 58cbf9fd5a46..03ac4b96117c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -61,7 +61,7 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ /** - * dev_to_dma_chan - convert a device pointer to the its sysfs container object + * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node * * Must be called under dma_list_mutex @@ -629,11 +629,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); * @mask: capabilities that the channel must satisfy * @fn: optional callback to disposition available channels * @fn_param: opaque parameter to pass to dma_filter_fn + * @np: device node to look for DMA channels * * Returns pointer to appropriate DMA channel on success or NULL. 
*/ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, - dma_filter_fn fn, void *fn_param) + dma_filter_fn fn, void *fn_param, + struct device_node *np) { struct dma_device *device, *_d; struct dma_chan *chan = NULL; @@ -641,6 +643,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, /* Find a channel */ mutex_lock(&dma_list_mutex); list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { + /* Finds a DMA controller with matching device node */ + if (np && device->dev->of_node && np != device->dev->of_node) + continue; + chan = find_candidate(device, mask, fn, fn_param); if (!IS_ERR(chan)) break; @@ -699,7 +705,7 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) chan = acpi_dma_request_slave_chan_by_name(dev, name); if (chan) { - /* Valid channel found or requester need to be deferred */ + /* Valid channel found or requester needs to be deferred */ if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) return chan; } @@ -757,7 +763,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) if (!mask) return ERR_PTR(-ENODEV); - chan = __dma_request_channel(mask, NULL, NULL); + chan = __dma_request_channel(mask, NULL, NULL, NULL); if (!chan) { mutex_lock(&dma_list_mutex); if (list_empty(&dma_device_list)) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d0ad46e916a6..3d22ae8dca72 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -62,7 +62,7 @@ MODULE_PARM_DESC(pq_sources, static int timeout = 3000; module_param(timeout, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " - "Pass -1 for infinite timeout"); + "Pass 0xFFFFFFFF (4294967295) for maximum timeout"); static bool noverify; module_param(noverify, bool, S_IRUGO | S_IWUSR); @@ -94,7 +94,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default * @iterations: iterations before stopping test * @xor_sources: number of xor source buffers * @pq_sources: number of p+q source buffers - * @timeout: transfer timeout in msec, -1 for infinite timeout + * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295) */ struct dmatest_params { unsigned int buf_size; @@ -105,7 +105,7 @@ struct dmatest_params { unsigned int iterations; unsigned int xor_sources; unsigned int pq_sources; - int timeout; + unsigned int timeout; bool noverify; bool norandom; int alignment; diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig new file mode 100644 index 000000000000..7ff17b2db6a1 --- /dev/null +++ b/drivers/dma/dw-edma/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 + +config DW_EDMA + tristate "Synopsys DesignWare eDMA controller driver" + depends on PCI && PCI_MSI + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the Synopsys DesignWare eDMA controller, normally + implemented on endpoint SoCs. + +config DW_EDMA_PCIE + tristate "Synopsys DesignWare eDMA PCIe driver" + depends on PCI && PCI_MSI + select DW_EDMA + help + Provides the glue logic between the Synopsys DesignWare + eDMA controller and an endpoint PCIe device. This also serves + as a reference design for anyone who wants to use this IP.
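For illustration only, a minimal sketch (not part of this patch) of how a DMA client could use the new device_node argument added to __dma_request_channel() in the dmaengine.c hunk above; the helper name and filter callback are hypothetical:

#include <linux/dmaengine.h>
#include <linux/of.h>

static struct dma_chan *request_chan_for_node(struct device_node *np,
					      dma_filter_fn fn, void *filter_arg)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* With np set, only controllers whose dev->of_node matches np are tried */
	return __dma_request_channel(&mask, fn, filter_arg, np);
}

This mirrors how jz4780_of_dma_xlate() above now passes ofdma->of_node, so a filter-based request is scoped to a single DMA controller instead of scanning every registered device.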
diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile new file mode 100644 index 000000000000..8d45c0d5689d --- /dev/null +++ b/drivers/dma/dw-edma/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_DW_EDMA) += dw-edma.o +dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o +dw-edma-objs := dw-edma-core.o \ + dw-edma-v0-core.o $(dw-edma-y) +obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c new file mode 100644 index 000000000000..ff392c01bad1 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.c @@ -0,0 +1,937 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/pm_runtime.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/dma/edma.h> +#include <linux/pci.h> + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "../dmaengine.h" +#include "../virt-dma.h" + +static inline +struct device *dchan2dev(struct dma_chan *dchan) +{ + return &dchan->dev->device; +} + +static inline +struct device *chan2dev(struct dw_edma_chan *chan) +{ + return &chan->vc.chan.dev->device; +} + +static inline +struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct dw_edma_desc, vd); +} + +static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *burst; + + burst = kzalloc(sizeof(*burst), GFP_NOWAIT); + if (unlikely(!burst)) + return NULL; + + INIT_LIST_HEAD(&burst->list); + if (chunk->burst) { + /* Create and add new element into the linked list */ + chunk->bursts_alloc++; + list_add_tail(&burst->list, &chunk->burst->list); + } else { + /* List head */ + chunk->bursts_alloc = 0; + chunk->burst = burst; + } + + return burst; +} + +static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chan *chan = desc->chan; + struct dw_edma *dw = chan->chip->dw; + struct dw_edma_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); + if (unlikely(!chunk)) + return NULL; + + INIT_LIST_HEAD(&chunk->list); + chunk->chan = chan; + /* Toggling change bit (CB) in each chunk, this is a mechanism to + * inform the eDMA HW block that this is a new linked list ready + * to be consumed. 
+ * - Odd chunks originate CB equal to 0 + * - Even chunks originate CB equal to 1 + */ + chunk->cb = !(desc->chunks_alloc % 2); + chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off; + chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off; + + if (desc->chunk) { + /* Create and add new element into the linked list */ + desc->chunks_alloc++; + list_add_tail(&chunk->list, &desc->chunk->list); + if (!dw_edma_alloc_burst(chunk)) { + kfree(chunk); + return NULL; + } + } else { + /* List head */ + chunk->burst = NULL; + desc->chunks_alloc = 0; + desc->chunk = chunk; + } + + return chunk; +} + +static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (unlikely(!desc)) + return NULL; + + desc->chan = chan; + if (!dw_edma_alloc_chunk(desc)) { + kfree(desc); + return NULL; + } + + return desc; +} + +static void dw_edma_free_burst(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child, *_next; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { + list_del(&child->list); + kfree(child); + chunk->bursts_alloc--; + } + + /* Remove the list head */ + kfree(child); + chunk->burst = NULL; +} + +static void dw_edma_free_chunk(struct dw_edma_desc *desc) +{ + struct dw_edma_chunk *child, *_next; + + if (!desc->chunk) + return; + + /* Remove all the list elements */ + list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; + } + + /* Remove the list head */ + kfree(child); + desc->chunk = NULL; +} + +static void dw_edma_free_desc(struct dw_edma_desc *desc) +{ + dw_edma_free_chunk(desc); + kfree(desc); +} + +static void vchan_free_desc(struct virt_dma_desc *vdesc) +{ + dw_edma_free_desc(vd2dw_edma_desc(vdesc)); +} + +static void dw_edma_start_transfer(struct dw_edma_chan *chan) +{ + struct dw_edma_chunk *child; + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) + return; + + desc = vd2dw_edma_desc(vd); + if (!desc) + return; + + child = list_first_entry_or_null(&desc->chunk->list, + struct dw_edma_chunk, list); + if (!child) + return; + + dw_edma_v0_core_start(child, !desc->xfer_sz); + desc->xfer_sz += child->ll_region.sz; + dw_edma_free_burst(child); + list_del(&child->list); + kfree(child); + desc->chunks_alloc--; +} + +static int dw_edma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + memcpy(&chan->config, config, sizeof(*config)); + chan->configured = true; + + return 0; +} + +static int dw_edma_device_pause(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) + err = -EPERM; + else if (chan->status != EDMA_ST_BUSY) + err = -EPERM; + else if (chan->request != EDMA_REQ_NONE) + err = -EPERM; + else + chan->request = EDMA_REQ_PAUSE; + + return err; +} + +static int dw_edma_device_resume(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + + if (!chan->configured) { + err = -EPERM; + } else if (chan->status != EDMA_ST_PAUSE) { + err = -EPERM; + } else if (chan->request != EDMA_REQ_NONE) { + err = -EPERM; + } else { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + + return err; +} + +static int dw_edma_device_terminate_all(struct dma_chan *dchan) +{ + 
struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int err = 0; + LIST_HEAD(head); + + if (!chan->configured) { + /* Do nothing */ + } else if (chan->status == EDMA_ST_PAUSE) { + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->status == EDMA_ST_IDLE) { + chan->configured = false; + } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) { + /* + * The channel is in a false BUSY state, probably didn't + * receive or lost an interrupt + */ + chan->status = EDMA_ST_IDLE; + chan->configured = false; + } else if (chan->request > EDMA_REQ_PAUSE) { + err = -EPERM; + } else { + chan->request = EDMA_REQ_STOP; + } + + return err; +} + +static void dw_edma_device_issue_pending(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + if (chan->configured && chan->request == EDMA_REQ_NONE && + chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static enum dma_status +dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + enum dma_status ret; + u32 residue = 0; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) + ret = DMA_PAUSED; + + if (!txstate) + goto ret_residue; + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_find_desc(&chan->vc, cookie); + if (vd) { + desc = vd2dw_edma_desc(vd); + if (desc) + residue = desc->alloc_sz - desc->xfer_sz; + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + +ret_residue: + dma_set_residue(txstate, residue); + + return ret; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_transfer(struct dw_edma_transfer *xfer) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan); + enum dma_transfer_direction direction = xfer->direction; + phys_addr_t src_addr, dst_addr; + struct scatterlist *sg = NULL; + struct dw_edma_chunk *chunk; + struct dw_edma_burst *burst; + struct dw_edma_desc *desc; + u32 cnt; + int i; + + if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) || + (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)) + return NULL; + + if (xfer->cyclic) { + if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt) + return NULL; + } else { + if (xfer->xfer.sg.len < 1) + return NULL; + } + + if (!chan->configured) + return NULL; + + desc = dw_edma_alloc_desc(chan); + if (unlikely(!desc)) + goto err_alloc; + + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + + src_addr = chan->config.src_addr; + dst_addr = chan->config.dst_addr; + + if (xfer->cyclic) { + cnt = xfer->xfer.cyclic.cnt; + } else { + cnt = xfer->xfer.sg.len; + sg = xfer->xfer.sg.sgl; + } + + for (i = 0; i < cnt; i++) { + if (!xfer->cyclic && !sg) + break; + + if (chunk->bursts_alloc == chan->ll_max) { + chunk = dw_edma_alloc_chunk(desc); + if (unlikely(!chunk)) + goto err_alloc; + } + + burst = dw_edma_alloc_burst(chunk); + if (unlikely(!burst)) + goto err_alloc; + + if (xfer->cyclic) + burst->sz = xfer->xfer.cyclic.len; + else + burst->sz = sg_dma_len(sg); + + chunk->ll_region.sz += burst->sz; + desc->alloc_sz += burst->sz; + + if (direction == DMA_DEV_TO_MEM) { + burst->sar 
= src_addr; + if (xfer->cyclic) { + burst->dar = xfer->xfer.cyclic.paddr; + } else { + burst->dar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that's why the source + * and destination addresses are increased + * by the same portion (data length) + */ + src_addr += sg_dma_len(sg); + } + } else { + burst->dar = dst_addr; + if (xfer->cyclic) { + burst->sar = xfer->xfer.cyclic.paddr; + } else { + burst->sar = sg_dma_address(sg); + /* Unlike the typical assumption by other + * drivers/IPs the peripheral memory isn't + * a FIFO memory, in this case, it's a + * linear memory and that's why the source + * and destination addresses are increased + * by the same portion (data length) + */ + dst_addr += sg_dma_len(sg); + } + } + + if (!xfer->cyclic) + sg = sg_next(sg); + } + + return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags); + +err_alloc: + if (desc) + dw_edma_free_desc(desc); + + return NULL; +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, + unsigned int len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.sg.sgl = sgl; + xfer.xfer.sg.len = len; + xfer.flags = flags; + xfer.cyclic = false; + + return dw_edma_device_transfer(&xfer); +} + +static struct dma_async_tx_descriptor * +dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, + size_t len, size_t count, + enum dma_transfer_direction direction, + unsigned long flags) +{ + struct dw_edma_transfer xfer; + + xfer.dchan = dchan; + xfer.direction = direction; + xfer.xfer.cyclic.paddr = paddr; + xfer.xfer.cyclic.len = len; + xfer.xfer.cyclic.cnt = count; + xfer.flags = flags; + xfer.cyclic = true; + + return dw_edma_device_transfer(&xfer); +} + +static void dw_edma_done_interrupt(struct dw_edma_chan *chan) +{ + struct dw_edma_desc *desc; + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_done_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + switch (chan->request) { + case EDMA_REQ_NONE: + desc = vd2dw_edma_desc(vd); + if (desc->chunks_alloc) { + chan->status = EDMA_ST_BUSY; + dw_edma_start_transfer(chan); + } else { + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->status = EDMA_ST_IDLE; + } + break; + + case EDMA_REQ_STOP: + list_del(&vd->node); + vchan_cookie_complete(vd); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + break; + + case EDMA_REQ_PAUSE: + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_PAUSE; + break; + + default: + break; + } + } + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static void dw_edma_abort_interrupt(struct dw_edma_chan *chan) +{ + struct virt_dma_desc *vd; + unsigned long flags; + + dw_edma_v0_core_clear_abort_int(chan); + + spin_lock_irqsave(&chan->vc.lock, flags); + vd = vchan_next_desc(&chan->vc); + if (vd) { + list_del(&vd->node); + vchan_cookie_complete(vd); + } + spin_unlock_irqrestore(&chan->vc.lock, flags); + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; +} + +static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write) +{ + struct dw_edma_irq *dw_irq = data; + struct dw_edma *dw = dw_irq->dw; + unsigned long total, pos, val; + unsigned long off; + u32 mask; + + if (write) { + total = dw->wr_ch_cnt; + off = 0; +
mask = dw_irq->wr_mask; + } else { + total = dw->rd_ch_cnt; + off = dw->wr_ch_cnt; + mask = dw_irq->rd_mask; + } + + val = dw_edma_v0_core_status_done_int(dw, write ? + EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_done_interrupt(chan); + } + + val = dw_edma_v0_core_status_abort_int(dw, write ? + EDMA_DIR_WRITE : + EDMA_DIR_READ); + val &= mask; + for_each_set_bit(pos, &val, total) { + struct dw_edma_chan *chan = &dw->chan[pos + off]; + + dw_edma_abort_interrupt(chan); + } + + return IRQ_HANDLED; +} + +static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data) +{ + return dw_edma_interrupt(irq, data, true); +} + +static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data) +{ + return dw_edma_interrupt(irq, data, false); +} + +static irqreturn_t dw_edma_interrupt_common(int irq, void *data) +{ + dw_edma_interrupt(irq, data, true); + dw_edma_interrupt(irq, data, false); + + return IRQ_HANDLED; +} + +static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) +{ + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + + if (chan->status != EDMA_ST_IDLE) + return -EBUSY; + + pm_runtime_get(chan->chip->dev); + + return 0; +} + +static void dw_edma_free_chan_resources(struct dma_chan *dchan) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(5000); + struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); + int ret; + + while (time_before(jiffies, timeout)) { + ret = dw_edma_device_terminate_all(dchan); + if (!ret) + break; + + if (time_after_eq(jiffies, timeout)) + return; + + cpu_relax(); + } + + pm_runtime_put(chan->chip->dev); +} + +static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write, + u32 wr_alloc, u32 rd_alloc) +{ + struct dw_edma_region *dt_region; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + struct dw_edma_chan *chan; + size_t ll_chunk, dt_chunk; + struct dw_edma_irq *irq; + struct dma_device *dma; + u32 i, j, cnt, ch_cnt; + u32 alloc, off_alloc; + int err = 0; + u32 pos; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + ll_chunk = dw->ll_region.sz; + dt_chunk = dw->dt_region.sz; + + /* Calculate linked list chunk for each channel */ + ll_chunk /= roundup_pow_of_two(ch_cnt); + + /* Calculate data chunk for each channel */ + dt_chunk /= roundup_pow_of_two(ch_cnt); + + if (write) { + i = 0; + cnt = dw->wr_ch_cnt; + dma = &dw->wr_edma; + alloc = wr_alloc; + off_alloc = 0; + } else { + i = dw->wr_ch_cnt; + cnt = dw->rd_ch_cnt; + dma = &dw->rd_edma; + alloc = rd_alloc; + off_alloc = wr_alloc; + } + + INIT_LIST_HEAD(&dma->channels); + for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) { + chan = &dw->chan[i]; + + dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL); + if (!dt_region) + return -ENOMEM; + + chan->vc.chan.private = dt_region; + + chan->chip = chip; + chan->id = j; + chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ; + chan->configured = false; + chan->request = EDMA_REQ_NONE; + chan->status = EDMA_ST_IDLE; + + chan->ll_off = (ll_chunk * i); + chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1; + + chan->dt_off = (dt_chunk * i); + + dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n", + write ?
"write" : "read", j, + chan->ll_off, chan->ll_max); + + if (dw->nr_irqs == 1) + pos = 0; + else + pos = off_alloc + (j % alloc); + + irq = &dw->irq[pos]; + + if (write) + irq->wr_mask |= BIT(j); + else + irq->rd_mask |= BIT(j); + + irq->dw = dw; + memcpy(&chan->msi, &irq->msi, sizeof(chan->msi)); + + dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n", + write ? "write" : "read", j, + chan->msi.address_hi, chan->msi.address_lo, + chan->msi.data); + + chan->vc.desc_free = vchan_free_desc; + vchan_init(&chan->vc, dma); + + dt_region->paddr = dw->dt_region.paddr + chan->dt_off; + dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off; + dt_region->sz = dt_chunk; + + dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n", + write ? "write" : "read", j, chan->dt_off); + + dw_edma_v0_core_device_config(chan); + } + + /* Set DMA channel capabilities */ + dma_cap_zero(dma->cap_mask); + dma_cap_set(DMA_SLAVE, dma->cap_mask); + dma_cap_set(DMA_CYCLIC, dma->cap_mask); + dma_cap_set(DMA_PRIVATE, dma->cap_mask); + dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV); + dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + dma->chancnt = cnt; + + /* Set DMA channel callbacks */ + dma->dev = chip->dev; + dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources; + dma->device_free_chan_resources = dw_edma_free_chan_resources; + dma->device_config = dw_edma_device_config; + dma->device_pause = dw_edma_device_pause; + dma->device_resume = dw_edma_device_resume; + dma->device_terminate_all = dw_edma_device_terminate_all; + dma->device_issue_pending = dw_edma_device_issue_pending; + dma->device_tx_status = dw_edma_device_tx_status; + dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg; + dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic; + + dma_set_max_seg_size(dma->dev, U32_MAX); + + /* Register DMA device */ + err = dma_async_device_register(dma); + + return err; +} + +static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) +{ + if (*nr_irqs && *alloc < cnt) { + (*alloc)++; + (*nr_irqs)--; + } +} + +static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) +{ + while (*mask * alloc < cnt) + (*mask)++; +} + +static int dw_edma_irq_request(struct dw_edma_chip *chip, + u32 *wr_alloc, u32 *rd_alloc) +{ + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + u32 wr_mask = 1; + u32 rd_mask = 1; + int i, err = 0; + u32 ch_cnt; + + ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt; + + if (dw->nr_irqs < 1) + return -EINVAL; + + if (dw->nr_irqs == 1) { + /* Common IRQ shared among all channels */ + err = request_irq(pci_irq_vector(to_pci_dev(dev), 0), + dw_edma_interrupt_common, + IRQF_SHARED, dw->name, &dw->irq[0]); + if (err) { + dw->nr_irqs = 0; + return err; + } + + get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0), + &dw->irq[0].msi); + } else { + /* Distribute IRQs equally among all channels */ + int tmp = dw->nr_irqs; + + while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) { + dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt); + dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt); + } + + dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt); + dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt); + + for (i = 0; i < (*wr_alloc + *rd_alloc); i++) { + err = request_irq(pci_irq_vector(to_pci_dev(dev), i), + i < *wr_alloc ? 
+ dw_edma_interrupt_write : + dw_edma_interrupt_read, + IRQF_SHARED, dw->name, + &dw->irq[i]); + if (err) { + dw->nr_irqs = i; + return err; + } + + get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i), + &dw->irq[i].msi); + } + + dw->nr_irqs = i; + } + + return err; +} + +int dw_edma_probe(struct dw_edma_chip *chip) +{ + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + u32 wr_alloc = 0; + u32 rd_alloc = 0; + int i, err; + + raw_spin_lock_init(&dw->lock); + + /* Find out how many write channels are supported by hardware */ + dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE); + if (!dw->wr_ch_cnt) + return -EINVAL; + + /* Find out how many read channels are supported by hardware */ + dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ); + if (!dw->rd_ch_cnt) + return -EINVAL; + + dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n", + dw->wr_ch_cnt, dw->rd_ch_cnt); + + /* Allocate channels */ + dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt, + sizeof(*dw->chan), GFP_KERNEL); + if (!dw->chan) + return -ENOMEM; + + snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id); + + /* Disable eDMA, only to establish the ideal initial conditions */ + dw_edma_v0_core_off(dw); + + /* Request IRQs */ + err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc); + if (err) + return err; + + /* Setup write channels */ + err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Setup read channels */ + err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc); + if (err) + goto err_irq_free; + + /* Power management */ + pm_runtime_enable(dev); + + /* Turn debugfs on */ + dw_edma_v0_core_debugfs_on(chip); + + return 0; + +err_irq_free: + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); + + dw->nr_irqs = 0; + + return err; +} +EXPORT_SYMBOL_GPL(dw_edma_probe); + +int dw_edma_remove(struct dw_edma_chip *chip) +{ + struct dw_edma_chan *chan, *_chan; + struct device *dev = chip->dev; + struct dw_edma *dw = chip->dw; + int i; + + /* Disable eDMA */ + dw_edma_v0_core_off(dw); + + /* Free irqs */ + for (i = (dw->nr_irqs - 1); i >= 0; i--) + free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]); + + /* Power management */ + pm_runtime_disable(dev); + + list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels, + vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } + + list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels, + vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); + tasklet_kill(&chan->vc.task); + } + + /* Deregister eDMA device */ + dma_async_device_unregister(&dw->wr_edma); + dma_async_device_unregister(&dw->rd_edma); + + /* Turn debugfs off */ + dw_edma_v0_core_debugfs_off(); + + return 0; +} +EXPORT_SYMBOL_GPL(dw_edma_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); +MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h new file mode 100644 index 000000000000..b6cc90cbc9dc --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA core driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_CORE_H +#define _DW_EDMA_CORE_H + +#include <linux/msi.h> +#include <linux/dma/edma.h> + +#include "../virt-dma.h" + +#define EDMA_LL_SZ 24 + +enum dw_edma_dir { + EDMA_DIR_WRITE = 0, + EDMA_DIR_READ +}; + +enum dw_edma_mode { + EDMA_MODE_LEGACY = 0, + EDMA_MODE_UNROLL +}; + +enum dw_edma_request { + EDMA_REQ_NONE = 0, + EDMA_REQ_STOP, + EDMA_REQ_PAUSE +}; + +enum dw_edma_status { + EDMA_ST_IDLE = 0, + EDMA_ST_PAUSE, + EDMA_ST_BUSY +}; + +struct dw_edma_chan; +struct dw_edma_chunk; + +struct dw_edma_burst { + struct list_head list; + u64 sar; + u64 dar; + u32 sz; +}; + +struct dw_edma_region { + phys_addr_t paddr; + dma_addr_t vaddr; + size_t sz; +}; + +struct dw_edma_chunk { + struct list_head list; + struct dw_edma_chan *chan; + struct dw_edma_burst *burst; + + u32 bursts_alloc; + + u8 cb; + struct dw_edma_region ll_region; /* Linked list */ +}; + +struct dw_edma_desc { + struct virt_dma_desc vd; + struct dw_edma_chan *chan; + struct dw_edma_chunk *chunk; + + u32 chunks_alloc; + + u32 alloc_sz; + u32 xfer_sz; +}; + +struct dw_edma_chan { + struct virt_dma_chan vc; + struct dw_edma_chip *chip; + int id; + enum dw_edma_dir dir; + + off_t ll_off; + u32 ll_max; + + off_t dt_off; + + struct msi_msg msi; + + enum dw_edma_request request; + enum dw_edma_status status; + u8 configured; + + struct dma_slave_config config; +}; + +struct dw_edma_irq { + struct msi_msg msi; + u32 wr_mask; + u32 rd_mask; + struct dw_edma *dw; +}; + +struct dw_edma { + char name[20]; + + struct dma_device wr_edma; + u16 wr_ch_cnt; + + struct dma_device rd_edma; + u16 rd_ch_cnt; + + struct dw_edma_region rg_region; /* Registers */ + struct dw_edma_region ll_region; /* Linked list */ + struct dw_edma_region dt_region; /* Data */ + + struct dw_edma_irq *irq; + int nr_irqs; + + u32 version; + enum dw_edma_mode mode; + + struct dw_edma_chan *chan; + const struct dw_edma_core_ops *ops; + + raw_spinlock_t lock; /* Only for legacy */ +}; + +struct dw_edma_sg { + struct scatterlist *sgl; + unsigned int len; +}; + +struct dw_edma_cyclic { + dma_addr_t paddr; + size_t len; + size_t cnt; +}; + +struct dw_edma_transfer { + struct dma_chan *dchan; + union dw_edma_xfer { + struct dw_edma_sg sg; + struct dw_edma_cyclic cyclic; + } xfer; + enum dma_transfer_direction direction; + unsigned long flags; + bool cyclic; +}; + +static inline +struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct dw_edma_chan, vc); +} + +static inline +struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan) +{ + return vc2dw_edma_chan(to_virt_chan(dchan)); +} + +#endif /* _DW_EDMA_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c new file mode 100644 index 000000000000..4c96e1c948f2 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-pcie.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA PCIe driver + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/dma/edma.h> +#include <linux/pci-epf.h> +#include <linux/msi.h> + +#include "dw-edma-core.h" + +struct dw_edma_pcie_data { + /* eDMA registers location */ + enum pci_barno rg_bar; + off_t rg_off; + size_t rg_sz; + /* eDMA memory linked list location */ + enum pci_barno ll_bar; + off_t ll_off; + size_t ll_sz; + /* eDMA memory data location */ + enum pci_barno dt_bar; + off_t dt_off; + size_t dt_sz; + /* Other */ + u32 version; + enum dw_edma_mode mode; + u8 irqs; +}; + +static const struct dw_edma_pcie_data snps_edda_data = { + /* eDMA registers location */ + .rg_bar = BAR_0, + .rg_off = 0x00001000, /* 4 Kbytes */ + .rg_sz = 0x00002000, /* 8 Kbytes */ + /* eDMA memory linked list location */ + .ll_bar = BAR_2, + .ll_off = 0x00000000, /* 0 Kbytes */ + .ll_sz = 0x00800000, /* 8 Mbytes */ + /* eDMA memory data location */ + .dt_bar = BAR_2, + .dt_off = 0x00800000, /* 8 Mbytes */ + .dt_sz = 0x03800000, /* 56 Mbytes */ + /* Other */ + .version = 0, + .mode = EDMA_MODE_UNROLL, + .irqs = 1, +}; + +static int dw_edma_pcie_probe(struct pci_dev *pdev, + const struct pci_device_id *pid) +{ + const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; + struct device *dev = &pdev->dev; + struct dw_edma_chip *chip; + int err, nr_irqs; + struct dw_edma *dw; + + /* Enable PCI device */ + err = pcim_enable_device(pdev); + if (err) { + pci_err(pdev, "enabling device failed\n"); + return err; + } + + /* Mapping PCI BAR regions */ + err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) | + BIT(pdata->ll_bar) | + BIT(pdata->dt_bar), + pci_name(pdev)); + if (err) { + pci_err(pdev, "eDMA BAR I/O remapping failed\n"); + return err; + } + + pci_set_master(pdev); + + /* DMA configuration */ + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + pci_err(pdev, "consistent DMA mask 64 set failed\n"); + return err; + } + } else { + pci_err(pdev, "DMA mask 64 set failed\n"); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pci_err(pdev, "DMA mask 32 set failed\n"); + return err; + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + pci_err(pdev, "consistent DMA mask 32 set failed\n"); + return err; + } + } + + /* Data structure allocation */ + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); + if (!dw) + return -ENOMEM; + + /* IRQs allocation */ + nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (nr_irqs < 1) { + pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", + nr_irqs); + return -EPERM; + } + + /* Data structure initialization */ + chip->dw = dw; + chip->dev = dev; + chip->id = pdev->devfn; + chip->irq = pdev->irq; + + dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; + dw->rg_region.vaddr += pdata->rg_off; + dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; + dw->rg_region.paddr += pdata->rg_off; + dw->rg_region.sz = pdata->rg_sz; + + dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; + dw->ll_region.vaddr += pdata->ll_off; + dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; + dw->ll_region.paddr += pdata->ll_off; + dw->ll_region.sz = pdata->ll_sz; + + 
dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; + dw->dt_region.vaddr += pdata->dt_off; + dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; + dw->dt_region.paddr += pdata->dt_off; + dw->dt_region.sz = pdata->dt_sz; + + dw->version = pdata->version; + dw->mode = pdata->mode; + dw->nr_irqs = nr_irqs; + + /* Debug info */ + pci_dbg(pdev, "Version:\t%u\n", dw->version); + + pci_dbg(pdev, "Mode:\t%s\n", + dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); + + pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pdata->rg_bar, pdata->rg_off, pdata->rg_sz, + &dw->rg_region.vaddr, &dw->rg_region.paddr); + + pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pdata->ll_bar, pdata->ll_off, pdata->ll_sz, + &dw->ll_region.vaddr, &dw->ll_region.paddr); + + pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", + pdata->dt_bar, pdata->dt_off, pdata->dt_sz, + &dw->dt_region.vaddr, &dw->dt_region.paddr); + + pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); + + /* Validating if PCI interrupts were enabled */ + if (!pci_dev_msi_enabled(pdev)) { + pci_err(pdev, "enable interrupt failed\n"); + return -EPERM; + } + + dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL); + if (!dw->irq) + return -ENOMEM; + + /* Starting eDMA driver */ + err = dw_edma_probe(chip); + if (err) { + pci_err(pdev, "eDMA probe failed\n"); + return err; + } + + /* Saving data structure reference */ + pci_set_drvdata(pdev, chip); + + return 0; +} + +static void dw_edma_pcie_remove(struct pci_dev *pdev) +{ + struct dw_edma_chip *chip = pci_get_drvdata(pdev); + int err; + + /* Stopping eDMA driver */ + err = dw_edma_remove(chip); + if (err) + pci_warn(pdev, "can't remove device properly: %d\n", err); + + /* Freeing IRQs */ + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id dw_edma_pcie_id_table[] = { + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) }, + { } +}; +MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table); + +static struct pci_driver dw_edma_pcie_driver = { + .name = "dw-edma-pcie", + .id_table = dw_edma_pcie_id_table, + .probe = dw_edma_pcie_probe, + .remove = dw_edma_pcie_remove, +}; + +module_pci_driver(dw_edma_pcie_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); +MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c new file mode 100644 index 000000000000..8a3180ed49a6 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/bitfield.h> + +#include "dw-edma-core.h" +#include "dw-edma-v0-core.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-v0-debugfs.h" + +enum dw_edma_control { + DW_EDMA_V0_CB = BIT(0), + DW_EDMA_V0_TCB = BIT(1), + DW_EDMA_V0_LLP = BIT(2), + DW_EDMA_V0_LIE = BIT(3), + DW_EDMA_V0_RIE = BIT(4), + DW_EDMA_V0_CCS = BIT(8), + DW_EDMA_V0_LLE = BIT(9), +}; + +static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw) +{ + return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr; +} + +#define SET(dw, name, value) \ + writel(value, &(__dw_regs(dw)->name)) + +#define GET(dw, name) \ + readl(&(__dw_regs(dw)->name)) + +#define SET_RW(dw, dir, name, value) \ + do { \ + if ((dir) == EDMA_DIR_WRITE) \ + SET(dw, wr_##name, value); \ + else \ + SET(dw, rd_##name, value); \ + } while (0) + +#define GET_RW(dw, dir, name) \ + ((dir) == EDMA_DIR_WRITE \ + ? GET(dw, wr_##name) \ + : GET(dw, rd_##name)) + +#define SET_BOTH(dw, name, value) \ + do { \ + SET(dw, wr_##name, value); \ + SET(dw, rd_##name, value); \ + } while (0) + +static inline struct dw_edma_v0_ch_regs __iomem * +__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) +{ + if (dw->mode == EDMA_MODE_LEGACY) + return &(__dw_regs(dw)->type.legacy.ch); + + if (dir == EDMA_DIR_WRITE) + return &__dw_regs(dw)->type.unroll.ch[ch].wr; + + return &__dw_regs(dw)->type.unroll.ch[ch].rd; +} + +static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + u32 value, void __iomem *addr) +{ + if (dw->mode == EDMA_MODE_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + writel(value, addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + writel(value, addr); + } +} + +static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch, + const void __iomem *addr) +{ + u32 value; + + if (dw->mode == EDMA_MODE_LEGACY) { + u32 viewport_sel; + unsigned long flags; + + raw_spin_lock_irqsave(&dw->lock, flags); + + viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + if (dir == EDMA_DIR_READ) + viewport_sel |= BIT(31); + + writel(viewport_sel, + &(__dw_regs(dw)->type.legacy.viewport_sel)); + value = readl(addr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + value = readl(addr); + } + + return value; +} + +#define SET_CH(dw, dir, ch, name, value) \ + writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define GET_CH(dw, dir, ch, name) \ + readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name)) + +#define SET_LL(ll, value) \ + writel(value, ll) + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *dw) +{ + SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK); + SET_BOTH(dw, engine_en, 0); +} + +u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) +{ + u32 num_ch; + + if (dir == EDMA_DIR_WRITE) + num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl)); + else + num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl)); + + if (num_ch > EDMA_V0_MAX_NR_CH) + num_ch = EDMA_V0_MAX_NR_CH; + + return (u16)num_ch; +} + +enum dma_status dw_edma_v0_core_ch_status(struct 
dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + u32 tmp; + + tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, + GET_CH(dw, chan->dir, chan->id, ch_control1)); + + if (tmp == 1) + return DMA_IN_PROGRESS; + else if (tmp == 3) + return DMA_COMPLETE; + else + return DMA_ERROR; +} + +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + + SET_RW(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id))); +} + +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = chan->chip->dw; + + SET_RW(dw, chan->dir, int_clear, + FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id))); +} + +u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status)); +} + +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir) +{ + return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status)); +} + +static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) +{ + struct dw_edma_burst *child; + struct dw_edma_v0_lli *lli; + struct dw_edma_v0_llp *llp; + u32 control = 0, i = 0; + u64 sar, dar, addr; + int j; + + lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; + + if (chunk->cb) + control = DW_EDMA_V0_CB; + + j = chunk->bursts_alloc; + list_for_each_entry(child, &chunk->burst->list, list) { + j--; + if (!j) + control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); + + /* Channel control */ + SET_LL(&lli[i].control, control); + /* Transfer size */ + SET_LL(&lli[i].transfer_size, child->sz); + /* SAR - low, high */ + sar = cpu_to_le64(child->sar); + SET_LL(&lli[i].sar_low, lower_32_bits(sar)); + SET_LL(&lli[i].sar_high, upper_32_bits(sar)); + /* DAR - low, high */ + dar = cpu_to_le64(child->dar); + SET_LL(&lli[i].dar_low, lower_32_bits(dar)); + SET_LL(&lli[i].dar_high, upper_32_bits(dar)); + i++; + } + + llp = (struct dw_edma_v0_llp *)&lli[i]; + control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; + if (!chunk->cb) + control |= DW_EDMA_V0_CB; + + /* Channel control */ + SET_LL(&llp->control, control); + /* Linked list - low, high */ + addr = cpu_to_le64(chunk->ll_region.paddr); + SET_LL(&llp->llp_low, lower_32_bits(addr)); + SET_LL(&llp->llp_high, upper_32_bits(addr)); +} + +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) +{ + struct dw_edma_chan *chan = chunk->chan; + struct dw_edma *dw = chan->chip->dw; + u32 tmp; + u64 llp; + + dw_edma_v0_core_write_chunk(chunk); + + if (first) { + /* Enable engine */ + SET_RW(dw, chan->dir, engine_en, BIT(0)); + /* Interrupt unmask - done, abort */ + tmp = GET_RW(dw, chan->dir, int_mask); + tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); + tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); + SET_RW(dw, chan->dir, int_mask, tmp); + /* Linked list error */ + tmp = GET_RW(dw, chan->dir, linked_list_err_en); + tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); + SET_RW(dw, chan->dir, linked_list_err_en, tmp); + /* Channel control */ + SET_CH(dw, chan->dir, chan->id, ch_control1, + (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); + /* Linked list - low, high */ + llp = cpu_to_le64(chunk->ll_region.paddr); + SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); + SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); + } + /* Doorbell */ + SET_RW(dw, chan->dir, doorbell, + FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); +} + +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan) +{ + struct dw_edma *dw = 
chan->chip->dw; + u32 tmp = 0; + + /* MSI done addr - low, high */ + SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo); + SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi); + /* MSI abort addr - low, high */ + SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo); + SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi); + /* MSI data - low, high */ + switch (chan->id) { + case 0: + case 1: + tmp = GET_RW(dw, chan->dir, ch01_imwr_data); + break; + + case 2: + case 3: + tmp = GET_RW(dw, chan->dir, ch23_imwr_data); + break; + + case 4: + case 5: + tmp = GET_RW(dw, chan->dir, ch45_imwr_data); + break; + + case 6: + case 7: + tmp = GET_RW(dw, chan->dir, ch67_imwr_data); + break; + } + + if (chan->id & BIT(0)) { + /* Channel odd {1, 3, 5, 7} */ + tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK, + chan->msi.data); + } else { + /* Channel even {0, 2, 4, 6} */ + tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK; + tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK, + chan->msi.data); + } + + switch (chan->id) { + case 0: + case 1: + SET_RW(dw, chan->dir, ch01_imwr_data, tmp); + break; + + case 2: + case 3: + SET_RW(dw, chan->dir, ch23_imwr_data, tmp); + break; + + case 4: + case 5: + SET_RW(dw, chan->dir, ch45_imwr_data, tmp); + break; + + case 6: + case 7: + SET_RW(dw, chan->dir, ch67_imwr_data, tmp); + break; + } + + return 0; +} + +/* eDMA debugfs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip) +{ + dw_edma_v0_debugfs_on(chip); +} + +void dw_edma_v0_core_debugfs_off(void) +{ + dw_edma_v0_debugfs_off(); +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h new file mode 100644 index 000000000000..abae1527f1f9 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_CORE_H +#define _DW_EDMA_V0_CORE_H + +#include <linux/dma/edma.h> + +/* eDMA management callbacks */ +void dw_edma_v0_core_off(struct dw_edma *chan); +u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir); +enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan); +void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan); +u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir); +u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir); +void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); +int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); +/* eDMA debug fs callbacks */ +void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); +void dw_edma_v0_core_debugfs_off(void); + +#endif /* _DW_EDMA_V0_CORE_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c new file mode 100644 index 000000000000..3226f528cc11 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#include <linux/debugfs.h> +#include <linux/bitfield.h> + +#include "dw-edma-v0-debugfs.h" +#include "dw-edma-v0-regs.h" +#include "dw-edma-core.h" + +#define REGS_ADDR(name) \ + ((dma_addr_t *)&regs->name) +#define REGISTER(name) \ + { #name, REGS_ADDR(name) } + +#define WR_REGISTER(name) \ + { #name, REGS_ADDR(wr_##name) } +#define RD_REGISTER(name) \ + { #name, REGS_ADDR(rd_##name) } + +#define WR_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.wr_##name) } +#define RD_REGISTER_LEGACY(name) \ + { #name, REGS_ADDR(type.legacy.rd_##name) } + +#define WR_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.wr_##name) } +#define RD_REGISTER_UNROLL(name) \ + { #name, REGS_ADDR(type.unroll.rd_##name) } + +#define WRITE_STR "write" +#define READ_STR "read" +#define CHANNEL_STR "channel" +#define REGISTERS_STR "registers" + +static struct dentry *base_dir; +static struct dw_edma *dw; +static struct dw_edma_v0_regs *regs; + +static struct { + void *start; + void *end; +} lim[2][EDMA_V0_MAX_NR_CH]; + +struct debugfs_entries { + char name[24]; + dma_addr_t *reg; +}; + +static int dw_edma_debugfs_u32_get(void *data, u64 *val) +{ + if (dw->mode == EDMA_MODE_LEGACY && + data >= (void *)&regs->type.legacy.ch) { + void *ptr = (void *)&regs->type.legacy.ch; + u32 viewport_sel = 0; + unsigned long flags; + u16 ch; + + for (ch = 0; ch < dw->wr_ch_cnt; ch++) + if (lim[0][ch].start >= data && data < lim[0][ch].end) { + ptr += (data - lim[0][ch].start); + goto legacy_sel_wr; + } + + for (ch = 0; ch < dw->rd_ch_cnt; ch++) + if (lim[1][ch].start >= data && data < lim[1][ch].end) { + ptr += (data - lim[1][ch].start); + goto legacy_sel_rd; + } + + return 0; +legacy_sel_rd: + viewport_sel = BIT(31); +legacy_sel_wr: + viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); + + raw_spin_lock_irqsave(&dw->lock, flags); + + writel(viewport_sel, &regs->type.legacy.viewport_sel); + *val = readl(ptr); + + raw_spin_unlock_irqrestore(&dw->lock, flags); + } else { + *val = readl(data); + } + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); + +static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], + int nr_entries, struct dentry *dir) +{ + int i; + + for (i = 0; i < nr_entries; i++) { + if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, + entries[i].reg, &fops_x32)) + break; + } +} + +static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, + struct dentry *dir) +{ + int nr_entries; + const struct debugfs_entries debugfs_regs[] = { + REGISTER(ch_control1), + REGISTER(ch_control2), + REGISTER(transfer_size), + REGISTER(sar_low), + REGISTER(sar_high), + REGISTER(dar_low), + REGISTER(dar_high), + REGISTER(llp_low), + REGISTER(llp_high), + }; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); +} + +static void dw_edma_debugfs_regs_wr(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + WR_REGISTER(engine_en), + WR_REGISTER(doorbell), + WR_REGISTER(ch_arb_weight_low), + WR_REGISTER(ch_arb_weight_high), + /* eDMA interrupts registers */ + WR_REGISTER(int_status), + WR_REGISTER(int_mask), + WR_REGISTER(int_clear), + WR_REGISTER(err_status), + WR_REGISTER(done_imwr_low), + WR_REGISTER(done_imwr_high), + WR_REGISTER(abort_imwr_low), + WR_REGISTER(abort_imwr_high), + WR_REGISTER(ch01_imwr_data), + WR_REGISTER(ch23_imwr_data), + 
WR_REGISTER(ch45_imwr_data), + WR_REGISTER(ch67_imwr_data), + WR_REGISTER(linked_list_err_en), + }; + const struct debugfs_entries debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + WR_REGISTER_UNROLL(engine_chgroup), + WR_REGISTER_UNROLL(engine_hshake_cnt_low), + WR_REGISTER_UNROLL(engine_hshake_cnt_high), + WR_REGISTER_UNROLL(ch0_pwr_en), + WR_REGISTER_UNROLL(ch1_pwr_en), + WR_REGISTER_UNROLL(ch2_pwr_en), + WR_REGISTER_UNROLL(ch3_pwr_en), + WR_REGISTER_UNROLL(ch4_pwr_en), + WR_REGISTER_UNROLL(ch5_pwr_en), + WR_REGISTER_UNROLL(ch6_pwr_en), + WR_REGISTER_UNROLL(ch7_pwr_en), + }; + struct dentry *regs_dir, *ch_dir; + int nr_entries, i; + char name[16]; + + regs_dir = debugfs_create_dir(WRITE_STR, dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + if (dw->mode == EDMA_MODE_UNROLL) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, + regs_dir); + } + + for (i = 0; i < dw->wr_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dir = debugfs_create_dir(name, regs_dir); + if (!ch_dir) + return; + + dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir); + + lim[0][i].start = &regs->type.unroll.ch[i].wr; + lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0]; + } +} + +static void dw_edma_debugfs_regs_rd(struct dentry *dir) +{ + const struct debugfs_entries debugfs_regs[] = { + /* eDMA global registers */ + RD_REGISTER(engine_en), + RD_REGISTER(doorbell), + RD_REGISTER(ch_arb_weight_low), + RD_REGISTER(ch_arb_weight_high), + /* eDMA interrupts registers */ + RD_REGISTER(int_status), + RD_REGISTER(int_mask), + RD_REGISTER(int_clear), + RD_REGISTER(err_status_low), + RD_REGISTER(err_status_high), + RD_REGISTER(linked_list_err_en), + RD_REGISTER(done_imwr_low), + RD_REGISTER(done_imwr_high), + RD_REGISTER(abort_imwr_low), + RD_REGISTER(abort_imwr_high), + RD_REGISTER(ch01_imwr_data), + RD_REGISTER(ch23_imwr_data), + RD_REGISTER(ch45_imwr_data), + RD_REGISTER(ch67_imwr_data), + }; + const struct debugfs_entries debugfs_unroll_regs[] = { + /* eDMA channel context grouping */ + RD_REGISTER_UNROLL(engine_chgroup), + RD_REGISTER_UNROLL(engine_hshake_cnt_low), + RD_REGISTER_UNROLL(engine_hshake_cnt_high), + RD_REGISTER_UNROLL(ch0_pwr_en), + RD_REGISTER_UNROLL(ch1_pwr_en), + RD_REGISTER_UNROLL(ch2_pwr_en), + RD_REGISTER_UNROLL(ch3_pwr_en), + RD_REGISTER_UNROLL(ch4_pwr_en), + RD_REGISTER_UNROLL(ch5_pwr_en), + RD_REGISTER_UNROLL(ch6_pwr_en), + RD_REGISTER_UNROLL(ch7_pwr_en), + }; + struct dentry *regs_dir, *ch_dir; + int nr_entries, i; + char name[16]; + + regs_dir = debugfs_create_dir(READ_STR, dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + if (dw->mode == EDMA_MODE_UNROLL) { + nr_entries = ARRAY_SIZE(debugfs_unroll_regs); + dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries, + regs_dir); + } + + for (i = 0; i < dw->rd_ch_cnt; i++) { + snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i); + + ch_dir = debugfs_create_dir(name, regs_dir); + if (!ch_dir) + return; + + dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir); + + lim[1][i].start = &regs->type.unroll.ch[i].rd; + lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0]; + } +} + +static void dw_edma_debugfs_regs(void) +{ + const struct debugfs_entries debugfs_regs[] = { + REGISTER(ctrl_data_arb_prior), + REGISTER(ctrl), + }; + struct dentry *regs_dir; + 
int nr_entries; + + regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir); + if (!regs_dir) + return; + + nr_entries = ARRAY_SIZE(debugfs_regs); + dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir); + + dw_edma_debugfs_regs_wr(regs_dir); + dw_edma_debugfs_regs_rd(regs_dir); +} + +void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) +{ + dw = chip->dw; + if (!dw) + return; + + regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr; + if (!regs) + return; + + base_dir = debugfs_create_dir(dw->name, 0); + if (!base_dir) + return; + + debugfs_create_u32("version", 0444, base_dir, &dw->version); + debugfs_create_u32("mode", 0444, base_dir, &dw->mode); + debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt); + debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt); + + dw_edma_debugfs_regs(); +} + +void dw_edma_v0_debugfs_off(void) +{ + debugfs_remove_recursive(base_dir); +} diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h new file mode 100644 index 000000000000..5450a0a94193 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. + * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_DEBUG_FS_H +#define _DW_EDMA_V0_DEBUG_FS_H + +#include <linux/dma/edma.h> + +#ifdef CONFIG_DEBUG_FS +void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip); +void dw_edma_v0_debugfs_off(void); +#else +static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) +{ +} + +static inline void dw_edma_v0_debugfs_off(void) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* _DW_EDMA_V0_DEBUG_FS_H */ diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h new file mode 100644 index 000000000000..cd6476884507 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. 
+ * Synopsys DesignWare eDMA v0 core + * + * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> + */ + +#ifndef _DW_EDMA_V0_REGS_H +#define _DW_EDMA_V0_REGS_H + +#include <linux/dmaengine.h> + +#define EDMA_V0_MAX_NR_CH 8 +#define EDMA_V0_VIEWPORT_MASK GENMASK(2, 0) +#define EDMA_V0_DONE_INT_MASK GENMASK(7, 0) +#define EDMA_V0_ABORT_INT_MASK GENMASK(23, 16) +#define EDMA_V0_WRITE_CH_COUNT_MASK GENMASK(3, 0) +#define EDMA_V0_READ_CH_COUNT_MASK GENMASK(19, 16) +#define EDMA_V0_CH_STATUS_MASK GENMASK(6, 5) +#define EDMA_V0_DOORBELL_CH_MASK GENMASK(2, 0) +#define EDMA_V0_LINKED_LIST_ERR_MASK GENMASK(7, 0) + +#define EDMA_V0_CH_ODD_MSI_DATA_MASK GENMASK(31, 16) +#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0) + +struct dw_edma_v0_ch_regs { + u32 ch_control1; /* 0x000 */ + u32 ch_control2; /* 0x004 */ + u32 transfer_size; /* 0x008 */ + u32 sar_low; /* 0x00c */ + u32 sar_high; /* 0x010 */ + u32 dar_low; /* 0x014 */ + u32 dar_high; /* 0x018 */ + u32 llp_low; /* 0x01c */ + u32 llp_high; /* 0x020 */ +}; + +struct dw_edma_v0_ch { + struct dw_edma_v0_ch_regs wr; /* 0x200 */ + u32 padding_1[55]; /* [0x224..0x2fc] */ + struct dw_edma_v0_ch_regs rd; /* 0x300 */ + u32 padding_2[55]; /* [0x324..0x3fc] */ +}; + +struct dw_edma_v0_unroll { + u32 padding_1; /* 0x0f8 */ + u32 wr_engine_chgroup; /* 0x100 */ + u32 rd_engine_chgroup; /* 0x104 */ + u32 wr_engine_hshake_cnt_low; /* 0x108 */ + u32 wr_engine_hshake_cnt_high; /* 0x10c */ + u32 padding_2[2]; /* [0x110..0x114] */ + u32 rd_engine_hshake_cnt_low; /* 0x118 */ + u32 rd_engine_hshake_cnt_high; /* 0x11c */ + u32 padding_3[2]; /* [0x120..0x124] */ + u32 wr_ch0_pwr_en; /* 0x128 */ + u32 wr_ch1_pwr_en; /* 0x12c */ + u32 wr_ch2_pwr_en; /* 0x130 */ + u32 wr_ch3_pwr_en; /* 0x134 */ + u32 wr_ch4_pwr_en; /* 0x138 */ + u32 wr_ch5_pwr_en; /* 0x13c */ + u32 wr_ch6_pwr_en; /* 0x140 */ + u32 wr_ch7_pwr_en; /* 0x144 */ + u32 padding_4[8]; /* [0x148..0x164] */ + u32 rd_ch0_pwr_en; /* 0x168 */ + u32 rd_ch1_pwr_en; /* 0x16c */ + u32 rd_ch2_pwr_en; /* 0x170 */ + u32 rd_ch3_pwr_en; /* 0x174 */ + u32 rd_ch4_pwr_en; /* 0x178 */ + u32 rd_ch5_pwr_en; /* 0x17c */ + u32 rd_ch6_pwr_en; /* 0x180 */ + u32 rd_ch7_pwr_en; /* 0x184 */ + u32 padding_5[30]; /* [0x188..0x1fc] */ + struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */ +}; + +struct dw_edma_v0_legacy { + u32 viewport_sel; /* 0x0f8 */ + struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */ +}; + +struct dw_edma_v0_regs { + /* eDMA global registers */ + u32 ctrl_data_arb_prior; /* 0x000 */ + u32 padding_1; /* 0x004 */ + u32 ctrl; /* 0x008 */ + u32 wr_engine_en; /* 0x00c */ + u32 wr_doorbell; /* 0x010 */ + u32 padding_2; /* 0x014 */ + u32 wr_ch_arb_weight_low; /* 0x018 */ + u32 wr_ch_arb_weight_high; /* 0x01c */ + u32 padding_3[3]; /* [0x020..0x028] */ + u32 rd_engine_en; /* 0x02c */ + u32 rd_doorbell; /* 0x030 */ + u32 padding_4; /* 0x034 */ + u32 rd_ch_arb_weight_low; /* 0x038 */ + u32 rd_ch_arb_weight_high; /* 0x03c */ + u32 padding_5[3]; /* [0x040..0x048] */ + /* eDMA interrupts registers */ + u32 wr_int_status; /* 0x04c */ + u32 padding_6; /* 0x050 */ + u32 wr_int_mask; /* 0x054 */ + u32 wr_int_clear; /* 0x058 */ + u32 wr_err_status; /* 0x05c */ + u32 wr_done_imwr_low; /* 0x060 */ + u32 wr_done_imwr_high; /* 0x064 */ + u32 wr_abort_imwr_low; /* 0x068 */ + u32 wr_abort_imwr_high; /* 0x06c */ + u32 wr_ch01_imwr_data; /* 0x070 */ + u32 wr_ch23_imwr_data; /* 0x074 */ + u32 wr_ch45_imwr_data; /* 0x078 */ + u32 wr_ch67_imwr_data; /* 0x07c */ + u32 padding_7[4]; /* [0x080..0x08c] */ + u32 wr_linked_list_err_en;
/* 0x090 */ + u32 padding_8[3]; /* [0x094..0x09c] */ + u32 rd_int_status; /* 0x0a0 */ + u32 padding_9; /* 0x0a4 */ + u32 rd_int_mask; /* 0x0a8 */ + u32 rd_int_clear; /* 0x0ac */ + u32 padding_10; /* 0x0b0 */ + u32 rd_err_status_low; /* 0x0b4 */ + u32 rd_err_status_high; /* 0x0b8 */ + u32 padding_11[2]; /* [0x0bc..0x0c0] */ + u32 rd_linked_list_err_en; /* 0x0c4 */ + u32 padding_12; /* 0x0c8 */ + u32 rd_done_imwr_low; /* 0x0cc */ + u32 rd_done_imwr_high; /* 0x0d0 */ + u32 rd_abort_imwr_low; /* 0x0d4 */ + u32 rd_abort_imwr_high; /* 0x0d8 */ + u32 rd_ch01_imwr_data; /* 0x0dc */ + u32 rd_ch23_imwr_data; /* 0x0e0 */ + u32 rd_ch45_imwr_data; /* 0x0e4 */ + u32 rd_ch67_imwr_data; /* 0x0e8 */ + u32 padding_13[4]; /* [0x0ec..0x0f8] */ + /* eDMA channel context grouping */ + union dw_edma_v0_type { + struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */ + struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */ + } type; +}; + +struct dw_edma_v0_lli { + u32 control; + u32 transfer_size; + u32 sar_low; + u32 sar_high; + u32 dar_low; + u32 dar_high; +}; + +struct dw_edma_v0_llp { + u32 control; + u32 reserved; + u32 llp_low; + u32 llp_high; +}; + +#endif /* _DW_EDMA_V0_REGS_H */ diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index e79a75db0852..8de87b15a988 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -15,10 +15,13 @@ struct dw_dma_pci_data { const struct dw_dma_platform_data *pdata; int (*probe)(struct dw_dma_chip *chip); + int (*remove)(struct dw_dma_chip *chip); + struct dw_dma_chip *chip; }; static const struct dw_dma_pci_data dw_pci_data = { .probe = dw_dma_probe, + .remove = dw_dma_remove, }; static const struct dw_dma_platform_data idma32_pdata = { @@ -34,11 +37,13 @@ static const struct dw_dma_platform_data idma32_pdata = { static const struct dw_dma_pci_data idma32_pci_data = { .pdata = &idma32_pdata, .probe = idma32_dma_probe, + .remove = idma32_dma_remove, }; static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { - const struct dw_dma_pci_data *data = (void *)pid->driver_data; + const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data; + struct dw_dma_pci_data *data; struct dw_dma_chip *chip; int ret; @@ -63,6 +68,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) if (ret) return ret; + data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; @@ -73,21 +82,24 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) chip->irq = pdev->irq; chip->pdata = data->pdata; + data->chip = chip; + ret = data->probe(chip); if (ret) return ret; - pci_set_drvdata(pdev, chip); + pci_set_drvdata(pdev, data); return 0; } static void dw_pci_remove(struct pci_dev *pdev) { - struct dw_dma_chip *chip = pci_get_drvdata(pdev); + struct dw_dma_pci_data *data = pci_get_drvdata(pdev); + struct dw_dma_chip *chip = data->chip; int ret; - ret = dw_dma_remove(chip); + ret = data->remove(chip); if (ret) dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); } @@ -96,16 +108,16 @@ static void dw_pci_remove(struct pci_dev *pdev) static int dw_pci_suspend_late(struct device *dev) { - struct pci_dev *pci = to_pci_dev(dev); - struct dw_dma_chip *chip = pci_get_drvdata(pci); + struct dw_dma_pci_data *data = dev_get_drvdata(dev); + struct dw_dma_chip *chip = data->chip; return do_dw_dma_disable(chip); }; static int dw_pci_resume_early(struct device *dev) { - struct 
pci_dev *pci = to_pci_dev(dev); - struct dw_dma_chip *chip = pci_get_drvdata(pci); + struct dw_dma_pci_data *data = dev_get_drvdata(dev); + struct dw_dma_chip *chip = data->chip; return do_dw_dma_enable(chip); }; @@ -131,6 +143,11 @@ static const struct pci_device_id dw_pci_id_table[] = { { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, + /* Elkhart Lake iDMA 32-bit (OSE DMA) */ + { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data }, + { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data }, + { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data }, + /* Haswell */ { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data }, diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 680b2a00a953..44d92c34dec3 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -47,7 +47,7 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan) struct edma_regs *regs = &fsl_chan->edma->regs; u32 ch = fsl_chan->vchan.chan.chan_id; - if (fsl_chan->edma->version == v1) { + if (fsl_chan->edma->drvdata->version == v1) { edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei); edma_writeb(fsl_chan->edma, ch, regs->serq); } else { @@ -64,7 +64,7 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) struct edma_regs *regs = &fsl_chan->edma->regs; u32 ch = fsl_chan->vchan.chan.chan_id; - if (fsl_chan->edma->version == v1) { + if (fsl_chan->edma->drvdata->version == v1) { edma_writeb(fsl_chan->edma, ch, regs->cerq); edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei); } else { @@ -77,22 +77,33 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) } EXPORT_SYMBOL_GPL(fsl_edma_disable_request); +static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr, + u32 off, u32 slot, bool enable) +{ + u8 val8; + + if (enable) + val8 = EDMAMUX_CHCFG_ENBL | slot; + else + val8 = EDMAMUX_CHCFG_DIS; + + iowrite8(val8, addr + off); +} + void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, unsigned int slot, bool enable) { u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; - chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; + chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); - if (enable) - iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); - else - iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); + mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable); } EXPORT_SYMBOL_GPL(fsl_edma_chan_mux); @@ -647,28 +658,28 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma) edma->regs.erql = edma->membase + EDMA_ERQ; edma->regs.eeil = edma->membase + EDMA_EEI; - edma->regs.serq = edma->membase + ((edma->version == v1) ? - EDMA_SERQ : EDMA64_SERQ); - edma->regs.cerq = edma->membase + ((edma->version == v1) ? - EDMA_CERQ : EDMA64_CERQ); - edma->regs.seei = edma->membase + ((edma->version == v1) ? - EDMA_SEEI : EDMA64_SEEI); - edma->regs.ceei = edma->membase + ((edma->version == v1) ? - EDMA_CEEI : EDMA64_CEEI); - edma->regs.cint = edma->membase + ((edma->version == v1) ? - EDMA_CINT : EDMA64_CINT); - edma->regs.cerr = edma->membase + ((edma->version == v1) ? - EDMA_CERR : EDMA64_CERR); - edma->regs.ssrt = edma->membase + ((edma->version == v1) ? 
- EDMA_SSRT : EDMA64_SSRT); - edma->regs.cdne = edma->membase + ((edma->version == v1) ? - EDMA_CDNE : EDMA64_CDNE); - edma->regs.intl = edma->membase + ((edma->version == v1) ? - EDMA_INTR : EDMA64_INTL); - edma->regs.errl = edma->membase + ((edma->version == v1) ? - EDMA_ERR : EDMA64_ERRL); - - if (edma->version == v2) { + edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_SERQ : EDMA_SERQ); + edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_CERQ : EDMA_CERQ); + edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_SEEI : EDMA_SEEI); + edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_CEEI : EDMA_CEEI); + edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_CINT : EDMA_CINT); + edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_CERR : EDMA_CERR); + edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_SSRT : EDMA_SSRT); + edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_CDNE : EDMA_CDNE); + edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_INTL : EDMA_INTR); + edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ? + EDMA64_ERRL : EDMA_ERR); + + if (edma->drvdata->version == v2) { edma->regs.erqh = edma->membase + EDMA64_ERQH; edma->regs.eeih = edma->membase + EDMA64_EEIH; edma->regs.errh = edma->membase + EDMA64_ERRH; diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index c53f76eeb4d3..4e175560292c 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -7,6 +7,7 @@ #define _FSL_EDMA_COMMON_H_ #include <linux/dma-direction.h> +#include <linux/platform_device.h> #include "virt-dma.h" #define EDMA_CR_EDBG BIT(1) @@ -140,17 +141,24 @@ enum edma_version { v2, /* 64ch Coldfire */ }; +struct fsl_edma_drvdata { + enum edma_version version; + u32 dmamuxs; + int (*setup_irq)(struct platform_device *pdev, + struct fsl_edma_engine *fsl_edma); +}; + struct fsl_edma_engine { struct dma_device dma_dev; void __iomem *membase; void __iomem *muxbase[DMAMUX_NR]; struct clk *muxclk[DMAMUX_NR]; struct mutex fsl_edma_mutex; + const struct fsl_edma_drvdata *drvdata; u32 n_chans; int txirq; int errirq; bool big_endian; - enum edma_version version; struct edma_regs regs; struct fsl_edma_chan chans[]; }; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 0ddad3adb761..fcbad6ae954a 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -92,7 +92,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; struct dma_chan *chan, *_chan; struct fsl_edma_chan *fsl_chan; - unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; + u32 dmamux_nr = fsl_edma->drvdata->dmamuxs; + unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr; if (dma_spec->args_count != 2) return NULL; @@ -180,16 +181,38 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) clk_disable_unprepare(fsl_edma->muxclk[i]); } +static struct fsl_edma_drvdata vf610_data = { + .version = v1, + .dmamuxs = DMAMUX_NR, + .setup_irq = fsl_edma_irq_init, +}; + +static const struct of_device_id fsl_edma_dt_ids[] = { + { .compatible = "fsl,vf610-edma", .data = &vf610_data}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); + static int fsl_edma_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = 
+ of_match_device(fsl_edma_dt_ids, &pdev->dev); struct device_node *np = pdev->dev.of_node; struct fsl_edma_engine *fsl_edma; + const struct fsl_edma_drvdata *drvdata = NULL; struct fsl_edma_chan *fsl_chan; struct edma_regs *regs; struct resource *res; int len, chans; int ret, i; + if (of_id) + drvdata = of_id->data; + if (!drvdata) { + dev_err(&pdev->dev, "unable to find driver data\n"); + return -EINVAL; + } + ret = of_property_read_u32(np, "dma-channels", &chans); if (ret) { dev_err(&pdev->dev, "Can't get dma-channels.\n"); @@ -201,7 +224,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (!fsl_edma) return -ENOMEM; - fsl_edma->version = v1; + fsl_edma->drvdata = drvdata; fsl_edma->n_chans = chans; mutex_init(&fsl_edma->fsl_edma_mutex); @@ -213,7 +236,7 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma_setup_regs(fsl_edma); regs = &fsl_edma->regs; - for (i = 0; i < DMAMUX_NR; i++) { + for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { char clkname[32]; res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); @@ -259,7 +282,7 @@ static int fsl_edma_probe(struct platform_device *pdev) } edma_writel(fsl_edma, ~0, regs->intl); - ret = fsl_edma_irq_init(pdev, fsl_edma); + ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); if (ret) return ret; @@ -291,7 +314,7 @@ static int fsl_edma_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Can't register Freescale eDMA engine. (%d)\n", ret); - fsl_disable_clocks(fsl_edma, DMAMUX_NR); + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); return ret; } @@ -300,7 +323,7 @@ static int fsl_edma_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma. (%d)\n", ret); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma, DMAMUX_NR); + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); return ret; } @@ -319,7 +342,7 @@ static int fsl_edma_remove(struct platform_device *pdev) fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); of_dma_controller_free(np); dma_async_device_unregister(&fsl_edma->dma_dev); - fsl_disable_clocks(fsl_edma, DMAMUX_NR); + fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); return 0; } @@ -378,12 +401,6 @@ static const struct dev_pm_ops fsl_edma_pm_ops = { .resume_early = fsl_edma_resume_early, }; -static const struct of_device_id fsl_edma_dt_ids[] = { - { .compatible = "fsl,vf610-edma", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); - static struct platform_driver fsl_edma_driver = { .driver = { .name = "fsl-edma", diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 60b062c3647b..8e341c0c13bc 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -113,6 +113,7 @@ /* Field definition for Descriptor offset */ #define QDMA_CCDF_STATUS 20 #define QDMA_CCDF_OFFSET 20 +#define QDMA_SDDF_CMD(x) (((u64)(x)) << 32) /* Field definition for safe loop count*/ #define FSL_QDMA_HALT_COUNT 1500 @@ -341,6 +342,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, dma_addr_t dst, dma_addr_t src, u32 len) { + u32 cmd; struct fsl_qdma_format *sdf, *ddf; struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; @@ -369,14 +371,14 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, /* This entry is the last entry. 
*/ qdma_csgf_set_f(csgf_dest, len); /* Descriptor Buffer */ - sdf->data = - cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - ddf->data = - cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); - ddf->data |= - cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); + cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << + FSL_QDMA_CMD_RWTTYPE_OFFSET); + sdf->data = QDMA_SDDF_CMD(cmd); + + cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << + FSL_QDMA_CMD_RWTTYPE_OFFSET); + cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); + ddf->data = QDMA_SDDF_CMD(cmd); } /* diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 0c2610066ba9..025d8ad5a63c 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -61,10 +61,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) if (hsuc->direction == DMA_MEM_TO_DEV) { bsr = config->dst_maxburst; - mtsr = config->src_addr_width; + mtsr = config->dst_addr_width; } else if (hsuc->direction == DMA_DEV_TO_MEM) { bsr = config->src_maxburst; - mtsr = config->dst_addr_width; + mtsr = config->src_addr_width; } hsu_chan_disable(hsuc); diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 4ec84a633bd3..a01f4b5d793c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1934,16 +1934,11 @@ disable_clk_ipg: static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct sdma_engine *sdma = sdmac->sdma; struct imx_dma_data *data = fn_param; if (!imx_dma_is_general_purpose(chan)) return false; - /* return false if it's not the right device */ - if (sdma->dev->of_node != data->of_node) - return false; - sdmac->data = *data; chan->private = &sdmac->data; @@ -1971,9 +1966,9 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, * be set to sdmac->event_id1. */ data.dma_request2 = 0; - data.of_node = ofdma->of_node; - return dma_request_channel(mask, sdma_filter_fn, &data); + return __dma_request_channel(&mask, sdma_filter_fn, &data, + ofdma->of_node); } static int sdma_probe(struct platform_device *pdev) diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index 7de54b2fafdb..e15bd15a9ef6 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -164,6 +164,11 @@ static void mcf_edma_irq_free(struct platform_device *pdev, free_irq(irq, mcf_edma); } +static struct fsl_edma_drvdata mcf_data = { + .version = v2, + .setup_irq = mcf_edma_irq_init, +}; + static int mcf_edma_probe(struct platform_device *pdev) { struct mcf_edma_platform_data *pdata; @@ -187,8 +192,8 @@ static int mcf_edma_probe(struct platform_device *pdev) mcf_edma->n_chans = chans; - /* Set up version for ColdFire edma */ - mcf_edma->version = v2; + /* Set up drvdata for ColdFire edma */ + mcf_edma->drvdata = &mcf_data; mcf_edma->big_endian = 1; if (!mcf_edma->n_chans) { @@ -223,7 +228,7 @@ static int mcf_edma_probe(struct platform_device *pdev) iowrite32(~0, regs->inth); iowrite32(~0, regs->intl); - ret = mcf_edma_irq_init(pdev, mcf_edma); + ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma); if (ret) return ret; diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig index 7411eb3d419e..1ad63ddc292d 100644 --- a/drivers/dma/mediatek/Kconfig +++ b/drivers/dma/mediatek/Kconfig @@ -25,3 +25,14 @@ config MTK_CQDMA This controller provides the channels which is dedicated to memory-to-memory transfer to offload from CPU. 
+ +config MTK_UART_APDMA + tristate "MediaTek SoCs APDMA support for UART" + depends on OF && SERIAL_8250_MT6577 + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the UART DMA engine found on MediaTek MTK SoCs. + When SERIAL_8250_MT6577 is enabled, and if you want to use DMA, + you can enable the config. The DMA engine can only be used + with MediaTek SoCs. diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile index 13b144594510..5ba39a5edc13 100644 --- a/drivers/dma/mediatek/Makefile +++ b/drivers/dma/mediatek/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c new file mode 100644 index 000000000000..546995c20876 --- /dev/null +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MediaTek UART APDMA driver. + * + * Copyright (c) 2019 MediaTek Inc. + * Author: Long Cheng <long.cheng@mediatek.com> + */ + +#include <linux/clk.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "../virt-dma.h" + +/* The default number of virtual channel */ +#define MTK_UART_APDMA_NR_VCHANS 8 + +#define VFF_EN_B BIT(0) +#define VFF_STOP_B BIT(0) +#define VFF_FLUSH_B BIT(0) +#define VFF_4G_EN_B BIT(0) +/* rx valid size >= vff thre */ +#define VFF_RX_INT_EN_B (BIT(0) | BIT(1)) +/* tx left size >= vff thre */ +#define VFF_TX_INT_EN_B BIT(0) +#define VFF_WARM_RST_B BIT(0) +#define VFF_RX_INT_CLR_B (BIT(0) | BIT(1)) +#define VFF_TX_INT_CLR_B 0 +#define VFF_STOP_CLR_B 0 +#define VFF_EN_CLR_B 0 +#define VFF_INT_EN_CLR_B 0 +#define VFF_4G_SUPPORT_CLR_B 0 + +/* + * interrupt trigger level for tx + * if threshold is n, no polling is required to start tx. + * otherwise need polling VFF_FLUSH. + */ +#define VFF_TX_THRE(n) (n) +/* interrupt trigger level for rx */ +#define VFF_RX_THRE(n) ((n) * 3 / 4) + +#define VFF_RING_SIZE 0xffff +/* invert this bit when wrap ring head again */ +#define VFF_RING_WRAP 0x10000 + +#define VFF_INT_FLAG 0x00 +#define VFF_INT_EN 0x04 +#define VFF_EN 0x08 +#define VFF_RST 0x0c +#define VFF_STOP 0x10 +#define VFF_FLUSH 0x14 +#define VFF_ADDR 0x1c +#define VFF_LEN 0x24 +#define VFF_THRE 0x28 +#define VFF_WPT 0x2c +#define VFF_RPT 0x30 +/* TX: the buffer size HW can read. RX: the buffer size SW can read. */ +#define VFF_VALID_SIZE 0x3c +/* TX: the buffer size SW can write. RX: the buffer size HW can write. 
*/ +#define VFF_LEFT_SIZE 0x40 +#define VFF_DEBUG_STATUS 0x50 +#define VFF_4G_SUPPORT 0x54 + +struct mtk_uart_apdmadev { + struct dma_device ddev; + struct clk *clk; + bool support_33bits; + unsigned int dma_requests; +}; + +struct mtk_uart_apdma_desc { + struct virt_dma_desc vd; + + dma_addr_t addr; + unsigned int avail_len; +}; + +struct mtk_chan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct mtk_uart_apdma_desc *desc; + enum dma_transfer_direction dir; + + void __iomem *base; + unsigned int irq; + + unsigned int rx_status; +}; + +static inline struct mtk_uart_apdmadev * +to_mtk_uart_apdma_dev(struct dma_device *d) +{ + return container_of(d, struct mtk_uart_apdmadev, ddev); +} + +static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c) +{ + return container_of(c, struct mtk_chan, vc.chan); +} + +static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc + (struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct mtk_uart_apdma_desc, vd.tx); +} + +static void mtk_uart_apdma_write(struct mtk_chan *c, + unsigned int reg, unsigned int val) +{ + writel(val, c->base + reg); +} + +static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg) +{ + return readl(c->base + reg); +} + +static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd) +{ + struct dma_chan *chan = vd->tx.chan; + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + + kfree(c->desc); +} + +static void mtk_uart_apdma_start_tx(struct mtk_chan *c) +{ + struct mtk_uart_apdmadev *mtkd = + to_mtk_uart_apdma_dev(c->vc.chan.device); + struct mtk_uart_apdma_desc *d = c->desc; + unsigned int wpt, vff_sz; + + vff_sz = c->cfg.dst_port_window_size; + if (!mtk_uart_apdma_read(c, VFF_LEN)) { + mtk_uart_apdma_write(c, VFF_ADDR, d->addr); + mtk_uart_apdma_write(c, VFF_LEN, vff_sz); + mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz)); + mtk_uart_apdma_write(c, VFF_WPT, 0); + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); + + if (mtkd->support_33bits) + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); + } + + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); + if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) + dev_err(c->vc.chan.device->dev, "Enable TX fail\n"); + + if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) { + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); + return; + } + + wpt = mtk_uart_apdma_read(c, VFF_WPT); + + wpt += c->desc->avail_len; + if ((wpt & VFF_RING_SIZE) == vff_sz) + wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP; + + /* Let DMA start moving data */ + mtk_uart_apdma_write(c, VFF_WPT, wpt); + + /* HW auto set to 0 when left size >= threshold */ + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); + if (!mtk_uart_apdma_read(c, VFF_FLUSH)) + mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); +} + +static void mtk_uart_apdma_start_rx(struct mtk_chan *c) +{ + struct mtk_uart_apdmadev *mtkd = + to_mtk_uart_apdma_dev(c->vc.chan.device); + struct mtk_uart_apdma_desc *d = c->desc; + unsigned int vff_sz; + + vff_sz = c->cfg.src_port_window_size; + if (!mtk_uart_apdma_read(c, VFF_LEN)) { + mtk_uart_apdma_write(c, VFF_ADDR, d->addr); + mtk_uart_apdma_write(c, VFF_LEN, vff_sz); + mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz)); + mtk_uart_apdma_write(c, VFF_RPT, 0); + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); + + if (mtkd->support_33bits) + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); + } + + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); + if 
(mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) + dev_err(c->vc.chan.device->dev, "Enable RX fail\n"); +} + +static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) +{ + struct mtk_uart_apdma_desc *d = c->desc; + + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + + list_del(&d->vd.node); + vchan_cookie_complete(&d->vd); +} + +static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) +{ + struct mtk_uart_apdma_desc *d = c->desc; + unsigned int len, wg, rg; + int cnt; + + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); + + if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE)) + return; + + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + + len = c->cfg.src_port_window_size; + rg = mtk_uart_apdma_read(c, VFF_RPT); + wg = mtk_uart_apdma_read(c, VFF_WPT); + cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE); + + /* + * The buffer is ring buffer. If wrap bit different, + * represents the start of the next cycle for WPT + */ + if ((rg ^ wg) & VFF_RING_WRAP) + cnt += len; + + c->rx_status = d->avail_len - cnt; + mtk_uart_apdma_write(c, VFF_RPT, wg); + + list_del(&d->vd.node); + vchan_cookie_complete(&d->vd); +} + +static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id) +{ + struct dma_chan *chan = (struct dma_chan *)dev_id; + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (c->dir == DMA_DEV_TO_MEM) + mtk_uart_apdma_rx_handler(c); + else if (c->dir == DMA_MEM_TO_DEV) + mtk_uart_apdma_tx_handler(c); + spin_unlock_irqrestore(&c->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned int status; + int ret; + + ret = pm_runtime_get_sync(mtkd->ddev.dev); + if (ret < 0) { + pm_runtime_put_noidle(chan->device->dev); + return ret; + } + + mtk_uart_apdma_write(c, VFF_ADDR, 0); + mtk_uart_apdma_write(c, VFF_THRE, 0); + mtk_uart_apdma_write(c, VFF_LEN, 0); + mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B); + + ret = readx_poll_timeout(readl, c->base + VFF_EN, + status, !status, 10, 100); + if (ret) + return ret; + + ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, + IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); + if (ret < 0) { + dev_err(chan->device->dev, "Can't request dma IRQ\n"); + return -EINVAL; + } + + if (mtkd->support_33bits) + mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); + + return ret; +} + +static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan) +{ + struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + + free_irq(c->irq, chan); + + tasklet_kill(&c->vc.task); + + vchan_free_chan_resources(&c->vc); + + pm_runtime_put_sync(mtkd->ddev.dev); +} + +static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (!txstate) + return ret; + + dma_set_residue(txstate, c->rx_status); + + return ret; +} + +/* + * dmaengine_prep_slave_single will call the function. and sglen is 1. + * 8250 uart using one ring buffer, and deal with one sg. 
+ */ +static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg + (struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + struct mtk_uart_apdma_desc *d; + + if (!is_slave_direction(dir) || sglen != 1) + return NULL; + + /* Now allocate and setup the descriptor */ + d = kzalloc(sizeof(*d), GFP_ATOMIC); + if (!d) + return NULL; + + d->avail_len = sg_dma_len(sgl); + d->addr = sg_dma_address(sgl); + c->dir = dir; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static void mtk_uart_apdma_issue_pending(struct dma_chan *chan) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + struct virt_dma_desc *vd; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + vd = vchan_next_desc(&c->vc); + c->desc = to_mtk_uart_apdma_desc(&vd->tx); + + if (c->dir == DMA_DEV_TO_MEM) + mtk_uart_apdma_start_rx(c); + else if (c->dir == DMA_MEM_TO_DEV) + mtk_uart_apdma_start_tx(c); + } + + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static int mtk_uart_apdma_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + + memcpy(&c->cfg, config, sizeof(*config)); + + return 0; +} + +static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned long flags; + unsigned int status; + LIST_HEAD(head); + int ret; + + mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); + + ret = readx_poll_timeout(readl, c->base + VFF_FLUSH, + status, status != VFF_FLUSH_B, 10, 100); + if (ret) + dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n", + mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); + + /* + * Stop need 3 steps. + * 1. set stop to 1 + * 2. wait en to 0 + * 3. 
set stop as 0 + */ + mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B); + ret = readx_poll_timeout(readl, c->base + VFF_EN, + status, !status, 10, 100); + if (ret) + dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n", + mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); + + mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + + if (c->dir == DMA_DEV_TO_MEM) + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); + else if (c->dir == DMA_MEM_TO_DEV) + mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); + + synchronize_irq(c->irq); + + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); + vchan_dma_desc_free_list(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + + return 0; +} + +static int mtk_uart_apdma_device_pause(struct dma_chan *chan) +{ + struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + + mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); + mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); + + synchronize_irq(c->irq); + + spin_unlock_irqrestore(&c->vc.lock, flags); + + return 0; +} + +static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) +{ + while (!list_empty(&mtkd->ddev.channels)) { + struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels, + struct mtk_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + } +} + +static const struct of_device_id mtk_uart_apdma_match[] = { + { .compatible = "mediatek,mt6577-uart-dma", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); + +static int mtk_uart_apdma_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mtk_uart_apdmadev *mtkd; + int bit_mask = 32, rc; + struct resource *res; + struct mtk_chan *c; + unsigned int i; + + mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL); + if (!mtkd) + return -ENOMEM; + + mtkd->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mtkd->clk)) { + dev_err(&pdev->dev, "No clock specified\n"); + rc = PTR_ERR(mtkd->clk); + return rc; + } + + if (of_property_read_bool(np, "mediatek,dma-33bits")) + mtkd->support_33bits = true; + + if (mtkd->support_33bits) + bit_mask = 33; + + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); + if (rc) + return rc; + + dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask); + mtkd->ddev.device_alloc_chan_resources = + mtk_uart_apdma_alloc_chan_resources; + mtkd->ddev.device_free_chan_resources = + mtk_uart_apdma_free_chan_resources; + mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status; + mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending; + mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg; + mtkd->ddev.device_config = mtk_uart_apdma_slave_config; + mtkd->ddev.device_pause = mtk_uart_apdma_device_pause; + mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all; + mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); + mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); + mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + mtkd->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&mtkd->ddev.channels); + + mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS; + if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) { + dev_info(&pdev->dev, + "Using %u as missing dma-requests property\n", + MTK_UART_APDMA_NR_VCHANS); + } + + for (i = 0; i < 
mtkd->dma_requests; i++) { + c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL); + if (!c) { + rc = -ENODEV; + goto err_no_dma; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) { + rc = -ENODEV; + goto err_no_dma; + } + + c->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(c->base)) { + rc = PTR_ERR(c->base); + goto err_no_dma; + } + c->vc.desc_free = mtk_uart_apdma_desc_free; + vchan_init(&c->vc, &mtkd->ddev); + + rc = platform_get_irq(pdev, i); + if (rc < 0) { + dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i); + goto err_no_dma; + } + c->irq = rc; + } + + pm_runtime_enable(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + + rc = dma_async_device_register(&mtkd->ddev); + if (rc) + goto rpm_disable; + + platform_set_drvdata(pdev, mtkd); + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd); + if (rc) + goto dma_remove; + + return rc; + +dma_remove: + dma_async_device_unregister(&mtkd->ddev); +rpm_disable: + pm_runtime_disable(&pdev->dev); +err_no_dma: + mtk_uart_apdma_free(mtkd); + return rc; +} + +static int mtk_uart_apdma_remove(struct platform_device *pdev) +{ + struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + + mtk_uart_apdma_free(mtkd); + + dma_async_device_unregister(&mtkd->ddev); + + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_uart_apdma_suspend(struct device *dev) +{ + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + if (!pm_runtime_suspended(dev)) + clk_disable_unprepare(mtkd->clk); + + return 0; +} + +static int mtk_uart_apdma_resume(struct device *dev) +{ + int ret; + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + if (!pm_runtime_suspended(dev)) { + ret = clk_prepare_enable(mtkd->clk); + if (ret) + return ret; + } + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int mtk_uart_apdma_runtime_suspend(struct device *dev) +{ + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + clk_disable_unprepare(mtkd->clk); + + return 0; +} + +static int mtk_uart_apdma_runtime_resume(struct device *dev) +{ + int ret; + struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); + + ret = clk_prepare_enable(mtkd->clk); + if (ret) + return ret; + + return 0; +} +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume) + SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend, + mtk_uart_apdma_runtime_resume, NULL) +}; + +static struct platform_driver mtk_uart_apdma_driver = { + .probe = mtk_uart_apdma_probe, + .remove = mtk_uart_apdma_remove, + .driver = { + .name = KBUILD_MODNAME, + .pm = &mtk_uart_apdma_pm_ops, + .of_match_table = of_match_ptr(mtk_uart_apdma_match), + }, +}; + +module_platform_driver(mtk_uart_apdma_driver); + +MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver"); +MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 730a18d0c6d6..fea8608a7810 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -717,10 +717,8 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev) if (mic_dma_dbg) { mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev), mic_dma_dbg); - if (mic_dma_dev->dbg_dir) - debugfs_create_file("mic_dma_reg", 0444, - mic_dma_dev->dbg_dir, mic_dma_dev, - &mic_dma_reg_fops); 
+ debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, + mic_dma_dev, &mic_dma_reg_fops); } return 0; } diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index bb3ccbf90a31..e7d1e12bf464 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -582,18 +582,12 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, } struct mmp_tdma_filter_param { - struct device_node *of_node; unsigned int chan_id; }; static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) { struct mmp_tdma_filter_param *param = fn_param; - struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); - struct dma_device *pdma_device = tdmac->chan.device; - - if (pdma_device->dev->of_node != param->of_node) - return false; if (chan->chan_id != param->chan_id) return false; @@ -611,13 +605,13 @@ static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 1) return NULL; - param.of_node = ofdma->of_node; param.chan_id = dma_spec->args[0]; if (param.chan_id >= TDMA_CHANNEL_NUM) return NULL; - return dma_request_channel(mask, mmp_tdma_filter_fn, ¶m); + return __dma_request_channel(&mask, mmp_tdma_filter_fn, ¶m, + ofdma->of_node); } static const struct of_device_id mmp_tdma_dt_ids[] = { diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 20a9cb7cb6d3..3039bba0e4d5 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -719,7 +719,6 @@ err_out: } struct mxs_dma_filter_param { - struct device_node *of_node; unsigned int chan_id; }; @@ -730,9 +729,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_irq; - if (mxs_dma->dma_device.dev->of_node != param->of_node) - return false; - if (chan->chan_id != param->chan_id) return false; @@ -755,13 +751,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, if (dma_spec->args_count != 1) return NULL; - param.of_node = ofdma->of_node; param.chan_id = dma_spec->args[0]; if (param.chan_id >= mxs_dma->nr_channels) return NULL; - return dma_request_channel(mask, mxs_dma_filter_fn, ¶m); + return __dma_request_channel(&mask, mxs_dma_filter_fn, ¶m, + ofdma->of_node); } static int __init mxs_dma_probe(struct platform_device *pdev) diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 1e4d9ef2aea1..c2d779daa4b5 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -313,8 +313,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, if (count != 1) return NULL; - return dma_request_channel(info->dma_cap, info->filter_fn, - &dma_spec->args[0]); + return __dma_request_channel(&info->dma_cap, info->filter_fn, + &dma_spec->args[0], dma_spec->np); } EXPORT_SYMBOL_GPL(of_dma_simple_xlate); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 56f9fabc99c4..1163af2ba4a3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -25,6 +25,7 @@ #include <linux/err.h> #include <linux/pm_runtime.h> #include <linux/bug.h> +#include <linux/reset.h> #include "dmaengine.h" #define PL330_MAX_CHAN 8 @@ -496,6 +497,9 @@ struct pl330_dmac { unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ int quirks; + + struct reset_control *rstc; + struct reset_control *rstc_ocp; }; static struct pl330_of_quirks { @@ -3024,6 +3028,32 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) amba_set_drvdata(adev, pl330); + pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma"); + if (IS_ERR(pl330->rstc)) { + if 
(PTR_ERR(pl330->rstc) != -EPROBE_DEFER) + dev_err(&adev->dev, "Failed to get reset!\n"); + return PTR_ERR(pl330->rstc); + } else { + ret = reset_control_deassert(pl330->rstc); + if (ret) { + dev_err(&adev->dev, "Couldn't deassert the device from reset!\n"); + return ret; + } + } + + pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp"); + if (IS_ERR(pl330->rstc_ocp)) { + if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER) + dev_err(&adev->dev, "Failed to get OCP reset!\n"); + return PTR_ERR(pl330->rstc_ocp); + } else { + ret = reset_control_deassert(pl330->rstc_ocp); + if (ret) { + dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n"); + return ret; + } + } + for (i = 0; i < AMBA_NR_IRQS; i++) { irq = adev->irq[i]; if (irq) { @@ -3164,6 +3194,11 @@ probe_err3: probe_err2: pl330_del(pl330); + if (pl330->rstc_ocp) + reset_control_assert(pl330->rstc_ocp); + + if (pl330->rstc) + reset_control_assert(pl330->rstc); return ret; } @@ -3202,6 +3237,11 @@ static int pl330_remove(struct amba_device *adev) pl330_del(pl330); + if (pl330->rstc_ocp) + reset_control_assert(pl330->rstc_ocp); + + if (pl330->rstc) + reset_control_assert(pl330->rstc); return 0; } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 468c234cb3be..349fb312c872 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -129,7 +129,6 @@ struct pxad_device { spinlock_t phy_lock; /* Phy association */ #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_root; - struct dentry *dbgfs_state; struct dentry **dbgfs_chan; #endif }; @@ -323,31 +322,18 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, int ch, struct dentry *chandir) { char chan_name[11]; - struct dentry *chan, *chan_state = NULL, *chan_descr = NULL; - struct dentry *chan_reqs = NULL; + struct dentry *chan; void *dt; scnprintf(chan_name, sizeof(chan_name), "%d", ch); chan = debugfs_create_dir(chan_name, chandir); dt = (void *)&pdev->phys[ch]; - if (chan) - chan_state = debugfs_create_file("state", 0400, chan, dt, - &chan_state_fops); - if (chan_state) - chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, - &descriptors_fops); - if (chan_descr) - chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, - &requester_chan_fops); - if (!chan_reqs) - goto err_state; + debugfs_create_file("state", 0400, chan, dt, &chan_state_fops); + debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops); + debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops); return chan; - -err_state: - debugfs_remove_recursive(chan); - return NULL; } static void pxad_init_debugfs(struct pxad_device *pdev) @@ -355,40 +341,20 @@ static void pxad_init_debugfs(struct pxad_device *pdev) int i; struct dentry *chandir; - pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); - if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root) - goto err_root; - - pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, - pdev, &state_fops); - if (!pdev->dbgfs_state) - goto err_state; - pdev->dbgfs_chan = - kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state), + kmalloc_array(pdev->nr_chans, sizeof(struct dentry *), GFP_KERNEL); if (!pdev->dbgfs_chan) - goto err_alloc; + return; + + pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); + + debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops); chandir = debugfs_create_dir("channels", pdev->dbgfs_root); - if (!chandir) - goto err_chandir; - for (i = 0; i < pdev->nr_chans; i++) { + for (i = 0; i < 
pdev->nr_chans; i++) pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir); - if (!pdev->dbgfs_chan[i]) - goto err_chans; - } - - return; -err_chans: -err_chandir: - kfree(pdev->dbgfs_chan); -err_alloc: -err_state: - debugfs_remove_recursive(pdev->dbgfs_root); -err_root: - pr_err("pxad: debugfs is not available\n"); } static void pxad_cleanup_debugfs(struct pxad_device *pdev) diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index f337e2789ddc..f212466744f3 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h @@ -93,8 +93,6 @@ struct hidma_chan { * It is used by the DMA complete notification to * locate the descriptor that initiated the transfer. */ - struct dentry *debugfs; - struct dentry *stats; struct hidma_dev *dmadev; struct hidma_desc *running; @@ -126,7 +124,6 @@ struct hidma_dev { struct dma_device ddev; struct dentry *debugfs; - struct dentry *stats; /* sysfs entry for the channel id */ struct device_attribute *chid_attrs; @@ -158,6 +155,6 @@ irqreturn_t hidma_ll_inthandler(int irq, void *arg); irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause); void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, u8 err_code); -int hidma_debug_init(struct hidma_dev *dmadev); +void hidma_debug_init(struct hidma_dev *dmadev); void hidma_debug_uninit(struct hidma_dev *dmadev); #endif diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c index 75b0691a670d..ce87c7937a0e 100644 --- a/drivers/dma/qcom/hidma_dbg.c +++ b/drivers/dma/qcom/hidma_dbg.c @@ -138,17 +138,13 @@ void hidma_debug_uninit(struct hidma_dev *dmadev) debugfs_remove_recursive(dmadev->debugfs); } -int hidma_debug_init(struct hidma_dev *dmadev) +void hidma_debug_init(struct hidma_dev *dmadev) { - int rc = 0; int chidx = 0; struct list_head *position = NULL; + struct dentry *dir; dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); - if (!dmadev->debugfs) { - rc = -ENODEV; - return rc; - } /* walk through the virtual channel list */ list_for_each(position, &dmadev->ddev.channels) { @@ -157,32 +153,13 @@ int hidma_debug_init(struct hidma_dev *dmadev) chan = list_entry(position, struct hidma_chan, chan.device_node); sprintf(chan->dbg_name, "chan%d", chidx); - chan->debugfs = debugfs_create_dir(chan->dbg_name, + dir = debugfs_create_dir(chan->dbg_name, dmadev->debugfs); - if (!chan->debugfs) { - rc = -ENOMEM; - goto cleanup; - } - chan->stats = debugfs_create_file("stats", S_IRUGO, - chan->debugfs, chan, - &hidma_chan_fops); - if (!chan->stats) { - rc = -ENOMEM; - goto cleanup; - } + debugfs_create_file("stats", S_IRUGO, dir, chan, + &hidma_chan_fops); chidx++; } - dmadev->stats = debugfs_create_file("stats", S_IRUGO, - dmadev->debugfs, dmadev, - &hidma_dma_fops); - if (!dmadev->stats) { - rc = -ENOMEM; - goto cleanup; - } - - return 0; -cleanup: - hidma_debug_uninit(dmadev); - return rc; + debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev, + &hidma_dma_fops); } diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 4d6b02b3b1f1..54d5d0369d3c 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -47,9 +47,3 @@ config RENESAS_USB_DMAC help This driver supports the USB-DMA controller found in the Renesas SoCs. - -config SUDMAC - tristate "Renesas SUDMAC support" - depends on SH_DMAE_BASE - help - Enable support for the Renesas SUDMAC controllers. 
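A number of hunks in this series (imx-sdma, mmp_tdma, mxs-dma, of-dma, rcar-dmac and usb-dmac) replace dma_request_channel() with the new __dma_request_channel(), which additionally takes the controller's device_node so the dmaengine core can restrict the channel search to that controller and the per-driver filter no longer has to compare of_node itself. The fragment below is a minimal illustrative sketch of that xlate pattern, assuming only the signatures visible in the hunks above; the "foo" names and the single-cell chan_id binding are hypothetical placeholders, not code from the patch.

/*
 * Illustrative sketch only, not part of the patch: the of_dma xlate
 * pattern that the converted drivers converge on.  Names prefixed
 * "foo" are hypothetical; __dma_request_channel() and the filter
 * callback signature are as used in the hunks of this series.
 */
#include <linux/dmaengine.h>
#include <linux/of_dma.h>

struct foo_filter_param {
	unsigned int chan_id;	/* channel index from the DT phandle args */
};

static bool foo_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct foo_filter_param *param = fn_param;

	/*
	 * No of_node comparison here any more: __dma_request_channel()
	 * already limits the candidates to channels of the controller
	 * named by the device_node passed in below.
	 */
	return chan->chan_id == param->chan_id;
}

static struct dma_chan *foo_xlate(struct of_phandle_args *dma_spec,
				  struct of_dma *ofdma)
{
	struct foo_filter_param param;
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.chan_id = dma_spec->args[0];

	/*
	 * Pass the controller's of_node instead of open-coding the
	 * controller match inside the filter function.
	 */
	return __dma_request_channel(&mask, foo_filter_fn, &param,
				     ofdma->of_node);
}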
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 42110dd57a56..112fbd22bb3f 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -15,4 +15,3 @@ obj-$(CONFIG_SH_DMAE) += shdma.o obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o -obj-$(CONFIG_SUDMAC) += sudmac.o diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 33ab1b607e2b..9c41a4e42575 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); /* Someone calling slave DMA on a generic channel? */ - if (rchan->mid_rid < 0 || !sg_len) { + if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { dev_warn(chan->device->dev, "%s: bad parameter: len=%d, id=%d\n", __func__, sg_len, rchan->mid_rid); @@ -1654,8 +1654,7 @@ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) * Forcing it to call dma_request_channel() and iterate through all * channels from all controllers is just pointless. */ - if (chan->device->device_config != rcar_dmac_device_config || - dma_spec->np != chan->device->dev->of_node) + if (chan->device->device_config != rcar_dmac_device_config) return false; return !test_and_set_bit(dma_spec->args[0], dmac->modules); @@ -1675,7 +1674,8 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec); + chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, + ofdma->of_node); if (!chan) return NULL; diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c deleted file mode 100644 index 30cc3553cb8b..000000000000 --- a/drivers/dma/sh/sudmac.c +++ /dev/null @@ -1,414 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Renesas SUDMAC support - * - * Copyright (C) 2013 Renesas Solutions Corp. - * - * based on drivers/dma/sh/shdma.c: - * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> - * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 
- */ - -#include <linux/dmaengine.h> -#include <linux/err.h> -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/sudmac.h> - -struct sudmac_chan { - struct shdma_chan shdma_chan; - void __iomem *base; - char dev_id[16]; /* unique name per DMAC of channel */ - - u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ - u32 cfg; - u32 dint_end_bit; -}; - -struct sudmac_device { - struct shdma_dev shdma_dev; - struct sudmac_pdata *pdata; - void __iomem *chan_reg; -}; - -struct sudmac_regs { - u32 base_addr; - u32 base_byte_count; -}; - -struct sudmac_desc { - struct sudmac_regs hw; - struct shdma_desc shdma_desc; -}; - -#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) -#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) -#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ - struct sudmac_device, shdma_dev.dma_dev) - -/* SUDMAC register */ -#define SUDMAC_CH0CFG 0x00 -#define SUDMAC_CH0BA 0x10 -#define SUDMAC_CH0BBC 0x18 -#define SUDMAC_CH0CA 0x20 -#define SUDMAC_CH0CBC 0x28 -#define SUDMAC_CH0DEN 0x30 -#define SUDMAC_DSTSCLR 0x38 -#define SUDMAC_DBUFCTRL 0x3C -#define SUDMAC_DINTCTRL 0x40 -#define SUDMAC_DINTSTS 0x44 -#define SUDMAC_DINTSTSCLR 0x48 -#define SUDMAC_CH0SHCTRL 0x50 - -/* Definitions for the sudmac_channel.config */ -#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ -#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ -#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ - -/* Definitions for the sudmac_channel.dint_end_bit */ -#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ -#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ - -#define SUDMAC_DRV_NAME "sudmac" - -static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) -{ - iowrite32(data, sc->base + reg); -} - -static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) -{ - return ioread32(sc->base + reg); -} - -static bool sudmac_is_busy(struct sudmac_chan *sc) -{ - u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); - - if (den) - return true; /* working */ - - return false; /* waiting */ -} - -static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, - struct shdma_desc *sdesc) -{ - sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); - sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); - sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); -} - -static void sudmac_start(struct sudmac_chan *sc) -{ - u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); - - sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); - sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); -} - -static void sudmac_start_xfer(struct shdma_chan *schan, - struct shdma_desc *sdesc) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - - sudmac_set_reg(sc, &sd->hw, sdesc); - sudmac_start(sc); -} - -static bool sudmac_channel_busy(struct shdma_chan *schan) -{ - struct sudmac_chan *sc = to_chan(schan); - - return sudmac_is_busy(sc); -} - -static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) -{ -} - -static const struct sudmac_slave_config *sudmac_find_slave( - struct sudmac_chan *sc, int slave_id) -{ - struct sudmac_device *sdev = to_sdev(sc); - struct sudmac_pdata *pdata = sdev->pdata; - const struct sudmac_slave_config *cfg; - int i; - - for (i = 0, cfg = pdata->slave; i < pdata->slave_num; 
i++, cfg++) - if (cfg->slave_id == slave_id) - return cfg; - - return NULL; -} - -static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, - dma_addr_t slave_addr, bool try) -{ - struct sudmac_chan *sc = to_chan(schan); - const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); - - if (!cfg) - return -ENODEV; - - return 0; -} - -static inline void sudmac_dma_halt(struct sudmac_chan *sc) -{ - u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); - - sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); - sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); - sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); -} - -static int sudmac_desc_setup(struct shdma_chan *schan, - struct shdma_desc *sdesc, - dma_addr_t src, dma_addr_t dst, size_t *len) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - - dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n", - __func__, &src, &dst, *len); - - if (*len > schan->max_xfer_len) - *len = schan->max_xfer_len; - - if (dst) - sd->hw.base_addr = dst; - else if (src) - sd->hw.base_addr = src; - sd->hw.base_byte_count = *len; - - return 0; -} - -static void sudmac_halt(struct shdma_chan *schan) -{ - struct sudmac_chan *sc = to_chan(schan); - - sudmac_dma_halt(sc); -} - -static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) -{ - struct sudmac_chan *sc = to_chan(schan); - u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); - - if (!(dintsts & sc->dint_end_bit)) - return false; - - /* DMA stop */ - sudmac_dma_halt(sc); - - return true; -} - -static size_t sudmac_get_partial(struct shdma_chan *schan, - struct shdma_desc *sdesc) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); - - return sd->hw.base_byte_count - current_byte_count; -} - -static bool sudmac_desc_completed(struct shdma_chan *schan, - struct shdma_desc *sdesc) -{ - struct sudmac_chan *sc = to_chan(schan); - struct sudmac_desc *sd = to_desc(sdesc); - u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); - - return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; -} - -static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, - unsigned long flags) -{ - struct shdma_dev *sdev = &su_dev->shdma_dev; - struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); - struct sudmac_chan *sc; - struct shdma_chan *schan; - int err; - - sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); - if (!sc) - return -ENOMEM; - - schan = &sc->shdma_chan; - schan->max_xfer_len = 64 * 1024 * 1024 - 1; - - shdma_chan_probe(sdev, schan, id); - - sc->base = su_dev->chan_reg; - - /* get platform_data */ - sc->offset = su_dev->pdata->channel->offset; - if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) - sc->cfg |= SUDMAC_SENDBUFM; - if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) - sc->cfg |= SUDMAC_RCVENDM; - sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; - - if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) - sc->dint_end_bit |= SUDMAC_CH0ENDE; - if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) - sc->dint_end_bit |= SUDMAC_CH1ENDE; - - /* set up channel irq */ - if (pdev->id >= 0) - snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", - pdev->id, id); - else - snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); - - err = shdma_request_irq(schan, irq, flags, sc->dev_id); - if (err) { - 
dev_err(sdev->dma_dev.dev, - "DMA channel %d request_irq failed %d\n", id, err); - goto err_no_irq; - } - - return 0; - -err_no_irq: - /* remove from dmaengine device node */ - shdma_chan_remove(schan); - return err; -} - -static void sudmac_chan_remove(struct sudmac_device *su_dev) -{ - struct shdma_chan *schan; - int i; - - shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { - BUG_ON(!schan); - - shdma_chan_remove(schan); - } -} - -static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) -{ - /* SUDMAC doesn't need the address */ - return 0; -} - -static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) -{ - return &((struct sudmac_desc *)buf)[i].shdma_desc; -} - -static const struct shdma_ops sudmac_shdma_ops = { - .desc_completed = sudmac_desc_completed, - .halt_channel = sudmac_halt, - .channel_busy = sudmac_channel_busy, - .slave_addr = sudmac_slave_addr, - .desc_setup = sudmac_desc_setup, - .set_slave = sudmac_set_slave, - .setup_xfer = sudmac_setup_xfer, - .start_xfer = sudmac_start_xfer, - .embedded_desc = sudmac_embedded_desc, - .chan_irq = sudmac_chan_irq, - .get_partial = sudmac_get_partial, -}; - -static int sudmac_probe(struct platform_device *pdev) -{ - struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev); - int err, i; - struct sudmac_device *su_dev; - struct dma_device *dma_dev; - struct resource *chan, *irq_res; - - /* get platform data */ - if (!pdata) - return -ENODEV; - - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!irq_res) - return -ENODEV; - - err = -ENOMEM; - su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), - GFP_KERNEL); - if (!su_dev) - return err; - - dma_dev = &su_dev->shdma_dev.dma_dev; - - chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); - su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); - if (IS_ERR(su_dev->chan_reg)) - return PTR_ERR(su_dev->chan_reg); - - dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); - - su_dev->shdma_dev.ops = &sudmac_shdma_ops; - su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); - err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); - if (err < 0) - return err; - - /* platform data */ - su_dev->pdata = dev_get_platdata(&pdev->dev); - - platform_set_drvdata(pdev, su_dev); - - /* Create DMA Channel */ - for (i = 0; i < pdata->channel_num; i++) { - err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); - if (err) - goto chan_probe_err; - } - - err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); - if (err < 0) - goto chan_probe_err; - - return err; - -chan_probe_err: - sudmac_chan_remove(su_dev); - - shdma_cleanup(&su_dev->shdma_dev); - - return err; -} - -static int sudmac_remove(struct platform_device *pdev) -{ - struct sudmac_device *su_dev = platform_get_drvdata(pdev); - struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; - - dma_async_device_unregister(dma_dev); - sudmac_chan_remove(su_dev); - shdma_cleanup(&su_dev->shdma_dev); - - return 0; -} - -static struct platform_driver sudmac_driver = { - .driver = { - .name = SUDMAC_DRV_NAME, - }, - .probe = sudmac_probe, - .remove = sudmac_remove, -}; -module_platform_driver(sudmac_driver); - -MODULE_AUTHOR("Yoshihiro Shimoda"); -MODULE_DESCRIPTION("Renesas SUDMAC driver"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:" SUDMAC_DRV_NAME); diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 59403f6d008a..17063aaf51bc 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -57,7 +57,7 @@ struct usb_dmac_desc { u32 residue; struct 
list_head node; dma_cookie_t done_cookie; - struct usb_dmac_sg sg[0]; + struct usb_dmac_sg sg[]; }; #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) @@ -636,9 +636,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg) struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); struct of_phandle_args *dma_spec = arg; - if (dma_spec->np != chan->device->dev->of_node) - return false; - /* USB-DMAC should be used with fixed usb controller's FIFO */ if (uchan->index != dma_spec->args[0]) return false; @@ -659,7 +656,8 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec); + chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec, + ofdma->of_node); if (!chan) return NULL; diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index da41bab98f5b..ef4d109e7189 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1365,7 +1365,6 @@ static int stm32_dma_probe(struct platform_device *pdev) for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { chan = &dmadev->chan[i]; - chan->irq = platform_get_irq(pdev, i); ret = platform_get_irq(pdev, i); if (ret < 0) { if (ret != -EPROBE_DEFER) diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index 715aad7a9192..b552949da14b 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -295,8 +295,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) #ifdef CONFIG_PM static int stm32_dmamux_runtime_suspend(struct device *dev) { - struct platform_device *pdev = - container_of(dev, struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); clk_disable_unprepare(stm32_dmamux->clk); @@ -306,8 +305,7 @@ static int stm32_dmamux_runtime_suspend(struct device *dev) static int stm32_dmamux_runtime_resume(struct device *dev) { - struct platform_device *pdev = - container_of(dev, struct platform_device, dev); + struct platform_device *pdev = to_platform_device(dev); struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); int ret; diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index e8fcc69b1de9..ed5b68dcfe50 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -64,17 +64,20 @@ #define DMA_CHAN_LLI_ADDR 0x08 #define DMA_CHAN_CUR_CFG 0x0c -#define DMA_CHAN_MAX_DRQ 0x1f -#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ) -#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) -#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) +#define DMA_CHAN_MAX_DRQ_A31 0x1f +#define DMA_CHAN_MAX_DRQ_H6 0x3f +#define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) +#define DMA_CHAN_CFG_SRC_DRQ_H6(x) ((x) & DMA_CHAN_MAX_DRQ_H6) +#define DMA_CHAN_CFG_SRC_MODE_A31(x) (((x) & 0x1) << 5) +#define DMA_CHAN_CFG_SRC_MODE_H6(x) (((x) & 0x1) << 8) #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) -#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) -#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) -#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) +#define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) +#define DMA_CHAN_CFG_DST_DRQ_H6(x) (DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16) +#define DMA_CHAN_CFG_DST_MODE_A31(x) 
(DMA_CHAN_CFG_SRC_MODE_A31(x) << 16) +#define DMA_CHAN_CFG_DST_MODE_H6(x) (DMA_CHAN_CFG_SRC_MODE_H6(x) << 16) #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) @@ -94,6 +97,8 @@ #define LLI_LAST_ITEM 0xfffff800 #define NORMAL_WAIT 8 #define DRQ_SDRAM 1 +#define LINEAR_MODE 0 +#define IO_MODE 1 /* forward declaration */ struct sun6i_dma_dev; @@ -121,10 +126,13 @@ struct sun6i_dma_config { */ void (*clock_autogate_enable)(struct sun6i_dma_dev *); void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); + void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq); + void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode); u32 src_burst_lengths; u32 dst_burst_lengths; u32 src_addr_widths; u32 dst_addr_widths; + bool has_mbus_clk; }; /* @@ -178,6 +186,7 @@ struct sun6i_dma_dev { struct dma_device slave; void __iomem *base; struct clk *clk; + struct clk *clk_mbus; int irq; spinlock_t lock; struct reset_control *rstc; @@ -305,6 +314,30 @@ static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) DMA_CHAN_CFG_DST_BURST_H3(dst_burst); } +static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) | + DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); +} + +static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) | + DMA_CHAN_CFG_DST_DRQ_H6(dst_drq); +} + +static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) | + DMA_CHAN_CFG_DST_MODE_A31(dst_mode); +} + +static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode) +{ + *p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) | + DMA_CHAN_CFG_DST_MODE_H6(dst_mode); +} + static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) { struct sun6i_desc *txd = pchan->desc; @@ -628,14 +661,12 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( burst = convert_burst(8); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); - v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_WIDTH(width) | + v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) | DMA_CHAN_CFG_DST_WIDTH(width); sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM); + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); @@ -687,11 +718,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( if (dir == DMA_MEM_TO_DEV) { v_lli->src = sg_dma_address(sg); v_lli->dst = sconfig->dst_addr; - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_IO_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_DRQ(vchan->port); + v_lli->cfg = lli_cfg; + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %u. 
flags: 0x%08lx\n", @@ -702,11 +731,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( } else { v_lli->src = sconfig->src_addr; v_lli->dst = sg_dma_address(sg); - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_IO_MODE | - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_SRC_DRQ(vchan->port); + v_lli->cfg = lli_cfg; + sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); + sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", @@ -772,19 +799,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( if (dir == DMA_MEM_TO_DEV) { v_lli->src = buf_addr + period_len * i; v_lli->dst = sconfig->dst_addr; - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_IO_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE | - DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_DST_DRQ(vchan->port); + v_lli->cfg = lli_cfg; + sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); + sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); } else { v_lli->src = sconfig->src_addr; v_lli->dst = buf_addr + period_len * i; - v_lli->cfg = lli_cfg | - DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_IO_MODE | - DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | - DMA_CHAN_CFG_SRC_DRQ(vchan->port); + v_lli->cfg = lli_cfg; + sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); + sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); } prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); @@ -1049,6 +1072,8 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { .nr_max_requests = 30, .nr_max_vchans = 53, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1070,6 +1095,8 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { .nr_max_vchans = 37, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1086,6 +1113,8 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { .nr_max_vchans = 39, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1109,6 +1138,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { .nr_max_vchans = 34, .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, + .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1128,6 +1159,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { static struct sun6i_dma_config sun50i_a64_dma_cfg = { .clock_autogate_enable = sun6i_enable_clock_autogate_h3, .set_burst_length = sun6i_set_burst_length_h3, + .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1141,6 
+1174,28 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { }; /* + * The H6 binding uses the number of dma channels from the + * device tree node. + */ +static struct sun6i_dma_config sun50i_h6_dma_cfg = { + .clock_autogate_enable = sun6i_enable_clock_autogate_h3, + .set_burst_length = sun6i_set_burst_length_h3, + .set_drq = sun6i_set_drq_h6, + .set_mode = sun6i_set_mode_h6, + .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), + .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .has_mbus_clk = true, +}; + +/* * The V3s have only 8 physical channels, a maximum DRQ port id of 23, * and a total of 24 usable source and destination endpoints. */ @@ -1151,6 +1206,8 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { .nr_max_vchans = 24, .clock_autogate_enable = sun6i_enable_clock_autogate_a23, .set_burst_length = sun6i_set_burst_length_a31, + .set_drq = sun6i_set_drq_a31, + .set_mode = sun6i_set_mode_a31, .src_burst_lengths = BIT(1) | BIT(8), .dst_burst_lengths = BIT(1) | BIT(8), .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1168,6 +1225,7 @@ static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, + { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun6i_dma_match); @@ -1204,6 +1262,14 @@ static int sun6i_dma_probe(struct platform_device *pdev) return PTR_ERR(sdc->clk); } + if (sdc->cfg->has_mbus_clk) { + sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus"); + if (IS_ERR(sdc->clk_mbus)) { + dev_err(&pdev->dev, "No mbus clock specified\n"); + return PTR_ERR(sdc->clk_mbus); + } + } + sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(sdc->rstc)) { dev_err(&pdev->dev, "No reset controller specified\n"); @@ -1258,8 +1324,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); if (ret && !sdc->max_request) { dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", - DMA_CHAN_MAX_DRQ); - sdc->max_request = DMA_CHAN_MAX_DRQ; + DMA_CHAN_MAX_DRQ_A31); + sdc->max_request = DMA_CHAN_MAX_DRQ_A31; } /* @@ -1308,11 +1374,19 @@ static int sun6i_dma_probe(struct platform_device *pdev) goto err_reset_assert; } + if (sdc->cfg->has_mbus_clk) { + ret = clk_prepare_enable(sdc->clk_mbus); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable mbus clock\n"); + goto err_clk_disable; + } + } + ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, dev_name(&pdev->dev), sdc); if (ret) { dev_err(&pdev->dev, "Cannot request IRQ\n"); - goto err_clk_disable; + goto err_mbus_clk_disable; } ret = dma_async_device_register(&sdc->slave); @@ -1337,6 +1411,8 @@ err_dma_unregister: dma_async_device_unregister(&sdc->slave); err_irq_disable: sun6i_kill_tasklet(sdc); +err_mbus_clk_disable: + clk_disable_unprepare(sdc->clk_mbus); err_clk_disable: clk_disable_unprepare(sdc->clk); err_reset_assert: @@ -1355,6 +1431,7 @@ static int sun6i_dma_remove(struct platform_device *pdev) sun6i_kill_tasklet(sdc); + 
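The hunks above replace the open-coded DRQ/mode configuration with per-SoC callbacks (.set_drq/.set_mode) stored in sun6i_dma_config. A rough sketch of what the A31 flavour of those callbacks can look like, using only the macros visible in the removed lines (illustrative; the exact upstream helpers may differ):

static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq)
{
	/* Same bit positions the removed open-coded version used */
	*p_cfg |= DMA_CHAN_CFG_SRC_DRQ(src_drq) |
		  DMA_CHAN_CFG_DST_DRQ(dst_drq);
}

static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode)
{
	/* Callers pass IO_MODE or LINEAR_MODE for each direction */
	*p_cfg |= (src_mode == IO_MODE ? DMA_CHAN_CFG_SRC_IO_MODE :
					 DMA_CHAN_CFG_SRC_LINEAR_MODE) |
		  (dst_mode == IO_MODE ? DMA_CHAN_CFG_DST_IO_MODE :
					 DMA_CHAN_CFG_DST_LINEAR_MODE);
}

The H6 variants referenced by the new sun50i_h6_dma_cfg (sun6i_set_drq_h6/sun6i_set_mode_h6) would encode the same information into the H6's different register layout.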
clk_disable_unprepare(sdc->clk_mbus); clk_disable_unprepare(sdc->clk); reset_control_assert(sdc->rstc); diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index ef317c90fbe1..79e9593815f1 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -977,8 +977,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; } - if (flags & DMA_PREP_INTERRUPT) + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; @@ -1120,8 +1124,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; } - if (flags & DMA_PREP_INTERRUPT) + if (flags & DMA_PREP_INTERRUPT) { csr |= TEGRA_APBDMA_CSR_IE_EOC; + } else { + WARN_ON_ONCE(1); + return NULL; + } apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index bb5390847257..ec4adf4260a0 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -98,7 +98,7 @@ static void vchan_complete(unsigned long arg) } spin_unlock_irq(&vc->lock); - dmaengine_desc_callback_invoke(&cb, NULL); + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); list_for_each_entry_safe(vd, _vd, &head, node) { dmaengine_desc_get_callback(&vd->tx, &cb); @@ -106,7 +106,7 @@ static void vchan_complete(unsigned long arg) list_del(&vd->node); vchan_vdesc_fini(vd); - dmaengine_desc_callback_invoke(&cb, NULL); + dmaengine_desc_callback_invoke(&cb, &vd->tx_result); } } diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 23342ca23d4a..ab158bac03a7 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -14,6 +14,7 @@ struct virt_dma_desc { struct dma_async_tx_descriptor tx; + struct dmaengine_result tx_result; /* protected by vc.lock */ struct list_head node; }; @@ -62,6 +63,9 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan vd->tx.tx_submit = vchan_tx_submit; vd->tx.desc_free = vchan_tx_desc_free; + vd->tx_result.result = DMA_TRANS_NOERROR; + vd->tx_result.residue = 0; + spin_lock_irqsave(&vc->lock, flags); list_add_tail(&vd->node, &vc->desc_allocated); spin_unlock_irqrestore(&vc->lock, flags); diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 36c092349b5b..e7dc3c4dc8e0 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1095,7 +1095,7 @@ static void xilinx_dma_start(struct xilinx_dma_chan *chan) static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) { struct xilinx_vdma_config *config = &chan->config; - struct xilinx_dma_tx_descriptor *desc, *tail_desc; + struct xilinx_dma_tx_descriptor *desc; u32 reg, j; struct xilinx_vdma_tx_segment *segment, *last = NULL; int i = 0; @@ -1112,8 +1112,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) desc = list_first_entry(&chan->pending_list, struct xilinx_dma_tx_descriptor, node); - tail_desc = list_last_entry(&chan->pending_list, - struct xilinx_dma_tx_descriptor, node); /* Configure the hardware using info in the config structure */ if (chan->has_vflip) { diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 61be15d9df7d..da26a584dca0 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -20,6 +20,7 @@ #define MBOX_CHAN_PROPERTY 8 static struct platform_device *rpi_hwmon; +static struct 
platform_device *rpi_clk; struct rpi_firmware { struct mbox_client cl; @@ -207,6 +208,12 @@ rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw) -1, NULL, 0); } +static void rpi_register_clk_driver(struct device *dev) +{ + rpi_clk = platform_device_register_data(dev, "raspberrypi-clk", + -1, NULL, 0); +} + static int rpi_firmware_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -234,6 +241,7 @@ static int rpi_firmware_probe(struct platform_device *pdev) rpi_firmware_print_firmware_revision(fw); rpi_register_hwmon_driver(dev, fw); + rpi_register_clk_driver(dev); return 0; } @@ -254,6 +262,8 @@ static int rpi_firmware_remove(struct platform_device *pdev) platform_device_unregister(rpi_hwmon); rpi_hwmon = NULL; + platform_device_unregister(rpi_clk); + rpi_clk = NULL; mbox_free_channel(fw->chan); return 0; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index ef93406ace1b..7696c692ad5a 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -916,7 +916,7 @@ static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle, * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_set_clock_state(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, + u32 dev_id, u32 clk_id, u32 flags, u8 state) { struct ti_sci_info *info; @@ -944,7 +944,12 @@ static int ti_sci_set_clock_state(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } req->request_state = state; ret = ti_sci_do_xfer(info, xfer); @@ -976,7 +981,7 @@ fail: * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, + u32 dev_id, u32 clk_id, u8 *programmed_state, u8 *current_state) { struct ti_sci_info *info; @@ -1007,7 +1012,12 @@ static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } ret = ti_sci_do_xfer(info, xfer); if (ret) { @@ -1047,8 +1057,8 @@ fail: * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id, - u8 clk_id, bool needs_ssc, bool can_change_freq, - bool enable_input_term) + u32 clk_id, bool needs_ssc, + bool can_change_freq, bool enable_input_term) { u32 flags = 0; @@ -1073,7 +1083,7 @@ static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id, * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id) + u32 dev_id, u32 clk_id) { return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, MSG_CLOCK_SW_STATE_UNREQ); @@ -1092,7 +1102,7 @@ static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle, * Return: 0 if all went well, else returns appropriate error value. 
*/ static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id) + u32 dev_id, u32 clk_id) { return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, MSG_CLOCK_SW_STATE_AUTO); @@ -1110,7 +1120,7 @@ static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle, * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, bool *req_state) + u32 dev_id, u32 clk_id, bool *req_state) { u8 state = 0; int ret; @@ -1139,7 +1149,7 @@ static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle, * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id, - u8 clk_id, bool *req_state, bool *curr_state) + u32 clk_id, bool *req_state, bool *curr_state) { u8 c_state = 0, r_state = 0; int ret; @@ -1172,7 +1182,7 @@ static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id, * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id, - u8 clk_id, bool *req_state, bool *curr_state) + u32 clk_id, bool *req_state, bool *curr_state) { u8 c_state = 0, r_state = 0; int ret; @@ -1204,7 +1214,7 @@ static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id, * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, u8 parent_id) + u32 dev_id, u32 clk_id, u32 parent_id) { struct ti_sci_info *info; struct ti_sci_msg_req_set_clock_parent *req; @@ -1231,8 +1241,18 @@ static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; - req->parent_id = parent_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } + if (parent_id < 255) { + req->parent_id = parent_id; + } else { + req->parent_id = 255; + req->parent_id_32 = parent_id; + } ret = ti_sci_do_xfer(info, xfer); if (ret) { @@ -1262,7 +1282,7 @@ fail: * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, u8 *parent_id) + u32 dev_id, u32 clk_id, u32 *parent_id) { struct ti_sci_info *info; struct ti_sci_msg_req_get_clock_parent *req; @@ -1289,7 +1309,12 @@ static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } ret = ti_sci_do_xfer(info, xfer); if (ret) { @@ -1299,10 +1324,14 @@ static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle, resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf; - if (!ti_sci_is_response_ack(resp)) + if (!ti_sci_is_response_ack(resp)) { ret = -ENODEV; - else - *parent_id = resp->parent_id; + } else { + if (resp->parent_id < 255) + *parent_id = resp->parent_id; + else + *parent_id = resp->parent_id_32; + } fail: ti_sci_put_one_xfer(&info->minfo, xfer); @@ -1322,8 +1351,8 @@ fail: * Return: 0 if all went well, else returns appropriate error value. 
*/ static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, - u8 *num_parents) + u32 dev_id, u32 clk_id, + u32 *num_parents) { struct ti_sci_info *info; struct ti_sci_msg_req_get_clock_num_parents *req; @@ -1350,7 +1379,12 @@ static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } ret = ti_sci_do_xfer(info, xfer); if (ret) { @@ -1360,10 +1394,14 @@ static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle, resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf; - if (!ti_sci_is_response_ack(resp)) + if (!ti_sci_is_response_ack(resp)) { ret = -ENODEV; - else - *num_parents = resp->num_parents; + } else { + if (resp->num_parents < 255) + *num_parents = resp->num_parents; + else + *num_parents = resp->num_parents_32; + } fail: ti_sci_put_one_xfer(&info->minfo, xfer); @@ -1391,7 +1429,7 @@ fail: * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, u64 min_freq, + u32 dev_id, u32 clk_id, u64 min_freq, u64 target_freq, u64 max_freq, u64 *match_freq) { @@ -1420,7 +1458,12 @@ static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } req->min_freq_hz = min_freq; req->target_freq_hz = target_freq; req->max_freq_hz = max_freq; @@ -1463,7 +1506,7 @@ fail: * Return: 0 if all went well, else returns appropriate error value. */ static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, u64 min_freq, + u32 dev_id, u32 clk_id, u64 min_freq, u64 target_freq, u64 max_freq) { struct ti_sci_info *info; @@ -1491,7 +1534,12 @@ static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } req->min_freq_hz = min_freq; req->target_freq_hz = target_freq; req->max_freq_hz = max_freq; @@ -1524,7 +1572,7 @@ fail: * Return: 0 if all went well, else returns appropriate error value. 
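Every clock request updated in this file repeats the same guard, which can be read as one helper (hypothetical, not something the patch adds): identifiers below 255 travel in the legacy u8 field, anything larger stores the sentinel 255 there and carries the real value in the new 32-bit field.

static void ti_sci_fill_clk_id(u8 *clk_id, u32 *clk_id_32, u32 id)
{
	if (id < 255) {
		*clk_id = id;
	} else {
		*clk_id = 255;		/* sentinel: use the 32-bit field */
		*clk_id_32 = id;
	}
}

Each hunk above is then equivalent to ti_sci_fill_clk_id(&req->clk_id, &req->clk_id_32, clk_id); responses apply the mirror check when reading back resp->parent_id or resp->num_parents.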
*/ static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle, - u32 dev_id, u8 clk_id, u64 *freq) + u32 dev_id, u32 clk_id, u64 *freq) { struct ti_sci_info *info; struct ti_sci_msg_req_get_clock_freq *req; @@ -1551,7 +1599,12 @@ static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle, } req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf; req->dev_id = dev_id; - req->clk_id = clk_id; + if (clk_id < 255) { + req->clk_id = clk_id; + } else { + req->clk_id = 255; + req->clk_id_32 = clk_id; + } ret = ti_sci_do_xfer(info, xfer); if (ret) { @@ -2349,12 +2402,13 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, if (!res) return ERR_PTR(-ENOMEM); - res->sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop, - sizeof(u32)); - if (res->sets < 0) { + ret = of_property_count_elems_of_size(dev_of_node(dev), of_prop, + sizeof(u32)); + if (ret < 0) { dev_err(dev, "%s resource type ids not available\n", of_prop); - return ERR_PTR(res->sets); + return ERR_PTR(ret); } + res->sets = ret; res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc), GFP_KERNEL); diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h index adbeeefaca92..414e0ced5409 100644 --- a/drivers/firmware/ti_sci.h +++ b/drivers/firmware/ti_sci.h @@ -202,7 +202,8 @@ struct ti_sci_msg_req_set_device_resets { * @dev_id: Device identifier this request is for * @clk_id: Clock identifier for the device for this request. * Each device has it's own set of clock inputs. This indexes - * which clock input to modify. + * which clock input to modify. Set to 255 if clock ID is + * greater than or equal to 255. * @request_state: Request the state for the clock to be set to. * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock, * it can be disabled, regardless of the state of the device @@ -213,6 +214,9 @@ struct ti_sci_msg_req_set_device_resets { * being required by the device.(default) * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled, * regardless of the state of the device. + * @clk_id_32: Clock identifier for the device for this request. + * Only to be used if the clock ID is greater than or equal to + * 255. * * Normally, all required clocks are managed by TISCI entity, this is used * only for specific control *IF* required. Auto managed state is @@ -234,6 +238,7 @@ struct ti_sci_msg_req_set_clock_state { #define MSG_CLOCK_SW_STATE_AUTO 1 #define MSG_CLOCK_SW_STATE_REQ 2 u8 request_state; + u32 clk_id_32; } __packed; /** @@ -242,7 +247,11 @@ struct ti_sci_msg_req_set_clock_state { * @dev_id: Device identifier this request is for * @clk_id: Clock identifier for the device for this request. * Each device has it's own set of clock inputs. This indexes - * which clock input to get state of. + * which clock input to get state of. Set to 255 if the clock + * ID is greater than or equal to 255. + * @clk_id_32: Clock identifier for the device for the request. + * Only to be used if the clock ID is greater than or equal to + * 255. * * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state * of the clock @@ -251,6 +260,7 @@ struct ti_sci_msg_req_get_clock_state { struct ti_sci_msg_hdr hdr; u32 dev_id; u8 clk_id; + u32 clk_id_32; } __packed; /** @@ -278,9 +288,13 @@ struct ti_sci_msg_resp_get_clock_state { * @dev_id: Device identifier this request is for * @clk_id: Clock identifier for the device for this request. * Each device has it's own set of clock inputs. This indexes - * which clock input to modify. + * which clock input to modify. 
Set to 255 if clock ID is + * greater than or equal to 255. * @parent_id: The new clock parent is selectable by an index via this - * parameter. + * parameter. Set to 255 if clock ID is greater than or + * equal to 255. + * @clk_id_32: Clock identifier if @clk_id field is 255. + * @parent_id_32: Parent identifier if @parent_id is 255. * * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic * ACK / NACK message. @@ -290,6 +304,8 @@ struct ti_sci_msg_req_set_clock_parent { u32 dev_id; u8 clk_id; u8 parent_id; + u32 clk_id_32; + u32 parent_id_32; } __packed; /** @@ -298,7 +314,10 @@ struct ti_sci_msg_req_set_clock_parent { * @dev_id: Device identifier this request is for * @clk_id: Clock identifier for the device for this request. * Each device has it's own set of clock inputs. This indexes - * which clock input to get the parent for. + * which clock input to get the parent for. If this field + * contains 255, the actual clock identifier is stored in + * @clk_id_32. + * @clk_id_32: Clock identifier if the @clk_id field contains 255. * * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information */ @@ -306,25 +325,32 @@ struct ti_sci_msg_req_get_clock_parent { struct ti_sci_msg_hdr hdr; u32 dev_id; u8 clk_id; + u32 clk_id_32; } __packed; /** * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent * @hdr: Generic Header - * @parent_id: The current clock parent + * @parent_id: The current clock parent. If set to 255, the current parent + * ID can be found from the @parent_id_32 field. + * @parent_id_32: Current clock parent if @parent_id field is set to + * 255. * * Response to TI_SCI_MSG_GET_CLOCK_PARENT. */ struct ti_sci_msg_resp_get_clock_parent { struct ti_sci_msg_hdr hdr; u8 parent_id; + u32 parent_id_32; } __packed; /** * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents * @hdr: Generic header * @dev_id: Device identifier this request is for - * @clk_id: Clock identifier for the device for this request. + * @clk_id: Clock identifier for the device for this request. Set to + * 255 if clock ID is greater than or equal to 255. + * @clk_id_32: Clock identifier if the @clk_id field contains 255. * * This request provides information about how many clock parent options * are available for a given clock to a device. This is typically used @@ -337,18 +363,24 @@ struct ti_sci_msg_req_get_clock_num_parents { struct ti_sci_msg_hdr hdr; u32 dev_id; u8 clk_id; + u32 clk_id_32; } __packed; /** * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents * @hdr: Generic header - * @num_parents: Number of clock parents + * @num_parents: Number of clock parents. If set to 255, the actual + * number of parents is stored into @num_parents_32 + * field instead. + * @num_parents_32: Number of clock parents if @num_parents field is + * set to 255. * * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS */ struct ti_sci_msg_resp_get_clock_num_parents { struct ti_sci_msg_hdr hdr; u8 num_parents; + u32 num_parents_32; } __packed; /** @@ -363,7 +395,9 @@ struct ti_sci_msg_resp_get_clock_num_parents { * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum * allowable programmed frequency and does not account for clock * tolerances and jitter. - * @clk_id: Clock identifier for the device for this request. + * @clk_id: Clock identifier for the device for this request. Set to + * 255 if clock identifier is greater than or equal to 255. + * @clk_id_32: Clock identifier if @clk_id is set to 255. 
* * NOTE: Normally clock frequency management is automatically done by TISCI * entity. In case of specific requests, TISCI evaluates capability to achieve @@ -380,6 +414,7 @@ struct ti_sci_msg_req_query_clock_freq { u64 target_freq_hz; u64 max_freq_hz; u8 clk_id; + u32 clk_id_32; } __packed; /** @@ -407,7 +442,9 @@ struct ti_sci_msg_resp_query_clock_freq { * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum * allowable programmed frequency and does not account for clock * tolerances and jitter. - * @clk_id: Clock identifier for the device for this request. + * @clk_id: Clock identifier for the device for this request. Set to + * 255 if clock ID is greater than or equal to 255. + * @clk_id_32: Clock identifier if @clk_id field is set to 255. * * NOTE: Normally clock frequency management is automatically done by TISCI * entity. In case of specific requests, TISCI evaluates capability to achieve @@ -436,13 +473,16 @@ struct ti_sci_msg_req_set_clock_freq { u64 target_freq_hz; u64 max_freq_hz; u8 clk_id; + u32 clk_id_32; } __packed; /** * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency * @hdr: Generic Header * @dev_id: Device identifier this request is for - * @clk_id: Clock identifier for the device for this request. + * @clk_id: Clock identifier for the device for this request. Set to + * 255 if clock ID is greater than or equal to 255. + * @clk_id_32: Clock identifier if @clk_id field is set to 255. * * NOTE: Normally clock frequency management is automatically done by TISCI * entity. In some cases, clock frequencies are configured by host. @@ -454,6 +494,7 @@ struct ti_sci_msg_req_get_clock_freq { struct ti_sci_msg_hdr hdr; u32 dev_id; u8 clk_id; + u32 clk_id_32; } __packed; /** diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c index dcd80b088c7b..62f924489db5 100644 --- a/drivers/fpga/dfl-afu-dma-region.c +++ b/drivers/fpga/dfl-afu-dma-region.c @@ -12,6 +12,7 @@ #include <linux/dma-mapping.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> +#include <linux/mm.h> #include "dfl-afu.h" @@ -32,52 +33,6 @@ void afu_dma_region_init(struct dfl_feature_platform_data *pdata) } /** - * afu_dma_adjust_locked_vm - adjust locked memory - * @dev: port device - * @npages: number of pages - * @incr: increase or decrease locked memory - * - * Increase or decrease the locked memory size with npages input. - * - * Return 0 on success. - * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK. - */ -static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr) -{ - unsigned long locked, lock_limit; - int ret = 0; - - /* the task is exiting. */ - if (!current->mm) - return 0; - - down_write(¤t->mm->mmap_sem); - - if (incr) { - locked = current->mm->locked_vm + npages; - lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - ret = -ENOMEM; - else - current->mm->locked_vm += npages; - } else { - if (WARN_ON_ONCE(npages > current->mm->locked_vm)) - npages = current->mm->locked_vm; - current->mm->locked_vm -= npages; - } - - dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid, - incr ? '+' : '-', npages << PAGE_SHIFT, - current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK), - ret ? 
"- exceeded" : ""); - - up_write(¤t->mm->mmap_sem); - - return ret; -} - -/** * afu_dma_pin_pages - pin pages of given dma memory region * @pdata: feature device platform data * @region: dma memory region to be pinned @@ -92,7 +47,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata, struct device *dev = &pdata->dev->dev; int ret, pinned; - ret = afu_dma_adjust_locked_vm(dev, npages, true); + ret = account_locked_vm(current->mm, npages, true); if (ret) return ret; @@ -121,7 +76,7 @@ put_pages: free_pages: kfree(region->pages); unlock_vm: - afu_dma_adjust_locked_vm(dev, npages, false); + account_locked_vm(current->mm, npages, false); return ret; } @@ -141,7 +96,7 @@ static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata, put_all_pages(region->pages, npages); kfree(region->pages); - afu_dma_adjust_locked_vm(dev, npages, false); + account_locked_vm(current->mm, npages, false); dev_dbg(dev, "%ld pages unpinned\n", npages); } diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index fc494a84a29d..e0b025689625 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -238,8 +238,9 @@ static int davinci_gpio_probe(struct platform_device *pdev) for (i = 0; i < nirq; i++) { chips->irqs[i] = platform_get_irq(pdev, i); if (chips->irqs[i] < 0) { - dev_info(dev, "IRQ not populated, err = %d\n", - chips->irqs[i]); + if (chips->irqs[i] != -EPROBE_DEFER) + dev_info(dev, "IRQ not populated, err = %d\n", + chips->irqs[i]); return chips->irqs[i]; } } diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index b6af705a4e5f..a87951293aaa 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -259,6 +259,13 @@ static const struct irq_domain_ops em_gio_irq_domain_ops = { .xlate = irq_domain_xlate_twocell, }; +static void em_gio_irq_domain_remove(void *data) +{ + struct irq_domain *domain = data; + + irq_domain_remove(domain); +} + static int em_gio_probe(struct platform_device *pdev) { struct em_gio_priv *p; @@ -333,39 +340,30 @@ static int em_gio_probe(struct platform_device *pdev) return -ENXIO; } + ret = devm_add_action_or_reset(&pdev->dev, em_gio_irq_domain_remove, + p->irq_domain); + if (ret) + return ret; + if (devm_request_irq(&pdev->dev, irq[0]->start, em_gio_irq_handler, 0, name, p)) { dev_err(&pdev->dev, "failed to request low IRQ\n"); - ret = -ENOENT; - goto err1; + return -ENOENT; } if (devm_request_irq(&pdev->dev, irq[1]->start, em_gio_irq_handler, 0, name, p)) { dev_err(&pdev->dev, "failed to request high IRQ\n"); - ret = -ENOENT; - goto err1; + return -ENOENT; } ret = devm_gpiochip_add_data(&pdev->dev, gpio_chip, p); if (ret) { dev_err(&pdev->dev, "failed to add GPIO controller\n"); - goto err1; + return ret; } return 0; - -err1: - irq_domain_remove(p->irq_domain); - return ret; -} - -static int em_gio_remove(struct platform_device *pdev) -{ - struct em_gio_priv *p = platform_get_drvdata(pdev); - - irq_domain_remove(p->irq_domain); - return 0; } static const struct of_device_id em_gio_dt_ids[] = { @@ -376,7 +374,6 @@ MODULE_DEVICE_TABLE(of, em_gio_dt_ids); static struct platform_driver em_gio_device_driver = { .probe = em_gio_probe, - .remove = em_gio_remove, .driver = { .name = "em_gio", .of_match_table = em_gio_dt_ids, diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index f974075ff00e..567fb98c0892 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -118,15 +118,8 @@ static void of_gpio_flags_quirks(struct device_node *np, * Legacy handling of SPI active high 
chip select. If we have a * property named "cs-gpios" we need to inspect the child node * to determine if the flags should have inverted semantics. - * - * This does not apply to an SPI device named "spi-gpio", because - * these have traditionally obtained their own GPIOs by parsing - * the device tree directly and did not respect any "spi-cs-high" - * property on the SPI bus children. */ - if (IS_ENABLED(CONFIG_SPI_MASTER) && - !strcmp(propname, "cs-gpios") && - !of_device_is_compatible(np, "spi-gpio") && + if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") && of_property_read_bool(np, "cs-gpios")) { struct device_node *child; u32 cs; @@ -161,6 +154,7 @@ static void of_gpio_flags_quirks(struct device_node *np, of_node_full_name(child)); *flags |= OF_GPIO_ACTIVE_LOW; } + of_node_put(child); break; } } diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index 7869c67e5b6b..37740e992cfa 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig @@ -9,7 +9,7 @@ menuconfig HWSPINLOCK config HWSPINLOCK_OMAP tristate "OMAP Hardware Spinlock device" depends on HWSPINLOCK - depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX + depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 help Say y here to support the OMAP Hardware Spinlock device (firstly introduced in OMAP4). diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index 2bad40d42210..8862445aa858 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -9,6 +9,7 @@ #define pr_fmt(fmt) "%s: " fmt, __func__ +#include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> @@ -23,6 +24,9 @@ #include "hwspinlock_internal.h" +/* retry delay used in atomic context */ +#define HWSPINLOCK_RETRY_DELAY_US 100 + /* radix tree tags */ #define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */ @@ -68,11 +72,11 @@ static DEFINE_MUTEX(hwspinlock_tree_lock); * user need some time-consuming or sleepable operations under the hardware * lock, they need one sleepable lock (like mutex) to protect the operations. * - * If the mode is not HWLOCK_RAW, upon a successful return from this function, - * preemption (and possibly interrupts) is disabled, so the caller must not - * sleep, and is advised to release the hwspinlock as soon as possible. This is - * required in order to minimize remote cores polling on the hardware - * interconnect. + * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful + * return from this function, preemption (and possibly interrupts) is disabled, + * so the caller must not sleep, and is advised to release the hwspinlock as + * soon as possible. This is required in order to minimize remote cores polling + * on the hardware interconnect. * * The user decides whether local interrupts are disabled or not, and if yes, * whether he wants their previous state to be saved. 
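The reworked comments above describe the new HWLOCK_IN_ATOMIC mode: the timeout is enforced by busy-waiting in HWSPINLOCK_RETRY_DELAY_US steps instead of watching jiffies, so it is usable from atomic context as long as the timeout stays in the low-millisecond range. A minimal usage sketch (illustrative only; it calls the bare __hwspin_* entry points shown in these hunks, where real callers would normally go through the hwspin_lock_* inline wrappers):

static int example_update_shared_reg(struct hwspinlock *hwlock,
				     void __iomem *reg, u32 val)
{
	int ret;

	/* 5 ms budget, consumed in HWSPINLOCK_RETRY_DELAY_US udelay() steps */
	ret = __hwspin_lock_timeout(hwlock, 5, HWLOCK_IN_ATOMIC, NULL);
	if (ret)
		return ret;	/* -ETIMEDOUT once the budget is exhausted */

	writel(val, reg);
	__hwspin_unlock(hwlock, HWLOCK_IN_ATOMIC, NULL);

	return 0;
}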
It is up to the user @@ -112,6 +116,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) ret = spin_trylock_irq(&hwlock->lock); break; case HWLOCK_RAW: + case HWLOCK_IN_ATOMIC: ret = 1; break; default: @@ -136,6 +141,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) spin_unlock_irq(&hwlock->lock); break; case HWLOCK_RAW: + case HWLOCK_IN_ATOMIC: /* Nothing to do */ break; default: @@ -179,11 +185,14 @@ EXPORT_SYMBOL_GPL(__hwspin_trylock); * user need some time-consuming or sleepable operations under the hardware * lock, they need one sleepable lock (like mutex) to protect the operations. * - * If the mode is not HWLOCK_RAW, upon a successful return from this function, - * preemption is disabled (and possibly local interrupts, too), so the caller - * must not sleep, and is advised to release the hwspinlock as soon as possible. - * This is required in order to minimize remote cores polling on the - * hardware interconnect. + * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout + * is handled with busy-waiting delays, hence shall not exceed few msecs. + * + * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful + * return from this function, preemption (and possibly interrupts) is disabled, + * so the caller must not sleep, and is advised to release the hwspinlock as + * soon as possible. This is required in order to minimize remote cores polling + * on the hardware interconnect. * * The user decides whether local interrupts are disabled or not, and if yes, * whether he wants their previous state to be saved. It is up to the user @@ -198,7 +207,7 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, int mode, unsigned long *flags) { int ret; - unsigned long expire; + unsigned long expire, atomic_delay = 0; expire = msecs_to_jiffies(to) + jiffies; @@ -212,8 +221,15 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, * The lock is already taken, let's check if the user wants * us to try again */ - if (time_is_before_eq_jiffies(expire)) - return -ETIMEDOUT; + if (mode == HWLOCK_IN_ATOMIC) { + udelay(HWSPINLOCK_RETRY_DELAY_US); + atomic_delay += HWSPINLOCK_RETRY_DELAY_US; + if (atomic_delay > to * 1000) + return -ETIMEDOUT; + } else { + if (time_is_before_eq_jiffies(expire)) + return -ETIMEDOUT; + } /* * Allow platform-specific relax handlers to prevent @@ -276,6 +292,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) spin_unlock_irq(&hwlock->lock); break; case HWLOCK_RAW: + case HWLOCK_IN_ATOMIC: /* Nothing to do */ break; default: @@ -333,6 +350,11 @@ int of_hwspin_lock_get_id(struct device_node *np, int index) if (ret) return ret; + if (!of_device_is_available(args.np)) { + ret = -ENOENT; + goto out; + } + /* Find the hwspinlock device: we need its base_id */ ret = -EPROBE_DEFER; rcu_read_lock(); diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c index 625844e0abef..14e1a532ebb5 100644 --- a/drivers/hwspinlock/omap_hwspinlock.c +++ b/drivers/hwspinlock/omap_hwspinlock.c @@ -140,6 +140,9 @@ static int omap_hwspinlock_probe(struct platform_device *pdev) if (ret) goto reg_fail; + dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n", + num_locks); + return 0; reg_fail: @@ -171,6 +174,7 @@ static int omap_hwspinlock_remove(struct platform_device *pdev) static const struct of_device_id omap_hwspinlock_of_match[] = { { .compatible = "ti,omap4-hwspinlock", }, + { 
.compatible = "ti,am654-hwspinlock", }, { /* end */ }, }; MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match); diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c index 441839288893..c8eacf4f9692 100644 --- a/drivers/hwspinlock/stm32_hwspinlock.c +++ b/drivers/hwspinlock/stm32_hwspinlock.c @@ -5,6 +5,7 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/hwspinlock.h> #include <linux/io.h> #include <linux/kernel.h> @@ -42,9 +43,15 @@ static void stm32_hwspinlock_unlock(struct hwspinlock *lock) writel(STM32_MUTEX_COREID, lock_addr); } +static void stm32_hwspinlock_relax(struct hwspinlock *lock) +{ + ndelay(50); +} + static const struct hwspinlock_ops stm32_hwspinlock_ops = { .trylock = stm32_hwspinlock_trylock, .unlock = stm32_hwspinlock_unlock, + .relax = stm32_hwspinlock_relax, }; static int stm32_hwspinlock_probe(struct platform_device *pdev) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 83664db5221d..e15cdcd8cb3c 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -473,4 +473,15 @@ config HYPERV_IOMMU Stub IOMMU driver to handle IRQs as to allow Hyper-V Linux guests to run with x2APIC mode enabled. +config VIRTIO_IOMMU + bool "Virtio IOMMU driver" + depends on VIRTIO=y + depends on ARM64 + select IOMMU_API + select INTERVAL_TREE + help + Para-virtualised IOMMU driver with virtio. + + Say Y here if you intend to run this kernel as a guest. + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 8c71a15e986b..f13f36ae1af6 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -33,3 +33,4 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o +obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c new file mode 100644 index 000000000000..433f4d2ee956 --- /dev/null +++ b/drivers/iommu/virtio-iommu.c @@ -0,0 +1,1158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Virtio driver for the paravirtualized IOMMU + * + * Copyright (C) 2018 Arm Limited + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/amba/bus.h> +#include <linux/delay.h> +#include <linux/dma-iommu.h> +#include <linux/freezer.h> +#include <linux/interval_tree.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/of_iommu.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ids.h> +#include <linux/wait.h> + +#include <uapi/linux/virtio_iommu.h> + +#define MSI_IOVA_BASE 0x8000000 +#define MSI_IOVA_LENGTH 0x100000 + +#define VIOMMU_REQUEST_VQ 0 +#define VIOMMU_EVENT_VQ 1 +#define VIOMMU_NR_VQS 2 + +struct viommu_dev { + struct iommu_device iommu; + struct device *dev; + struct virtio_device *vdev; + + struct ida domain_ids; + + struct virtqueue *vqs[VIOMMU_NR_VQS]; + spinlock_t request_lock; + struct list_head requests; + void *evts; + + /* Device configuration */ + struct iommu_domain_geometry geometry; + u64 pgsize_bitmap; + u8 domain_bits; + u32 probe_size; +}; + +struct viommu_mapping { + phys_addr_t paddr; + struct interval_tree_node iova; + u32 flags; +}; + +struct viommu_domain { + struct iommu_domain domain; + struct viommu_dev *viommu; + struct mutex mutex; /* protects viommu pointer */ + unsigned int id; + + spinlock_t mappings_lock; + 
struct rb_root_cached mappings; + + unsigned long nr_endpoints; +}; + +struct viommu_endpoint { + struct device *dev; + struct viommu_dev *viommu; + struct viommu_domain *vdomain; + struct list_head resv_regions; +}; + +struct viommu_request { + struct list_head list; + void *writeback; + unsigned int write_offset; + unsigned int len; + char buf[]; +}; + +#define VIOMMU_FAULT_RESV_MASK 0xffffff00 + +struct viommu_event { + union { + u32 head; + struct virtio_iommu_fault fault; + }; +}; + +#define to_viommu_domain(domain) \ + container_of(domain, struct viommu_domain, domain) + +static int viommu_get_req_errno(void *buf, size_t len) +{ + struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); + + switch (tail->status) { + case VIRTIO_IOMMU_S_OK: + return 0; + case VIRTIO_IOMMU_S_UNSUPP: + return -ENOSYS; + case VIRTIO_IOMMU_S_INVAL: + return -EINVAL; + case VIRTIO_IOMMU_S_RANGE: + return -ERANGE; + case VIRTIO_IOMMU_S_NOENT: + return -ENOENT; + case VIRTIO_IOMMU_S_FAULT: + return -EFAULT; + case VIRTIO_IOMMU_S_IOERR: + case VIRTIO_IOMMU_S_DEVERR: + default: + return -EIO; + } +} + +static void viommu_set_req_status(void *buf, size_t len, int status) +{ + struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail); + + tail->status = status; +} + +static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu, + struct virtio_iommu_req_head *req, + size_t len) +{ + size_t tail_size = sizeof(struct virtio_iommu_req_tail); + + if (req->type == VIRTIO_IOMMU_T_PROBE) + return len - viommu->probe_size - tail_size; + + return len - tail_size; +} + +/* + * __viommu_sync_req - Complete all in-flight requests + * + * Wait for all added requests to complete. When this function returns, all + * requests that were in-flight at the time of the call have completed. + */ +static int __viommu_sync_req(struct viommu_dev *viommu) +{ + int ret = 0; + unsigned int len; + size_t write_len; + struct viommu_request *req; + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; + + assert_spin_locked(&viommu->request_lock); + + virtqueue_kick(vq); + + while (!list_empty(&viommu->requests)) { + len = 0; + req = virtqueue_get_buf(vq, &len); + if (!req) + continue; + + if (!len) + viommu_set_req_status(req->buf, req->len, + VIRTIO_IOMMU_S_IOERR); + + write_len = req->len - req->write_offset; + if (req->writeback && len == write_len) + memcpy(req->writeback, req->buf + req->write_offset, + write_len); + + list_del(&req->list); + kfree(req); + } + + return ret; +} + +static int viommu_sync_req(struct viommu_dev *viommu) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&viommu->request_lock, flags); + ret = __viommu_sync_req(viommu); + if (ret) + dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); + spin_unlock_irqrestore(&viommu->request_lock, flags); + + return ret; +} + +/* + * __viommu_add_request - Add one request to the queue + * @buf: pointer to the request buffer + * @len: length of the request buffer + * @writeback: copy data back to the buffer when the request completes. + * + * Add a request to the queue. Only synchronize the queue if it's already full. + * Otherwise don't kick the queue nor wait for requests to complete. + * + * When @writeback is true, data written by the device, including the request + * status, is copied into @buf after the request completes. This is unsafe if + * the caller allocates @buf on stack and drops the lock between add_req() and + * sync_req(). + * + * Return 0 if the request was successfully added to the queue. 
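For example, for a MAP request len is sizeof(struct virtio_iommu_req_map), so viommu_get_write_desc_offset() returns len - sizeof(struct virtio_iommu_req_tail): everything before the tail is sent in the driver-readable descriptor (top_sg), only the status tail is device-writable (bottom_sg), and when @writeback is set the completed tail is copied back into the caller's buffer by __viommu_sync_req().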
+ */ +static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len, + bool writeback) +{ + int ret; + off_t write_offset; + struct viommu_request *req; + struct scatterlist top_sg, bottom_sg; + struct scatterlist *sg[2] = { &top_sg, &bottom_sg }; + struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ]; + + assert_spin_locked(&viommu->request_lock); + + write_offset = viommu_get_write_desc_offset(viommu, buf, len); + if (write_offset <= 0) + return -EINVAL; + + req = kzalloc(sizeof(*req) + len, GFP_ATOMIC); + if (!req) + return -ENOMEM; + + req->len = len; + if (writeback) { + req->writeback = buf + write_offset; + req->write_offset = write_offset; + } + memcpy(&req->buf, buf, write_offset); + + sg_init_one(&top_sg, req->buf, write_offset); + sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset); + + ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC); + if (ret == -ENOSPC) { + /* If the queue is full, sync and retry */ + if (!__viommu_sync_req(viommu)) + ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC); + } + if (ret) + goto err_free; + + list_add_tail(&req->list, &viommu->requests); + return 0; + +err_free: + kfree(req); + return ret; +} + +static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&viommu->request_lock, flags); + ret = __viommu_add_req(viommu, buf, len, false); + if (ret) + dev_dbg(viommu->dev, "could not add request: %d\n", ret); + spin_unlock_irqrestore(&viommu->request_lock, flags); + + return ret; +} + +/* + * Send a request and wait for it to complete. Return the request status (as an + * errno) + */ +static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf, + size_t len) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&viommu->request_lock, flags); + + ret = __viommu_add_req(viommu, buf, len, true); + if (ret) { + dev_dbg(viommu->dev, "could not add request (%d)\n", ret); + goto out_unlock; + } + + ret = __viommu_sync_req(viommu); + if (ret) { + dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret); + /* Fall-through (get the actual request status) */ + } + + ret = viommu_get_req_errno(buf, len); +out_unlock: + spin_unlock_irqrestore(&viommu->request_lock, flags); + return ret; +} + +/* + * viommu_add_mapping - add a mapping to the internal tree + * + * On success, return the new mapping. Otherwise return NULL. + */ +static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova, + phys_addr_t paddr, size_t size, u32 flags) +{ + unsigned long irqflags; + struct viommu_mapping *mapping; + + mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC); + if (!mapping) + return -ENOMEM; + + mapping->paddr = paddr; + mapping->iova.start = iova; + mapping->iova.last = iova + size - 1; + mapping->flags = flags; + + spin_lock_irqsave(&vdomain->mappings_lock, irqflags); + interval_tree_insert(&mapping->iova, &vdomain->mappings); + spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags); + + return 0; +} + +/* + * viommu_del_mappings - remove mappings from the internal tree + * + * @vdomain: the domain + * @iova: start of the range + * @size: size of the range. A size of 0 corresponds to the entire address + * space. 
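For example, viommu_domain_free() below passes iova = 0 and size = 0: last = iova + size - 1 then wraps to ULONG_MAX, so the interval-tree walk spans the whole address space and every remaining mapping is dropped.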
+ * + * On success, returns the number of unmapped bytes (>= size) + */ +static size_t viommu_del_mappings(struct viommu_domain *vdomain, + unsigned long iova, size_t size) +{ + size_t unmapped = 0; + unsigned long flags; + unsigned long last = iova + size - 1; + struct viommu_mapping *mapping = NULL; + struct interval_tree_node *node, *next; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + next = interval_tree_iter_first(&vdomain->mappings, iova, last); + while (next) { + node = next; + mapping = container_of(node, struct viommu_mapping, iova); + next = interval_tree_iter_next(node, iova, last); + + /* Trying to split a mapping? */ + if (mapping->iova.start < iova) + break; + + /* + * Virtio-iommu doesn't allow UNMAP to split a mapping created + * with a single MAP request, so remove the full mapping. + */ + unmapped += mapping->iova.last - mapping->iova.start + 1; + + interval_tree_remove(node, &vdomain->mappings); + kfree(mapping); + } + spin_unlock_irqrestore(&vdomain->mappings_lock, flags); + + return unmapped; +} + +/* + * viommu_replay_mappings - re-send MAP requests + * + * When reattaching a domain that was previously detached from all endpoints, + * mappings were deleted from the device. Re-create the mappings available in + * the internal tree. + */ +static int viommu_replay_mappings(struct viommu_domain *vdomain) +{ + int ret = 0; + unsigned long flags; + struct viommu_mapping *mapping; + struct interval_tree_node *node; + struct virtio_iommu_req_map map; + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL); + while (node) { + mapping = container_of(node, struct viommu_mapping, iova); + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(mapping->iova.start), + .virt_end = cpu_to_le64(mapping->iova.last), + .phys_start = cpu_to_le64(mapping->paddr), + .flags = cpu_to_le32(mapping->flags), + }; + + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); + if (ret) + break; + + node = interval_tree_iter_next(node, 0, -1UL); + } + spin_unlock_irqrestore(&vdomain->mappings_lock, flags); + + return ret; +} + +static int viommu_add_resv_mem(struct viommu_endpoint *vdev, + struct virtio_iommu_probe_resv_mem *mem, + size_t len) +{ + size_t size; + u64 start64, end64; + phys_addr_t start, end; + struct iommu_resv_region *region = NULL; + unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + + start = start64 = le64_to_cpu(mem->start); + end = end64 = le64_to_cpu(mem->end); + size = end64 - start64 + 1; + + /* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */ + if (start != start64 || end != end64 || size < end64 - start64) + return -EOVERFLOW; + + if (len < sizeof(*mem)) + return -EINVAL; + + switch (mem->subtype) { + default: + dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", + mem->subtype); + /* Fall-through */ + case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: + region = iommu_alloc_resv_region(start, size, 0, + IOMMU_RESV_RESERVED); + break; + case VIRTIO_IOMMU_RESV_MEM_T_MSI: + region = iommu_alloc_resv_region(start, size, prot, + IOMMU_RESV_MSI); + break; + } + if (!region) + return -ENOMEM; + + list_add(&vdev->resv_regions, ®ion->list); + return 0; +} + +static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) +{ + int ret; + u16 type, len; + size_t cur = 0; + size_t probe_len; + struct virtio_iommu_req_probe *probe; + struct virtio_iommu_probe_property *prop; + 
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct viommu_endpoint *vdev = fwspec->iommu_priv; + + if (!fwspec->num_ids) + return -EINVAL; + + probe_len = sizeof(*probe) + viommu->probe_size + + sizeof(struct virtio_iommu_req_tail); + probe = kzalloc(probe_len, GFP_KERNEL); + if (!probe) + return -ENOMEM; + + probe->head.type = VIRTIO_IOMMU_T_PROBE; + /* + * For now, assume that properties of an endpoint that outputs multiple + * IDs are consistent. Only probe the first one. + */ + probe->endpoint = cpu_to_le32(fwspec->ids[0]); + + ret = viommu_send_req_sync(viommu, probe, probe_len); + if (ret) + goto out_free; + + prop = (void *)probe->properties; + type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; + + while (type != VIRTIO_IOMMU_PROBE_T_NONE && + cur < viommu->probe_size) { + len = le16_to_cpu(prop->length) + sizeof(*prop); + + switch (type) { + case VIRTIO_IOMMU_PROBE_T_RESV_MEM: + ret = viommu_add_resv_mem(vdev, (void *)prop, len); + break; + default: + dev_err(dev, "unknown viommu prop 0x%x\n", type); + } + + if (ret) + dev_err(dev, "failed to parse viommu prop 0x%x\n", type); + + cur += len; + if (cur >= viommu->probe_size) + break; + + prop = (void *)probe->properties + cur; + type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; + } + +out_free: + kfree(probe); + return ret; +} + +static int viommu_fault_handler(struct viommu_dev *viommu, + struct virtio_iommu_fault *fault) +{ + char *reason_str; + + u8 reason = fault->reason; + u32 flags = le32_to_cpu(fault->flags); + u32 endpoint = le32_to_cpu(fault->endpoint); + u64 address = le64_to_cpu(fault->address); + + switch (reason) { + case VIRTIO_IOMMU_FAULT_R_DOMAIN: + reason_str = "domain"; + break; + case VIRTIO_IOMMU_FAULT_R_MAPPING: + reason_str = "page"; + break; + case VIRTIO_IOMMU_FAULT_R_UNKNOWN: + default: + reason_str = "unknown"; + break; + } + + /* TODO: find EP by ID and report_iommu_fault */ + if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS) + dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n", + reason_str, endpoint, address, + flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "", + flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "", + flags & VIRTIO_IOMMU_FAULT_F_EXEC ? 
"X" : ""); + else + dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n", + reason_str, endpoint); + return 0; +} + +static void viommu_event_handler(struct virtqueue *vq) +{ + int ret; + unsigned int len; + struct scatterlist sg[1]; + struct viommu_event *evt; + struct viommu_dev *viommu = vq->vdev->priv; + + while ((evt = virtqueue_get_buf(vq, &len)) != NULL) { + if (len > sizeof(*evt)) { + dev_err(viommu->dev, + "invalid event buffer (len %u != %zu)\n", + len, sizeof(*evt)); + } else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) { + viommu_fault_handler(viommu, &evt->fault); + } + + sg_init_one(sg, evt, sizeof(*evt)); + ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC); + if (ret) + dev_err(viommu->dev, "could not add event buffer\n"); + } + + virtqueue_kick(vq); +} + +/* IOMMU API */ + +static struct iommu_domain *viommu_domain_alloc(unsigned type) +{ + struct viommu_domain *vdomain; + + if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA) + return NULL; + + vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL); + if (!vdomain) + return NULL; + + mutex_init(&vdomain->mutex); + spin_lock_init(&vdomain->mappings_lock); + vdomain->mappings = RB_ROOT_CACHED; + + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&vdomain->domain)) { + kfree(vdomain); + return NULL; + } + + return &vdomain->domain; +} + +static int viommu_domain_finalise(struct viommu_dev *viommu, + struct iommu_domain *domain) +{ + int ret; + struct viommu_domain *vdomain = to_viommu_domain(domain); + unsigned int max_domain = viommu->domain_bits > 31 ? ~0 : + (1U << viommu->domain_bits) - 1; + + vdomain->viommu = viommu; + + domain->pgsize_bitmap = viommu->pgsize_bitmap; + domain->geometry = viommu->geometry; + + ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); + if (ret >= 0) + vdomain->id = (unsigned int)ret; + + return ret > 0 ? 0 : ret; +} + +static void viommu_domain_free(struct iommu_domain *domain) +{ + struct viommu_domain *vdomain = to_viommu_domain(domain); + + iommu_put_dma_cookie(domain); + + /* Free all remaining mappings (size 2^64) */ + viommu_del_mappings(vdomain, 0, 0); + + if (vdomain->viommu) + ida_free(&vdomain->viommu->domain_ids, vdomain->id); + + kfree(vdomain); +} + +static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + int i; + int ret = 0; + struct virtio_iommu_req_attach req; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct viommu_endpoint *vdev = fwspec->iommu_priv; + struct viommu_domain *vdomain = to_viommu_domain(domain); + + mutex_lock(&vdomain->mutex); + if (!vdomain->viommu) { + /* + * Properly initialize the domain now that we know which viommu + * owns it. + */ + ret = viommu_domain_finalise(vdev->viommu, domain); + } else if (vdomain->viommu != vdev->viommu) { + dev_err(dev, "cannot attach to foreign vIOMMU\n"); + ret = -EXDEV; + } + mutex_unlock(&vdomain->mutex); + + if (ret) + return ret; + + /* + * In the virtio-iommu device, when attaching the endpoint to a new + * domain, it is detached from the old one and, if as as a result the + * old domain isn't attached to any endpoint, all mappings are removed + * from the old domain and it is freed. + * + * In the driver the old domain still exists, and its mappings will be + * recreated if it gets reattached to an endpoint. Otherwise it will be + * freed explicitly. 
+ * + * vdev->vdomain is protected by group->mutex + */ + if (vdev->vdomain) + vdev->vdomain->nr_endpoints--; + + req = (struct virtio_iommu_req_attach) { + .head.type = VIRTIO_IOMMU_T_ATTACH, + .domain = cpu_to_le32(vdomain->id), + }; + + for (i = 0; i < fwspec->num_ids; i++) { + req.endpoint = cpu_to_le32(fwspec->ids[i]); + + ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req)); + if (ret) + return ret; + } + + if (!vdomain->nr_endpoints) { + /* + * This endpoint is the first to be attached to the domain. + * Replay existing mappings (e.g. SW MSI). + */ + ret = viommu_replay_mappings(vdomain); + if (ret) + return ret; + } + + vdomain->nr_endpoints++; + vdev->vdomain = vdomain; + + return 0; +} + +static int viommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot) +{ + int ret; + int flags; + struct virtio_iommu_req_map map; + struct viommu_domain *vdomain = to_viommu_domain(domain); + + flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) | + (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | + (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0); + + ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); + if (ret) + return ret; + + map = (struct virtio_iommu_req_map) { + .head.type = VIRTIO_IOMMU_T_MAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(iova), + .phys_start = cpu_to_le64(paddr), + .virt_end = cpu_to_le64(iova + size - 1), + .flags = cpu_to_le32(flags), + }; + + if (!vdomain->nr_endpoints) + return 0; + + ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); + if (ret) + viommu_del_mappings(vdomain, iova, size); + + return ret; +} + +static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size) +{ + int ret = 0; + size_t unmapped; + struct virtio_iommu_req_unmap unmap; + struct viommu_domain *vdomain = to_viommu_domain(domain); + + unmapped = viommu_del_mappings(vdomain, iova, size); + if (unmapped < size) + return 0; + + /* Device already removed all mappings after detach. */ + if (!vdomain->nr_endpoints) + return unmapped; + + unmap = (struct virtio_iommu_req_unmap) { + .head.type = VIRTIO_IOMMU_T_UNMAP, + .domain = cpu_to_le32(vdomain->id), + .virt_start = cpu_to_le64(iova), + .virt_end = cpu_to_le64(iova + unmapped - 1), + }; + + ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap)); + return ret ? 
0 : unmapped; +} + +static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t iova) +{ + u64 paddr = 0; + unsigned long flags; + struct viommu_mapping *mapping; + struct interval_tree_node *node; + struct viommu_domain *vdomain = to_viommu_domain(domain); + + spin_lock_irqsave(&vdomain->mappings_lock, flags); + node = interval_tree_iter_first(&vdomain->mappings, iova, iova); + if (node) { + mapping = container_of(node, struct viommu_mapping, iova); + paddr = mapping->paddr + (iova - mapping->iova.start); + } + spin_unlock_irqrestore(&vdomain->mappings_lock, flags); + + return paddr; +} + +static void viommu_iotlb_sync(struct iommu_domain *domain) +{ + struct viommu_domain *vdomain = to_viommu_domain(domain); + + viommu_sync_req(vdomain->viommu); +} + +static void viommu_get_resv_regions(struct device *dev, struct list_head *head) +{ + struct iommu_resv_region *entry, *new_entry, *msi = NULL; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct viommu_endpoint *vdev = fwspec->iommu_priv; + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + + list_for_each_entry(entry, &vdev->resv_regions, list) { + if (entry->type == IOMMU_RESV_MSI) + msi = entry; + + new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL); + if (!new_entry) + return; + list_add_tail(&new_entry->list, head); + } + + /* + * If the device didn't register any bypass MSI window, add a + * software-mapped region. + */ + if (!msi) { + msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, + prot, IOMMU_RESV_SW_MSI); + if (!msi) + return; + + list_add_tail(&msi->list, head); + } + + iommu_dma_get_resv_regions(dev, head); +} + +static void viommu_put_resv_regions(struct device *dev, struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); +} + +static struct iommu_ops viommu_ops; +static struct virtio_driver virtio_iommu_drv; + +static int viommu_match_node(struct device *dev, const void *data) +{ + return dev->parent->fwnode == data; +} + +static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode) +{ + struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL, + fwnode, viommu_match_node); + put_device(dev); + + return dev ? dev_to_virtio(dev)->priv : NULL; +} + +static int viommu_add_device(struct device *dev) +{ + int ret; + struct iommu_group *group; + struct viommu_endpoint *vdev; + struct viommu_dev *viommu = NULL; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + + if (!fwspec || fwspec->ops != &viommu_ops) + return -ENODEV; + + viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode); + if (!viommu) + return -ENODEV; + + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); + if (!vdev) + return -ENOMEM; + + vdev->dev = dev; + vdev->viommu = viommu; + INIT_LIST_HEAD(&vdev->resv_regions); + fwspec->iommu_priv = vdev; + + if (viommu->probe_size) { + /* Get additional information for this endpoint */ + ret = viommu_probe_endpoint(viommu, dev); + if (ret) + goto err_free_dev; + } + + ret = iommu_device_link(&viommu->iommu, dev); + if (ret) + goto err_free_dev; + + /* + * Last step creates a default domain and attaches to it. Everything + * must be ready. 
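+ * (iommu_group_get_for_dev() below allocates the group and the default
+ * domain, and ends up calling viommu_attach_dev() above.)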
+ */ + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) { + ret = PTR_ERR(group); + goto err_unlink_dev; + } + + iommu_group_put(group); + + return PTR_ERR_OR_ZERO(group); + +err_unlink_dev: + iommu_device_unlink(&viommu->iommu, dev); +err_free_dev: + viommu_put_resv_regions(dev, &vdev->resv_regions); + kfree(vdev); + + return ret; +} + +static void viommu_remove_device(struct device *dev) +{ + struct viommu_endpoint *vdev; + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + + if (!fwspec || fwspec->ops != &viommu_ops) + return; + + vdev = fwspec->iommu_priv; + + iommu_group_remove_device(dev); + iommu_device_unlink(&vdev->viommu->iommu, dev); + viommu_put_resv_regions(dev, &vdev->resv_regions); + kfree(vdev); +} + +static struct iommu_group *viommu_device_group(struct device *dev) +{ + if (dev_is_pci(dev)) + return pci_device_group(dev); + else + return generic_device_group(dev); +} + +static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args) +{ + return iommu_fwspec_add_ids(dev, args->args, 1); +} + +static struct iommu_ops viommu_ops = { + .domain_alloc = viommu_domain_alloc, + .domain_free = viommu_domain_free, + .attach_dev = viommu_attach_dev, + .map = viommu_map, + .unmap = viommu_unmap, + .iova_to_phys = viommu_iova_to_phys, + .iotlb_sync = viommu_iotlb_sync, + .add_device = viommu_add_device, + .remove_device = viommu_remove_device, + .device_group = viommu_device_group, + .get_resv_regions = viommu_get_resv_regions, + .put_resv_regions = viommu_put_resv_regions, + .of_xlate = viommu_of_xlate, +}; + +static int viommu_init_vqs(struct viommu_dev *viommu) +{ + struct virtio_device *vdev = dev_to_virtio(viommu->dev); + const char *names[] = { "request", "event" }; + vq_callback_t *callbacks[] = { + NULL, /* No async requests */ + viommu_event_handler, + }; + + return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks, + names, NULL); +} + +static int viommu_fill_evtq(struct viommu_dev *viommu) +{ + int i, ret; + struct scatterlist sg[1]; + struct viommu_event *evts; + struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ]; + size_t nr_evts = vq->num_free; + + viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts, + sizeof(*evts), GFP_KERNEL); + if (!evts) + return -ENOMEM; + + for (i = 0; i < nr_evts; i++) { + sg_init_one(sg, &evts[i], sizeof(*evts)); + ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL); + if (ret) + return ret; + } + + return 0; +} + +static int viommu_probe(struct virtio_device *vdev) +{ + struct device *parent_dev = vdev->dev.parent; + struct viommu_dev *viommu = NULL; + struct device *dev = &vdev->dev; + u64 input_start = 0; + u64 input_end = -1UL; + int ret; + + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || + !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP)) + return -ENODEV; + + viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL); + if (!viommu) + return -ENOMEM; + + spin_lock_init(&viommu->request_lock); + ida_init(&viommu->domain_ids); + viommu->dev = dev; + viommu->vdev = vdev; + INIT_LIST_HEAD(&viommu->requests); + + ret = viommu_init_vqs(viommu); + if (ret) + return ret; + + virtio_cread(vdev, struct virtio_iommu_config, page_size_mask, + &viommu->pgsize_bitmap); + + if (!viommu->pgsize_bitmap) { + ret = -EINVAL; + goto err_free_vqs; + } + + viommu->domain_bits = 32; + + /* Optional features */ + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, + struct virtio_iommu_config, input_range.start, + &input_start); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, + struct 
virtio_iommu_config, input_range.end, + &input_end); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, + struct virtio_iommu_config, domain_bits, + &viommu->domain_bits); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, + struct virtio_iommu_config, probe_size, + &viommu->probe_size); + + viommu->geometry = (struct iommu_domain_geometry) { + .aperture_start = input_start, + .aperture_end = input_end, + .force_aperture = true, + }; + + viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; + + virtio_device_ready(vdev); + + /* Populate the event queue with buffers */ + ret = viommu_fill_evtq(viommu); + if (ret) + goto err_free_vqs; + + ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s", + virtio_bus_name(vdev)); + if (ret) + goto err_free_vqs; + + iommu_device_set_ops(&viommu->iommu, &viommu_ops); + iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode); + + iommu_device_register(&viommu->iommu); + +#ifdef CONFIG_PCI + if (pci_bus_type.iommu_ops != &viommu_ops) { + pci_request_acs(); + ret = bus_set_iommu(&pci_bus_type, &viommu_ops); + if (ret) + goto err_unregister; + } +#endif +#ifdef CONFIG_ARM_AMBA + if (amba_bustype.iommu_ops != &viommu_ops) { + ret = bus_set_iommu(&amba_bustype, &viommu_ops); + if (ret) + goto err_unregister; + } +#endif + if (platform_bus_type.iommu_ops != &viommu_ops) { + ret = bus_set_iommu(&platform_bus_type, &viommu_ops); + if (ret) + goto err_unregister; + } + + vdev->priv = viommu; + + dev_info(dev, "input address: %u bits\n", + order_base_2(viommu->geometry.aperture_end)); + dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap); + + return 0; + +err_unregister: + iommu_device_sysfs_remove(&viommu->iommu); + iommu_device_unregister(&viommu->iommu); +err_free_vqs: + vdev->config->del_vqs(vdev); + + return ret; +} + +static void viommu_remove(struct virtio_device *vdev) +{ + struct viommu_dev *viommu = vdev->priv; + + iommu_device_sysfs_remove(&viommu->iommu); + iommu_device_unregister(&viommu->iommu); + + /* Stop all virtqueues */ + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + + dev_info(&vdev->dev, "device removed\n"); +} + +static void viommu_config_changed(struct virtio_device *vdev) +{ + dev_warn(&vdev->dev, "config changed\n"); +} + +static unsigned int features[] = { + VIRTIO_IOMMU_F_MAP_UNMAP, + VIRTIO_IOMMU_F_DOMAIN_BITS, + VIRTIO_IOMMU_F_INPUT_RANGE, + VIRTIO_IOMMU_F_PROBE, +}; + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_iommu_drv = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .probe = viommu_probe, + .remove = viommu_remove, + .config_changed = viommu_config_changed, +}; + +module_virtio_driver(virtio_iommu_drv); + +MODULE_DESCRIPTION("Virtio IOMMU driver"); +MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c index 2a3f7ef1c8c4..b232ed279fc3 100644 --- a/drivers/memory/jz4780-nemc.c +++ b/drivers/memory/jz4780-nemc.c @@ -61,7 +61,7 @@ struct jz4780_nemc { * * Return: The number of unique NEMC banks referred to by the specified NEMC * child device. Unique here means that a device that references the same bank - * multiple times in the its "reg" property will only count once. + * multiple times in its "reg" property will only count once. 
*/ unsigned int jz4780_nemc_num_banks(struct device *dev) { diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 6765f10837ac..6e208a060a58 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -793,7 +793,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, - { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, + { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), .driver_data = (kernel_ulong_t)&am654_data }, diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index c5cae8e74dc4..060712c666bf 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -33,6 +33,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/list.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 5e4f3a8c5784..e4332d5a5757 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -53,7 +53,7 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) pad = round_up(skb->len, 4) + 4 - skb->len; /* First packet of a A-MSDU burst keeps track of the whole burst - * length, need to update lenght of it and the last packet. + * length, need to update length of it and the last packet. */ skb_walk_frags(skb, iter) { last = iter; diff --git a/drivers/of/base.c b/drivers/of/base.c index 20e0e7ee4edf..55e7f5bb0549 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -2294,8 +2294,12 @@ int of_map_rid(struct device_node *np, u32 rid, return 0; } - pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n", - np, map_name, rid, target && *target ? *target : NULL); - return -EFAULT; + pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name, + rid, target && *target ? 
*target : NULL); + + /* Bypasses translation */ + if (id_out) + *id_out = rid; + return 0; } EXPORT_SYMBOL_GPL(of_map_rid); diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 73d5adec0a28..bc7b27a28795 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -22,12 +22,15 @@ void pci_set_of_node(struct pci_dev *dev) return; dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn); + if (dev->dev.of_node) + dev->dev.fwnode = &dev->dev.of_node->fwnode; } void pci_release_of_node(struct pci_dev *dev) { of_node_put(dev->dev.of_node); dev->dev.of_node = NULL; + dev->dev.fwnode = NULL; } void pci_set_bus_of_node(struct pci_bus *bus) @@ -41,13 +44,18 @@ void pci_set_bus_of_node(struct pci_bus *bus) if (node && of_property_read_bool(node, "external-facing")) bus->self->untrusted = true; } + bus->dev.of_node = node; + + if (bus->dev.of_node) + bus->dev.fwnode = &bus->dev.of_node->fwnode; } void pci_release_bus_of_node(struct pci_bus *bus) { of_node_put(bus->dev.of_node); bus->dev.of_node = NULL; + bus->dev.fwnode = NULL; } struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 3a546ec10d90..22a65ad4e46e 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -152,6 +152,14 @@ static long pps_cdev_ioctl(struct file *file, pps->params.mode |= PPS_CANWAIT; pps->params.api_version = PPS_API_VERS; + /* + * Clear unused fields of pps_kparams to avoid leaking + * uninitialized data of the PPS_SETPARAMS caller via + * PPS_GETPARAMS + */ + pps->params.assert_off_tu.flags = 0; + pps->params.clear_off_tu.flags = 0; + spin_unlock_irq(&pps->lock); break; diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index ce7a90e68042..8155f59ece38 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1686,6 +1686,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, if (copy_from_user(&dev_info, arg, sizeof(dev_info))) return -EFAULT; + dev_info.name[sizeof(dev_info.name) - 1] = '\0'; rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, dev_info.comptag, dev_info.destid, dev_info.hopcount); @@ -1817,6 +1818,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) if (copy_from_user(&dev_info, arg, sizeof(dev_info))) return -EFAULT; + dev_info.name[sizeof(dev_info.name) - 1] = '\0'; mport = priv->md->mport; diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 18be41b8aa7e..28ed306982f7 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -16,7 +16,7 @@ if REMOTEPROC config IMX_REMOTEPROC tristate "IMX6/7 remoteproc support" - depends on SOC_IMX6SX || SOC_IMX7D + depends on ARCH_MXC help Say y here to support iMX's remote processors (Cortex M4 on iMX7D) via the remote processor framework. @@ -116,6 +116,7 @@ config QCOM_Q6V5_MSS depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n depends on QCOM_SYSMON || QCOM_SYSMON=n select MFD_SYSCON + select QCOM_MDT_LOADER select QCOM_Q6V5_COMMON select QCOM_RPROC_COMMON select QCOM_SCM @@ -198,6 +199,21 @@ config ST_REMOTEPROC config ST_SLIM_REMOTEPROC tristate +config STM32_RPROC + tristate "STM32 remoteproc support" + depends on ARCH_STM32 + depends on REMOTEPROC + select MAILBOX + help + Say y here to support STM32 MCU processors via the + remote processor framework. 
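+	  The remote firmware is controlled through the STM32 hold-boot and
+	  reset lines exposed via syscon, and messages are exchanged over the
+	  mailbox channels (vq0, vq1, shutdown) described in the device tree.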
+ + You want to say y here in order to enable AMP + use-cases to run on your platform (dedicated firmware could be + offloaded to remote MCU processors using this framework). + + This can be either built-in or a loadable module. + endif # REMOTEPROC endmenu diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index ce5d061e92be..00f09e658cb3 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -26,3 +26,4 @@ qcom_wcnss_pil-y += qcom_wcnss.o qcom_wcnss_pil-y += qcom_wcnss_iris.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o +obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 36f2f14dad0c..3e72b6f38d4b 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -165,7 +165,7 @@ static int imx_rproc_start(struct rproc *rproc) ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_start); if (ret) - dev_err(dev, "Filed to enable M4!\n"); + dev_err(dev, "Failed to enable M4!\n"); return ret; } @@ -180,7 +180,7 @@ static int imx_rproc_stop(struct rproc *rproc) ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask, dcfg->src_stop); if (ret) - dev_err(dev, "Filed to stop M4!\n"); + dev_err(dev, "Failed to stop M4!\n"); return ret; } @@ -203,7 +203,7 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da, } } - dev_warn(priv->dev, "Translation filed: da = 0x%llx len = 0x%x\n", + dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%x\n", da, len); return -ENOENT; } @@ -349,7 +349,7 @@ static int imx_rproc_probe(struct platform_device *pdev) ret = imx_rproc_addr_init(priv, pdev); if (ret) { - dev_err(dev, "filed on imx_rproc_addr_init\n"); + dev_err(dev, "failed on imx_rproc_addr_init\n"); goto err_put_rproc; } diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index 1f3ef9ee493c..e953886b2eb7 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -46,11 +46,9 @@ #define LPASS_PWR_ON_REG 0x10 #define LPASS_HALTREQ_REG 0x0 -/* list of clocks required by ADSP PIL */ -static const char * const adsp_clk_id[] = { - "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr", - "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", -}; +#define QDSP6SS_XO_CBCR 0x38 +#define QDSP6SS_CORE_CBCR 0x20 +#define QDSP6SS_SLEEP_CBCR 0x3c struct adsp_pil_data { int crash_reason_smem; @@ -59,6 +57,9 @@ struct adsp_pil_data { const char *ssr_name; const char *sysmon_name; int ssctl_id; + + const char **clk_ids; + int num_clks; }; struct qcom_adsp { @@ -75,7 +76,7 @@ struct qcom_adsp { void __iomem *qdsp6ss_base; struct reset_control *pdc_sync_reset; - struct reset_control *cc_lpass_restart; + struct reset_control *restart; struct regmap *halt_map; unsigned int halt_lpass; @@ -143,7 +144,7 @@ reset: /* Assert the LPASS PDC Reset */ reset_control_assert(adsp->pdc_sync_reset); /* Place the LPASS processor into reset */ - reset_control_assert(adsp->cc_lpass_restart); + reset_control_assert(adsp->restart); /* wait after asserting subsystem restart from AOSS */ usleep_range(200, 300); @@ -153,7 +154,7 @@ reset: /* De-assert the LPASS PDC Reset */ reset_control_deassert(adsp->pdc_sync_reset); /* Remove the LPASS reset */ - reset_control_deassert(adsp->cc_lpass_restart); + reset_control_deassert(adsp->restart); /* wait after de-asserting subsystem restart from AOSS */ usleep_range(200, 300); @@ -192,6 +193,15 @@ 
static int adsp_start(struct rproc *rproc) goto disable_power_domain; } + /* Enable the XO clock */ + writel(1, adsp->qdsp6ss_base + QDSP6SS_XO_CBCR); + + /* Enable the QDSP6SS sleep clock */ + writel(1, adsp->qdsp6ss_base + QDSP6SS_SLEEP_CBCR); + + /* Enable the QDSP6 core clock */ + writel(1, adsp->qdsp6ss_base + QDSP6SS_CORE_CBCR); + /* Program boot address */ writel(adsp->mem_phys >> 4, adsp->qdsp6ss_base + RST_EVB_REG); @@ -280,8 +290,9 @@ static const struct rproc_ops adsp_ops = { .load = adsp_load, }; -static int adsp_init_clock(struct qcom_adsp *adsp) +static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids) { + int num_clks = 0; int i, ret; adsp->xo = devm_clk_get(adsp->dev, "xo"); @@ -292,32 +303,39 @@ static int adsp_init_clock(struct qcom_adsp *adsp) return ret; } - adsp->num_clks = ARRAY_SIZE(adsp_clk_id); + for (i = 0; clk_ids[i]; i++) + num_clks++; + + adsp->num_clks = num_clks; adsp->clks = devm_kcalloc(adsp->dev, adsp->num_clks, sizeof(*adsp->clks), GFP_KERNEL); if (!adsp->clks) return -ENOMEM; for (i = 0; i < adsp->num_clks; i++) - adsp->clks[i].id = adsp_clk_id[i]; + adsp->clks[i].id = clk_ids[i]; return devm_clk_bulk_get(adsp->dev, adsp->num_clks, adsp->clks); } static int adsp_init_reset(struct qcom_adsp *adsp) { - adsp->pdc_sync_reset = devm_reset_control_get_exclusive(adsp->dev, + adsp->pdc_sync_reset = devm_reset_control_get_optional_exclusive(adsp->dev, "pdc_sync"); if (IS_ERR(adsp->pdc_sync_reset)) { dev_err(adsp->dev, "failed to acquire pdc_sync reset\n"); return PTR_ERR(adsp->pdc_sync_reset); } - adsp->cc_lpass_restart = devm_reset_control_get_exclusive(adsp->dev, - "cc_lpass"); - if (IS_ERR(adsp->cc_lpass_restart)) { - dev_err(adsp->dev, "failed to acquire cc_lpass restart\n"); - return PTR_ERR(adsp->cc_lpass_restart); + adsp->restart = devm_reset_control_get_optional_exclusive(adsp->dev, "restart"); + + /* Fall back to the old "cc_lpass" if "restart" is absent */ + if (!adsp->restart) + adsp->restart = devm_reset_control_get_exclusive(adsp->dev, "cc_lpass"); + + if (IS_ERR(adsp->restart)) { + dev_err(adsp->dev, "failed to acquire restart\n"); + return PTR_ERR(adsp->restart); } return 0; @@ -415,7 +433,7 @@ static int adsp_probe(struct platform_device *pdev) if (ret) goto free_rproc; - ret = adsp_init_clock(adsp); + ret = adsp_init_clock(adsp, desc->clk_ids); if (ret) goto free_rproc; @@ -479,9 +497,28 @@ static const struct adsp_pil_data adsp_resource_init = { .ssr_name = "lpass", .sysmon_name = "adsp", .ssctl_id = 0x14, + .clk_ids = (const char*[]) { + "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr", + "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL + }, + .num_clks = 7, +}; + +static const struct adsp_pil_data cdsp_resource_init = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .ssr_name = "cdsp", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, + .clk_ids = (const char*[]) { + "sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master", + "q6_axim", NULL + }, + .num_clks = 7, }; static const struct of_device_id adsp_of_match[] = { + { .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init }, { .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init }, { }, }; diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 981581bcdd56..8fcf9d28dd73 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -659,23 +659,29 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) { unsigned long dma_attrs = 
DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; + void *metadata; int mdata_perm; int xferop_ret; + size_t size; void *ptr; int ret; - ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); + metadata = qcom_mdt_read_metadata(fw, &size); + if (IS_ERR(metadata)) + return PTR_ERR(metadata); + + ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); if (!ptr) { + kfree(metadata); dev_err(qproc->dev, "failed to allocate mdt buffer\n"); return -ENOMEM; } - memcpy(ptr, fw->data, fw->size); + memcpy(ptr, metadata, size); /* Hypervisor mapping to access metadata by modem */ mdata_perm = BIT(QCOM_SCM_VMID_HLOS); - ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, - true, phys, fw->size); + ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size); if (ret) { dev_err(qproc->dev, "assigning Q6 access to metadata failed: %d\n", ret); @@ -693,14 +699,14 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); /* Metadata authentication done, remove modem access */ - xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, - false, phys, fw->size); + xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size); if (xferop_ret) dev_warn(qproc->dev, "mdt buffer not reclaimed system may become unstable\n"); free_dma_attrs: - dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); + dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); + kfree(metadata); return ret < 0 ? ret : 0; } @@ -981,7 +987,18 @@ static int q6v5_mpss_load(struct q6v5 *qproc) ptr = qproc->mpss_region + offset; - if (phdr->p_filesz) { + if (phdr->p_filesz && phdr->p_offset < fw->size) { + /* Firmware is large enough to be non-split */ + if (phdr->p_offset + phdr->p_filesz > fw->size) { + dev_err(qproc->dev, + "failed to load segment %d from truncated file %s\n", + i, fw_name); + ret = -EINVAL; + goto release_firmware; + } + + memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); + } else if (phdr->p_filesz) { /* Replace "xxx.xxx" with "xxx.bxx" */ sprintf(fw_name + fw_name_len - 3, "b%02d", i); ret = request_firmware(&seg_fw, fw_name, qproc->dev); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8b5363223eaa..3c5fbbbfb0f1 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -512,6 +512,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, /* Initialise vdev subdevice */ snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); rvdev->dev.parent = rproc->dev.parent; + rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset; rvdev->dev.release = rproc_rvdev_release; dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); dev_set_drvdata(&rvdev->dev, rvdev); @@ -1058,6 +1059,20 @@ static int rproc_handle_resources(struct rproc *rproc, dev_dbg(dev, "rsc: type %d\n", hdr->type); + if (hdr->type >= RSC_VENDOR_START && + hdr->type <= RSC_VENDOR_END) { + ret = rproc_handle_rsc(rproc, hdr->type, rsc, + offset + sizeof(*hdr), avail); + if (ret == RSC_HANDLED) + continue; + else if (ret < 0) + break; + + dev_warn(dev, "unsupported vendor resource %d\n", + hdr->type); + continue; + } + if (hdr->type >= RSC_LAST) { dev_warn(dev, "unsupported resource %d\n", hdr->type); continue; diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c index 215a4400f21e..606aae166eba 100644 --- 
a/drivers/remoteproc/remoteproc_elf_loader.c +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -247,8 +247,7 @@ find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size) } /* make sure the offsets array isn't truncated */ - if (table->num * sizeof(table->offset[0]) + - sizeof(struct resource_table) > size) { + if (struct_size(table, offset, table->num) > size) { dev_err(dev, "resource table incomplete\n"); return NULL; } diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index a87750903785..493ef9262411 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -99,6 +99,17 @@ static inline int rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) } static inline +int rproc_handle_rsc(struct rproc *rproc, u32 rsc_type, void *rsc, int offset, + int avail) +{ + if (rproc->ops->handle_rsc) + return rproc->ops->handle_rsc(rproc, rsc_type, rsc, offset, + avail); + + return RSC_IGNORED; +} + +static inline struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc, const struct firmware *fw) { diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c new file mode 100644 index 000000000000..e2da7198b65f --- /dev/null +++ b/drivers/remoteproc/stm32_rproc.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2018 - All Rights Reserved + * Authors: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics. + * Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics. + */ + +#include <linux/arm-smccc.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mailbox_client.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/regmap.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> + +#include "remoteproc_internal.h" + +#define HOLD_BOOT 0 +#define RELEASE_BOOT 1 + +#define MBOX_NB_VQ 2 +#define MBOX_NB_MBX 3 + +#define STM32_SMC_RCC 0x82001000 +#define STM32_SMC_REG_WRITE 0x1 + +#define STM32_MBX_VQ0 "vq0" +#define STM32_MBX_VQ1 "vq1" +#define STM32_MBX_SHUTDOWN "shutdown" + +struct stm32_syscon { + struct regmap *map; + u32 reg; + u32 mask; +}; + +struct stm32_rproc_mem { + char name[20]; + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +struct stm32_rproc_mem_ranges { + u32 dev_addr; + u32 bus_addr; + u32 size; +}; + +struct stm32_mbox { + const unsigned char name[10]; + struct mbox_chan *chan; + struct mbox_client client; + int vq_id; +}; + +struct stm32_rproc { + struct reset_control *rst; + struct stm32_syscon hold_boot; + struct stm32_syscon pdds; + u32 nb_rmems; + struct stm32_rproc_mem *rmems; + struct stm32_mbox mb[MBOX_NB_MBX]; + bool secured_soc; +}; + +static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da) +{ + unsigned int i; + struct stm32_rproc *ddata = rproc->priv; + struct stm32_rproc_mem *p_mem; + + for (i = 0; i < ddata->nb_rmems; i++) { + p_mem = &ddata->rmems[i]; + + if (pa < p_mem->bus_addr || + pa >= p_mem->bus_addr + p_mem->size) + continue; + *da = pa - p_mem->bus_addr + p_mem->dev_addr; + dev_dbg(rproc->dev.parent, "pa %pa to da %llx\n", &pa, *da); + return 0; + } + + return -EINVAL; +} + +static int stm32_rproc_mem_alloc(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + struct device *dev = rproc->dev.parent; + void *va; + + 
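+	/* Map the region write-combined; the remoteproc core loads firmware segments through mem->va */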
dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len); + va = ioremap_wc(mem->dma, mem->len); + if (IS_ERR_OR_NULL(va)) { + dev_err(dev, "Unable to map memory region: %pa+%x\n", + &mem->dma, mem->len); + return -ENOMEM; + } + + /* Update memory entry va */ + mem->va = va; + + return 0; +} + +static int stm32_rproc_mem_release(struct rproc *rproc, + struct rproc_mem_entry *mem) +{ + dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma); + iounmap(mem->va); + + return 0; +} + +static int stm32_rproc_of_memory_translations(struct rproc *rproc) +{ + struct device *parent, *dev = rproc->dev.parent; + struct stm32_rproc *ddata = rproc->priv; + struct device_node *np; + struct stm32_rproc_mem *p_mems; + struct stm32_rproc_mem_ranges *mem_range; + int cnt, array_size, i, ret = 0; + + parent = dev->parent; + np = parent->of_node; + + cnt = of_property_count_elems_of_size(np, "dma-ranges", + sizeof(*mem_range)); + if (cnt <= 0) { + dev_err(dev, "%s: dma-ranges property not defined\n", __func__); + return -EINVAL; + } + + p_mems = devm_kcalloc(dev, cnt, sizeof(*p_mems), GFP_KERNEL); + if (!p_mems) + return -ENOMEM; + mem_range = kcalloc(cnt, sizeof(*mem_range), GFP_KERNEL); + if (!mem_range) + return -ENOMEM; + + array_size = cnt * sizeof(struct stm32_rproc_mem_ranges) / sizeof(u32); + + ret = of_property_read_u32_array(np, "dma-ranges", + (u32 *)mem_range, array_size); + if (ret) { + dev_err(dev, "error while get dma-ranges property: %x\n", ret); + goto free_mem; + } + + for (i = 0; i < cnt; i++) { + p_mems[i].bus_addr = mem_range[i].bus_addr; + p_mems[i].dev_addr = mem_range[i].dev_addr; + p_mems[i].size = mem_range[i].size; + + dev_dbg(dev, "memory range[%i]: da %#x, pa %pa, size %#zx:\n", + i, p_mems[i].dev_addr, &p_mems[i].bus_addr, + p_mems[i].size); + } + + ddata->rmems = p_mems; + ddata->nb_rmems = cnt; + +free_mem: + kfree(mem_range); + return ret; +} + +static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name) +{ + struct stm32_rproc *ddata = rproc->priv; + int i; + + for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) { + if (!strncmp(ddata->mb[i].name, name, strlen(name))) + return i; + } + dev_err(&rproc->dev, "mailbox %s not found\n", name); + + return -EINVAL; +} + +static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc, + const struct firmware *fw) +{ + if (rproc_elf_load_rsc_table(rproc, fw)) + dev_warn(&rproc->dev, "no resource table found for this firmware\n"); + + return 0; +} + +static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + struct device *dev = rproc->dev.parent; + struct device_node *np = dev->of_node; + struct of_phandle_iterator it; + struct rproc_mem_entry *mem; + struct reserved_mem *rmem; + u64 da; + int index = 0; + + /* Register associated reserved memory regions */ + of_phandle_iterator_init(&it, np, "memory-region", NULL, 0); + while (of_phandle_iterator_next(&it) == 0) { + rmem = of_reserved_mem_lookup(it.node); + if (!rmem) { + dev_err(dev, "unable to acquire memory-region\n"); + return -EINVAL; + } + + if (stm32_rproc_pa_to_da(rproc, rmem->base, &da) < 0) { + dev_err(dev, "memory region not valid %pa\n", + &rmem->base); + return -EINVAL; + } + + /* No need to map vdev buffer */ + if (strcmp(it.node->name, "vdev0buffer")) { + /* Register memory region */ + mem = rproc_mem_entry_init(dev, NULL, + (dma_addr_t)rmem->base, + rmem->size, da, + stm32_rproc_mem_alloc, + stm32_rproc_mem_release, + it.node->name); + + if (mem) + rproc_coredump_add_segment(rproc, da, + rmem->size); + } else { + /* Register 
reserved memory for vdev buffer alloc */ + mem = rproc_of_resm_mem_entry_init(dev, index, + rmem->size, + rmem->base, + it.node->name); + } + + if (!mem) + return -ENOMEM; + + rproc_add_carveout(rproc, mem); + index++; + } + + return stm32_rproc_elf_load_rsc_table(rproc, fw); +} + +static irqreturn_t stm32_rproc_wdg(int irq, void *data) +{ + struct rproc *rproc = data; + + rproc_report_crash(rproc, RPROC_WATCHDOG); + + return IRQ_HANDLED; +} + +static void stm32_rproc_mb_callback(struct mbox_client *cl, void *data) +{ + struct rproc *rproc = dev_get_drvdata(cl->dev); + struct stm32_mbox *mb = container_of(cl, struct stm32_mbox, client); + + if (rproc_vq_interrupt(rproc, mb->vq_id) == IRQ_NONE) + dev_dbg(&rproc->dev, "no message found in vq%d\n", mb->vq_id); +} + +static void stm32_rproc_free_mbox(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(ddata->mb); i++) { + if (ddata->mb[i].chan) + mbox_free_channel(ddata->mb[i].chan); + ddata->mb[i].chan = NULL; + } +} + +static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = { + { + .name = STM32_MBX_VQ0, + .vq_id = 0, + .client = { + .rx_callback = stm32_rproc_mb_callback, + .tx_block = false, + }, + }, + { + .name = STM32_MBX_VQ1, + .vq_id = 1, + .client = { + .rx_callback = stm32_rproc_mb_callback, + .tx_block = false, + }, + }, + { + .name = STM32_MBX_SHUTDOWN, + .vq_id = -1, + .client = { + .tx_block = true, + .tx_done = NULL, + .tx_tout = 500, /* 500 ms time out */ + }, + } +}; + +static void stm32_rproc_request_mbox(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + struct device *dev = &rproc->dev; + unsigned int i; + const unsigned char *name; + struct mbox_client *cl; + + /* Initialise mailbox structure table */ + memcpy(ddata->mb, stm32_rproc_mbox, sizeof(stm32_rproc_mbox)); + + for (i = 0; i < MBOX_NB_MBX; i++) { + name = ddata->mb[i].name; + + cl = &ddata->mb[i].client; + cl->dev = dev->parent; + + ddata->mb[i].chan = mbox_request_channel_byname(cl, name); + if (IS_ERR(ddata->mb[i].chan)) { + dev_warn(dev, "cannot get %s mbox\n", name); + ddata->mb[i].chan = NULL; + } + } +} + +static int stm32_rproc_set_hold_boot(struct rproc *rproc, bool hold) +{ + struct stm32_rproc *ddata = rproc->priv; + struct stm32_syscon hold_boot = ddata->hold_boot; + struct arm_smccc_res smc_res; + int val, err; + + val = hold ? 
HOLD_BOOT : RELEASE_BOOT; + + if (IS_ENABLED(CONFIG_HAVE_ARM_SMCCC) && ddata->secured_soc) { + arm_smccc_smc(STM32_SMC_RCC, STM32_SMC_REG_WRITE, + hold_boot.reg, val, 0, 0, 0, 0, &smc_res); + err = smc_res.a0; + } else { + err = regmap_update_bits(hold_boot.map, hold_boot.reg, + hold_boot.mask, val); + } + + if (err) + dev_err(&rproc->dev, "failed to set hold boot\n"); + + return err; +} + +static void stm32_rproc_add_coredump_trace(struct rproc *rproc) +{ + struct rproc_debug_trace *trace; + struct rproc_dump_segment *segment; + bool already_added; + + list_for_each_entry(trace, &rproc->traces, node) { + already_added = false; + + list_for_each_entry(segment, &rproc->dump_segments, node) { + if (segment->da == trace->trace_mem.da) { + already_added = true; + break; + } + } + + if (!already_added) + rproc_coredump_add_segment(rproc, trace->trace_mem.da, + trace->trace_mem.len); + } +} + +static int stm32_rproc_start(struct rproc *rproc) +{ + int err; + + stm32_rproc_add_coredump_trace(rproc); + + err = stm32_rproc_set_hold_boot(rproc, false); + if (err) + return err; + + return stm32_rproc_set_hold_boot(rproc, true); +} + +static int stm32_rproc_stop(struct rproc *rproc) +{ + struct stm32_rproc *ddata = rproc->priv; + int err, dummy_data, idx; + + /* request shutdown of the remote processor */ + if (rproc->state != RPROC_OFFLINE) { + idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN); + if (idx >= 0 && ddata->mb[idx].chan) { + /* a dummy data is sent to allow to block on transmit */ + err = mbox_send_message(ddata->mb[idx].chan, + &dummy_data); + if (err < 0) + dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n"); + } + } + + err = stm32_rproc_set_hold_boot(rproc, true); + if (err) + return err; + + err = reset_control_assert(ddata->rst); + if (err) { + dev_err(&rproc->dev, "failed to assert the reset\n"); + return err; + } + + /* to allow platform Standby power mode, set remote proc Deep Sleep */ + if (ddata->pdds.map) { + err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg, + ddata->pdds.mask, 1); + if (err) { + dev_err(&rproc->dev, "failed to set pdds\n"); + return err; + } + } + + return 0; +} + +static void stm32_rproc_kick(struct rproc *rproc, int vqid) +{ + struct stm32_rproc *ddata = rproc->priv; + unsigned int i; + int err; + + if (WARN_ON(vqid >= MBOX_NB_VQ)) + return; + + for (i = 0; i < MBOX_NB_MBX; i++) { + if (vqid != ddata->mb[i].vq_id) + continue; + if (!ddata->mb[i].chan) + return; + err = mbox_send_message(ddata->mb[i].chan, (void *)(long)vqid); + if (err < 0) + dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n", + __func__, ddata->mb[i].name, err); + return; + } +} + +static struct rproc_ops st_rproc_ops = { + .start = stm32_rproc_start, + .stop = stm32_rproc_stop, + .kick = stm32_rproc_kick, + .load = rproc_elf_load_segments, + .parse_fw = stm32_rproc_parse_fw, + .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, + .sanity_check = rproc_elf_sanity_check, + .get_boot_addr = rproc_elf_get_boot_addr, +}; + +static const struct of_device_id stm32_rproc_match[] = { + { .compatible = "st,stm32mp1-m4" }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_rproc_match); + +static int stm32_rproc_get_syscon(struct device_node *np, const char *prop, + struct stm32_syscon *syscon) +{ + int err = 0; + + syscon->map = syscon_regmap_lookup_by_phandle(np, prop); + if (IS_ERR(syscon->map)) { + err = PTR_ERR(syscon->map); + syscon->map = NULL; + goto out; + } + + err = of_property_read_u32_index(np, prop, 1, &syscon->reg); + if (err) + goto out; + + err = 
of_property_read_u32_index(np, prop, 2, &syscon->mask); + +out: + return err; +} + +static int stm32_rproc_parse_dt(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct rproc *rproc = platform_get_drvdata(pdev); + struct stm32_rproc *ddata = rproc->priv; + struct stm32_syscon tz; + unsigned int tzen; + int err, irq; + + irq = platform_get_irq(pdev, 0); + if (irq > 0) { + err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0, + dev_name(dev), rproc); + if (err) { + dev_err(dev, "failed to request wdg irq\n"); + return err; + } + + dev_info(dev, "wdg irq registered\n"); + } + + ddata->rst = devm_reset_control_get_by_index(dev, 0); + if (IS_ERR(ddata->rst)) { + dev_err(dev, "failed to get mcu reset\n"); + return PTR_ERR(ddata->rst); + } + + /* + * if platform is secured the hold boot bit must be written by + * smc call and read normally. + * if not secure the hold boot bit could be read/write normally + */ + err = stm32_rproc_get_syscon(np, "st,syscfg-tz", &tz); + if (err) { + dev_err(dev, "failed to get tz syscfg\n"); + return err; + } + + err = regmap_read(tz.map, tz.reg, &tzen); + if (err) { + dev_err(&rproc->dev, "failed to read tzen\n"); + return err; + } + ddata->secured_soc = tzen & tz.mask; + + err = stm32_rproc_get_syscon(np, "st,syscfg-holdboot", + &ddata->hold_boot); + if (err) { + dev_err(dev, "failed to get hold boot\n"); + return err; + } + + err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds); + if (err) + dev_warn(dev, "failed to get pdds\n"); + + rproc->auto_boot = of_property_read_bool(np, "st,auto-boot"); + + return stm32_rproc_of_memory_translations(rproc); +} + +static int stm32_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stm32_rproc *ddata; + struct device_node *np = dev->of_node; + struct rproc *rproc; + int ret; + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata)); + if (!rproc) + return -ENOMEM; + + rproc->has_iommu = false; + ddata = rproc->priv; + + platform_set_drvdata(pdev, rproc); + + ret = stm32_rproc_parse_dt(pdev); + if (ret) + goto free_rproc; + + stm32_rproc_request_mbox(rproc); + + ret = rproc_add(rproc); + if (ret) + goto free_mb; + + return 0; + +free_mb: + stm32_rproc_free_mbox(rproc); +free_rproc: + rproc_free(rproc); + return ret; +} + +static int stm32_rproc_remove(struct platform_device *pdev) +{ + struct rproc *rproc = platform_get_drvdata(pdev); + + if (atomic_read(&rproc->power) > 0) + rproc_shutdown(rproc); + + rproc_del(rproc); + stm32_rproc_free_mbox(rproc); + rproc_free(rproc); + + return 0; +} + +static struct platform_driver stm32_rproc_driver = { + .probe = stm32_rproc_probe, + .remove = stm32_rproc_remove, + .driver = { + .name = "stm32-rproc", + .of_match_table = of_match_ptr(stm32_rproc_match), + }, +}; +module_platform_driver(stm32_rproc_driver); + +MODULE_DESCRIPTION("STM32 Remote Processor Control Driver"); +MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>"); +MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 8122807db380..ea88fd4e2a6e 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -493,7 +493,8 @@ static int rpmsg_dev_remove(struct device *dev) if (rpdev->ops->announce_destroy) err = rpdev->ops->announce_destroy(rpdev); - rpdrv->remove(rpdev); + if (rpdrv->remove) 
+ rpdrv->remove(rpdev); dev_pm_domain_detach(dev, true); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 3254a30ebb1b..e72f65b61176 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -562,7 +562,7 @@ config RTC_DRV_TPS6586X config RTC_DRV_TPS65910 tristate "TI TPS65910 RTC driver" - depends on RTC_CLASS && MFD_TPS65910 + depends on MFD_TPS65910 help If you say yes here you get support for the RTC on the TPS65910 chips. @@ -820,6 +820,7 @@ config RTC_DRV_MAX6902 config RTC_DRV_PCF2123 tristate "NXP PCF2123" + select REGMAP_SPI help If you say yes here you get support for the NXP PCF2123 RTC chip. diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 4124f4dd376b..72b7ddc43116 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -633,7 +633,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) { struct rtc_device *rtc; ktime_t period; - int count; + u64 count; rtc = container_of(timer, struct rtc_device, pie_timer); diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 93d338e7732b..1f7e8aefc1eb 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -222,6 +222,45 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) return -EINVAL; } + tmp = regs[DS1307_REG_SECS]; + switch (ds1307->type) { + case ds_1307: + case m41t0: + case m41t00: + case m41t11: + if (tmp & DS1307_BIT_CH) + return -EINVAL; + break; + case ds_1308: + case ds_1338: + if (tmp & DS1307_BIT_CH) + return -EINVAL; + + ret = regmap_read(ds1307->regmap, DS1307_REG_CONTROL, &tmp); + if (ret) + return ret; + if (tmp & DS1338_BIT_OSF) + return -EINVAL; + break; + case ds_1340: + if (tmp & DS1340_BIT_nEOSC) + return -EINVAL; + + ret = regmap_read(ds1307->regmap, DS1340_REG_FLAG, &tmp); + if (ret) + return ret; + if (tmp & DS1340_BIT_OSF) + return -EINVAL; + break; + case mcp794xx: + if (!(tmp & MCP794XX_BIT_ST)) + return -EINVAL; + + break; + default: + break; + } + t->tm_sec = bcd2bin(regs[DS1307_REG_SECS] & 0x7f); t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f); tmp = regs[DS1307_REG_HOUR] & 0x3f; @@ -286,7 +325,17 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) if (t->tm_year > 199 && chip->century_bit) regs[chip->century_reg] |= chip->century_bit; - if (ds1307->type == mcp794xx) { + switch (ds1307->type) { + case ds_1308: + case ds_1338: + regmap_update_bits(ds1307->regmap, DS1307_REG_CONTROL, + DS1338_BIT_OSF, 0); + break; + case ds_1340: + regmap_update_bits(ds1307->regmap, DS1340_REG_FLAG, + DS1340_BIT_OSF, 0); + break; + case mcp794xx: /* * these bits were cleared when preparing the date/time * values and need to be set again before writing the @@ -294,6 +343,9 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) */ regs[DS1307_REG_SECS] |= MCP794XX_BIT_ST; regs[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN; + break; + default: + break; } dev_dbg(dev, "%s: %7ph\n", "write", regs); @@ -1702,7 +1754,6 @@ static int ds1307_probe(struct i2c_client *client, break; } -read_rtc: /* read RTC registers */ err = regmap_bulk_read(ds1307->regmap, chip->offset, regs, sizeof(regs)); @@ -1711,75 +1762,11 @@ read_rtc: goto exit; } - /* - * minimal sanity checking; some chips (like DS1340) don't - * specify the extra bits as must-be-zero, but there are - * still a few values that are clearly out-of-range. - */ - tmp = regs[DS1307_REG_SECS]; - switch (ds1307->type) { - case ds_1307: - case m41t0: - case m41t00: - case m41t11: - /* clock halted? turn it on, so clock can tick. 
*/ - if (tmp & DS1307_BIT_CH) { - regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); - dev_warn(ds1307->dev, "SET TIME!\n"); - goto read_rtc; - } - break; - case ds_1308: - case ds_1338: - /* clock halted? turn it on, so clock can tick. */ - if (tmp & DS1307_BIT_CH) - regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); - - /* oscillator fault? clear flag, and warn */ - if (regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) { - regmap_write(ds1307->regmap, DS1307_REG_CONTROL, - regs[DS1307_REG_CONTROL] & - ~DS1338_BIT_OSF); - dev_warn(ds1307->dev, "SET TIME!\n"); - goto read_rtc; - } - break; - case ds_1340: - /* clock halted? turn it on, so clock can tick. */ - if (tmp & DS1340_BIT_nEOSC) - regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); - - err = regmap_read(ds1307->regmap, DS1340_REG_FLAG, &tmp); - if (err) { - dev_dbg(ds1307->dev, "read error %d\n", err); - goto exit; - } - - /* oscillator fault? clear flag, and warn */ - if (tmp & DS1340_BIT_OSF) { - regmap_write(ds1307->regmap, DS1340_REG_FLAG, 0); - dev_warn(ds1307->dev, "SET TIME!\n"); - } - break; - case mcp794xx: - /* make sure that the backup battery is enabled */ - if (!(regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) { - regmap_write(ds1307->regmap, DS1307_REG_WDAY, - regs[DS1307_REG_WDAY] | - MCP794XX_BIT_VBATEN); - } - - /* clock halted? turn it on, so clock can tick. */ - if (!(tmp & MCP794XX_BIT_ST)) { - regmap_write(ds1307->regmap, DS1307_REG_SECS, - MCP794XX_BIT_ST); - dev_warn(ds1307->dev, "SET TIME!\n"); - goto read_rtc; - } - - break; - default: - break; + if (ds1307->type == mcp794xx && + !(regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) { + regmap_write(ds1307->regmap, DS1307_REG_WDAY, + regs[DS1307_REG_WDAY] | + MCP794XX_BIT_VBATEN); } tmp = regs[DS1307_REG_HOUR]; diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c index 1e9f429ada64..9df0c44512b8 100644 --- a/drivers/rtc/rtc-ds2404.c +++ b/drivers/rtc/rtc-ds2404.c @@ -182,9 +182,10 @@ static void ds2404_enable_osc(struct device *dev) static int ds2404_read_time(struct device *dev, struct rtc_time *dt) { unsigned long time = 0; + __le32 hw_time = 0; - ds2404_read_memory(dev, 0x203, 4, (u8 *)&time); - time = le32_to_cpu(time); + ds2404_read_memory(dev, 0x203, 4, (u8 *)&hw_time); + time = le32_to_cpu(hw_time); rtc_time64_to_tm(time, dt); return 0; diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c index 1caa21b82c7d..677ec2da13d8 100644 --- a/drivers/rtc/rtc-fm3130.c +++ b/drivers/rtc/rtc-fm3130.c @@ -104,8 +104,7 @@ static int fm3130_get_time(struct device *dev, struct rtc_time *t) fm3130_rtc_mode(dev, FM3130_MODE_READ); /* read the RTC date and time registers all at once */ - tmp = i2c_transfer(to_i2c_adapter(fm3130->client->dev.parent), - fm3130->msg, 2); + tmp = i2c_transfer(fm3130->client->adapter, fm3130->msg, 2); if (tmp != 2) { dev_err(dev, "%s error %d\n", "read", tmp); return -EIO; @@ -197,8 +196,7 @@ static int fm3130_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) } /* read the RTC alarm registers all at once */ - tmp = i2c_transfer(to_i2c_adapter(fm3130->client->dev.parent), - &fm3130->msg[2], 2); + tmp = i2c_transfer(fm3130->client->adapter, &fm3130->msg[2], 2); if (tmp != 2) { dev_err(dev, "%s error %d\n", "read", tmp); return -EIO; @@ -348,7 +346,7 @@ static int fm3130_probe(struct i2c_client *client, struct fm3130 *fm3130; int err = -ENODEV; int tmp; - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C | 
I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c index 19642bfd913a..c933045fe04b 100644 --- a/drivers/rtc/rtc-imx-sc.c +++ b/drivers/rtc/rtc-imx-sc.c @@ -3,6 +3,7 @@ * Copyright 2018 NXP. */ +#include <dt-bindings/firmware/imx/rsrc.h> #include <linux/arm-smccc.h> #include <linux/firmware/imx/sci.h> #include <linux/module.h> @@ -11,11 +12,15 @@ #include <linux/rtc.h> #define IMX_SC_TIMER_FUNC_GET_RTC_SEC1970 9 +#define IMX_SC_TIMER_FUNC_SET_RTC_ALARM 8 #define IMX_SC_TIMER_FUNC_SET_RTC_TIME 6 #define IMX_SIP_SRTC 0xC2000002 #define IMX_SIP_SRTC_SET_TIME 0x0 +#define SC_IRQ_GROUP_RTC 2 +#define SC_IRQ_RTC 1 + static struct imx_sc_ipc *rtc_ipc_handle; static struct rtc_device *imx_sc_rtc; @@ -24,6 +29,16 @@ struct imx_sc_msg_timer_get_rtc_time { u32 time; } __packed; +struct imx_sc_msg_timer_rtc_set_alarm { + struct imx_sc_rpc_msg hdr; + u16 year; + u8 mon; + u8 day; + u8 hour; + u8 min; + u8 sec; +} __packed; + static int imx_sc_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct imx_sc_msg_timer_get_rtc_time msg; @@ -60,9 +75,77 @@ static int imx_sc_rtc_set_time(struct device *dev, struct rtc_time *tm) return res.a0; } +static int imx_sc_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + return imx_scu_irq_group_enable(SC_IRQ_GROUP_RTC, SC_IRQ_RTC, enable); +} + +static int imx_sc_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + /* + * SCU firmware does NOT provide read alarm API, but .read_alarm + * callback is required by RTC framework to support alarm function, + * so just return here. + */ + return 0; +} + +static int imx_sc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct imx_sc_msg_timer_rtc_set_alarm msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + struct rtc_time *alrm_tm = &alrm->time; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_TIMER; + hdr->func = IMX_SC_TIMER_FUNC_SET_RTC_ALARM; + hdr->size = 3; + + msg.year = alrm_tm->tm_year + 1900; + msg.mon = alrm_tm->tm_mon + 1; + msg.day = alrm_tm->tm_mday; + msg.hour = alrm_tm->tm_hour; + msg.min = alrm_tm->tm_min; + msg.sec = alrm_tm->tm_sec; + + ret = imx_scu_call_rpc(rtc_ipc_handle, &msg, true); + if (ret) { + dev_err(dev, "set rtc alarm failed, ret %d\n", ret); + return ret; + } + + ret = imx_sc_rtc_alarm_irq_enable(dev, alrm->enabled); + if (ret) { + dev_err(dev, "enable rtc alarm failed, ret %d\n", ret); + return ret; + } + + return 0; +} + static const struct rtc_class_ops imx_sc_rtc_ops = { .read_time = imx_sc_rtc_read_time, .set_time = imx_sc_rtc_set_time, + .read_alarm = imx_sc_rtc_read_alarm, + .set_alarm = imx_sc_rtc_set_alarm, + .alarm_irq_enable = imx_sc_rtc_alarm_irq_enable, +}; + +static int imx_sc_rtc_alarm_notify(struct notifier_block *nb, + unsigned long event, void *group) +{ + /* ignore non-rtc irq */ + if (!((event & SC_IRQ_RTC) && (*(u8 *)group == SC_IRQ_GROUP_RTC))) + return 0; + + rtc_update_irq(imx_sc_rtc, 1, RTC_IRQF | RTC_AF); + + return 0; +} + +static struct notifier_block imx_sc_rtc_alarm_sc_notifier = { + .notifier_call = imx_sc_rtc_alarm_notify, }; static int imx_sc_rtc_probe(struct platform_device *pdev) @@ -73,6 +156,8 @@ static int imx_sc_rtc_probe(struct platform_device *pdev) if (ret) return ret; + device_init_wakeup(&pdev->dev, true); + imx_sc_rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(imx_sc_rtc)) return PTR_ERR(imx_sc_rtc); @@ -87,6 +172,8 @@ static int imx_sc_rtc_probe(struct platform_device *pdev) return ret; } + 
imx_scu_irq_register_notifier(&imx_sc_rtc_alarm_sc_notifier); + return 0; } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 9fdc284c943b..5f46f85f814b 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -872,7 +872,7 @@ static struct notifier_block wdt_notifier = { static int m41t80_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; int rc = 0; struct rtc_time tm; struct m41t80_data *m41t80_data = NULL; diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index f431263e2d39..fb542a930bf0 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -40,7 +40,7 @@ #include <linux/rtc.h> #include <linux/spi/spi.h> #include <linux/module.h> -#include <linux/sysfs.h> +#include <linux/regmap.h> /* REGISTERS */ #define PCF2123_REG_CTRL1 (0x00) /* Control Register 1 */ @@ -95,6 +95,7 @@ #define OFFSET_SIGN_BIT 6 /* 2's complement sign bit */ #define OFFSET_COARSE BIT(7) /* Coarse mode offset */ #define OFFSET_STEP (2170) /* Offset step in parts per billion */ +#define OFFSET_MASK GENMASK(6, 0) /* Offset value */ /* READ/WRITE ADDRESS BITS */ #define PCF2123_WRITE BIT(4) @@ -103,120 +104,35 @@ static struct spi_driver pcf2123_driver; -struct pcf2123_sysfs_reg { - struct device_attribute attr; - char name[2]; -}; - struct pcf2123_plat_data { struct rtc_device *rtc; - struct pcf2123_sysfs_reg regs[16]; + struct regmap *map; }; -/* - * Causes a 30 nanosecond delay to ensure that the PCF2123 chip select - * is released properly after an SPI write. This function should be - * called after EVERY read/write call over SPI. - */ -static inline void pcf2123_delay_trec(void) -{ - ndelay(30); -} - -static int pcf2123_read(struct device *dev, u8 reg, u8 *rxbuf, size_t size) -{ - struct spi_device *spi = to_spi_device(dev); - int ret; - - reg |= PCF2123_READ; - ret = spi_write_then_read(spi, ®, 1, rxbuf, size); - pcf2123_delay_trec(); - - return ret; -} - -static int pcf2123_write(struct device *dev, u8 *txbuf, size_t size) -{ - struct spi_device *spi = to_spi_device(dev); - int ret; - - txbuf[0] |= PCF2123_WRITE; - ret = spi_write(spi, txbuf, size); - pcf2123_delay_trec(); - - return ret; -} - -static int pcf2123_write_reg(struct device *dev, u8 reg, u8 val) -{ - u8 txbuf[2]; - - txbuf[0] = reg; - txbuf[1] = val; - return pcf2123_write(dev, txbuf, sizeof(txbuf)); -} - -static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr, - char *buffer) -{ - struct pcf2123_sysfs_reg *r; - u8 rxbuf[1]; - unsigned long reg; - int ret; - - r = container_of(attr, struct pcf2123_sysfs_reg, attr); - - ret = kstrtoul(r->name, 16, ®); - if (ret) - return ret; - - ret = pcf2123_read(dev, reg, rxbuf, 1); - if (ret < 0) - return -EIO; - - return sprintf(buffer, "0x%x\n", rxbuf[0]); -} +static const struct regmap_config pcf2123_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .read_flag_mask = PCF2123_READ, + .write_flag_mask = PCF2123_WRITE, + .max_register = PCF2123_REG_CTDWN_TMR, +}; -static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr, - const char *buffer, size_t count) +static int pcf2123_read_offset(struct device *dev, long *offset) { - struct pcf2123_sysfs_reg *r; - unsigned long reg; - unsigned long val; - - int ret; + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); + int ret, val; + unsigned int reg; - r = container_of(attr, struct pcf2123_sysfs_reg, attr); - - ret 
= kstrtoul(r->name, 16, ®); + ret = regmap_read(pdata->map, PCF2123_REG_OFFSET, ®); if (ret) return ret; - ret = kstrtoul(buffer, 10, &val); - if (ret) - return ret; - - ret = pcf2123_write_reg(dev, reg, val); - if (ret < 0) - return -EIO; - return count; -} - -static int pcf2123_read_offset(struct device *dev, long *offset) -{ - int ret; - s8 reg; - - ret = pcf2123_read(dev, PCF2123_REG_OFFSET, ®, 1); - if (ret < 0) - return ret; + val = sign_extend32((reg & OFFSET_MASK), OFFSET_SIGN_BIT); if (reg & OFFSET_COARSE) - reg <<= 1; /* multiply by 2 and sign extend */ - else - reg = sign_extend32(reg, OFFSET_SIGN_BIT); + val *= 2; - *offset = ((long)reg) * OFFSET_STEP; + *offset = ((long)val) * OFFSET_STEP; return 0; } @@ -233,6 +149,7 @@ static int pcf2123_read_offset(struct device *dev, long *offset) */ static int pcf2123_set_offset(struct device *dev, long offset) { + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); s8 reg; if (offset > OFFSET_STEP * 127) @@ -240,7 +157,7 @@ static int pcf2123_set_offset(struct device *dev, long offset) else if (offset < OFFSET_STEP * -128) reg = -128; else - reg = (s8)((offset + (OFFSET_STEP >> 1)) / OFFSET_STEP); + reg = DIV_ROUND_CLOSEST(offset, OFFSET_STEP); /* choose fine offset only for odd values in the normal range */ if (reg & 1 && reg <= 63 && reg >= -64) { @@ -252,16 +169,18 @@ static int pcf2123_set_offset(struct device *dev, long offset) reg |= OFFSET_COARSE; } - return pcf2123_write_reg(dev, PCF2123_REG_OFFSET, reg); + return regmap_write(pdata->map, PCF2123_REG_OFFSET, (unsigned int)reg); } static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm) { + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); u8 rxbuf[7]; int ret; - ret = pcf2123_read(dev, PCF2123_REG_SC, rxbuf, sizeof(rxbuf)); - if (ret < 0) + ret = regmap_bulk_read(pdata->map, PCF2123_REG_SC, rxbuf, + sizeof(rxbuf)); + if (ret) return ret; if (rxbuf[0] & OSC_HAS_STOPPED) { @@ -279,82 +198,168 @@ static int pcf2123_rtc_read_time(struct device *dev, struct rtc_time *tm) if (tm->tm_year < 70) tm->tm_year += 100; /* assume we are in 1970...2069 */ - dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, " - "mday=%d, mon=%d, year=%d, wday=%d\n", - __func__, - tm->tm_sec, tm->tm_min, tm->tm_hour, - tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + dev_dbg(dev, "%s: tm is %ptR\n", __func__, tm); return 0; } static int pcf2123_rtc_set_time(struct device *dev, struct rtc_time *tm) { - u8 txbuf[8]; + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); + u8 txbuf[7]; int ret; - dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, " - "mday=%d, mon=%d, year=%d, wday=%d\n", - __func__, - tm->tm_sec, tm->tm_min, tm->tm_hour, - tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); + dev_dbg(dev, "%s: tm is %ptR\n", __func__, tm); /* Stop the counter first */ - ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_STOP); - if (ret < 0) + ret = regmap_write(pdata->map, PCF2123_REG_CTRL1, CTRL1_STOP); + if (ret) return ret; /* Set the new time */ - txbuf[0] = PCF2123_REG_SC; - txbuf[1] = bin2bcd(tm->tm_sec & 0x7F); - txbuf[2] = bin2bcd(tm->tm_min & 0x7F); - txbuf[3] = bin2bcd(tm->tm_hour & 0x3F); - txbuf[4] = bin2bcd(tm->tm_mday & 0x3F); - txbuf[5] = tm->tm_wday & 0x07; - txbuf[6] = bin2bcd((tm->tm_mon + 1) & 0x1F); /* rtc mn 1-12 */ - txbuf[7] = bin2bcd(tm->tm_year < 100 ? 
tm->tm_year : tm->tm_year - 100); - - ret = pcf2123_write(dev, txbuf, sizeof(txbuf)); - if (ret < 0) + txbuf[0] = bin2bcd(tm->tm_sec & 0x7F); + txbuf[1] = bin2bcd(tm->tm_min & 0x7F); + txbuf[2] = bin2bcd(tm->tm_hour & 0x3F); + txbuf[3] = bin2bcd(tm->tm_mday & 0x3F); + txbuf[4] = tm->tm_wday & 0x07; + txbuf[5] = bin2bcd((tm->tm_mon + 1) & 0x1F); /* rtc mn 1-12 */ + txbuf[6] = bin2bcd(tm->tm_year < 100 ? tm->tm_year : tm->tm_year - 100); + + ret = regmap_bulk_write(pdata->map, PCF2123_REG_SC, txbuf, + sizeof(txbuf)); + if (ret) return ret; /* Start the counter */ - ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_CLEAR); - if (ret < 0) + ret = regmap_write(pdata->map, PCF2123_REG_CTRL1, CTRL1_CLEAR); + if (ret) + return ret; + + return 0; +} + +static int pcf2123_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); + u8 rxbuf[4]; + int ret; + unsigned int val = 0; + + ret = regmap_bulk_read(pdata->map, PCF2123_REG_ALRM_MN, rxbuf, + sizeof(rxbuf)); + if (ret) + return ret; + + alm->time.tm_min = bcd2bin(rxbuf[0] & 0x7F); + alm->time.tm_hour = bcd2bin(rxbuf[1] & 0x3F); + alm->time.tm_mday = bcd2bin(rxbuf[2] & 0x3F); + alm->time.tm_wday = bcd2bin(rxbuf[3] & 0x07); + + dev_dbg(dev, "%s: alm is %ptR\n", __func__, &alm->time); + + ret = regmap_read(pdata->map, PCF2123_REG_CTRL2, &val); + if (ret) + return ret; + + alm->enabled = !!(val & CTRL2_AIE); + + return 0; +} + +static int pcf2123_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); + u8 txbuf[4]; + int ret; + + dev_dbg(dev, "%s: alm is %ptR\n", __func__, &alm->time); + + /* Ensure alarm flag is clear */ + ret = regmap_update_bits(pdata->map, PCF2123_REG_CTRL2, CTRL2_AF, 0); + if (ret) return ret; + /* Disable alarm interrupt */ + ret = regmap_update_bits(pdata->map, PCF2123_REG_CTRL2, CTRL2_AIE, 0); + if (ret) + return ret; + + /* Set new alarm */ + txbuf[0] = bin2bcd(alm->time.tm_min & 0x7F); + txbuf[1] = bin2bcd(alm->time.tm_hour & 0x3F); + txbuf[2] = bin2bcd(alm->time.tm_mday & 0x3F); + txbuf[3] = bin2bcd(alm->time.tm_wday & 0x07); + + ret = regmap_bulk_write(pdata->map, PCF2123_REG_ALRM_MN, txbuf, + sizeof(txbuf)); + if (ret) + return ret; + + /* Enable alarm interrupt */ + if (alm->enabled) { + ret = regmap_update_bits(pdata->map, PCF2123_REG_CTRL2, + CTRL2_AIE, CTRL2_AIE); + if (ret) + return ret; + } + return 0; } +static irqreturn_t pcf2123_rtc_irq(int irq, void *dev) +{ + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); + struct mutex *lock = &pdata->rtc->ops_lock; + unsigned int val = 0; + int ret = IRQ_NONE; + + mutex_lock(lock); + regmap_read(pdata->map, PCF2123_REG_CTRL2, &val); + + /* Alarm? 
*/ + if (val & CTRL2_AF) { + ret = IRQ_HANDLED; + + /* Clear alarm flag */ + regmap_update_bits(pdata->map, PCF2123_REG_CTRL2, CTRL2_AF, 0); + + rtc_update_irq(pdata->rtc, 1, RTC_IRQF | RTC_AF); + } + + mutex_unlock(lock); + + return ret; +} + static int pcf2123_reset(struct device *dev) { + struct pcf2123_plat_data *pdata = dev_get_platdata(dev); int ret; - u8 rxbuf[2]; + unsigned int val = 0; - ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_SW_RESET); - if (ret < 0) + ret = regmap_write(pdata->map, PCF2123_REG_CTRL1, CTRL1_SW_RESET); + if (ret) return ret; /* Stop the counter */ dev_dbg(dev, "stopping RTC\n"); - ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_STOP); - if (ret < 0) + ret = regmap_write(pdata->map, PCF2123_REG_CTRL1, CTRL1_STOP); + if (ret) return ret; /* See if the counter was actually stopped */ dev_dbg(dev, "checking for presence of RTC\n"); - ret = pcf2123_read(dev, PCF2123_REG_CTRL1, rxbuf, sizeof(rxbuf)); - if (ret < 0) + ret = regmap_read(pdata->map, PCF2123_REG_CTRL1, &val); + if (ret) return ret; - dev_dbg(dev, "received data from RTC (0x%02X 0x%02X)\n", - rxbuf[0], rxbuf[1]); - if (!(rxbuf[0] & CTRL1_STOP)) + dev_dbg(dev, "received data from RTC (0x%08X)\n", val); + if (!(val & CTRL1_STOP)) return -ENODEV; /* Start the counter */ - ret = pcf2123_write_reg(dev, PCF2123_REG_CTRL1, CTRL1_CLEAR); - if (ret < 0) + ret = regmap_write(pdata->map, PCF2123_REG_CTRL1, CTRL1_CLEAR); + if (ret) return ret; return 0; @@ -365,7 +370,8 @@ static const struct rtc_class_ops pcf2123_rtc_ops = { .set_time = pcf2123_rtc_set_time, .read_offset = pcf2123_read_offset, .set_offset = pcf2123_set_offset, - + .read_alarm = pcf2123_rtc_read_alarm, + .set_alarm = pcf2123_rtc_set_alarm, }; static int pcf2123_probe(struct spi_device *spi) @@ -373,7 +379,7 @@ static int pcf2123_probe(struct spi_device *spi) struct rtc_device *rtc; struct rtc_time tm; struct pcf2123_plat_data *pdata; - int ret, i; + int ret = 0; pdata = devm_kzalloc(&spi->dev, sizeof(struct pcf2123_plat_data), GFP_KERNEL); @@ -381,6 +387,13 @@ static int pcf2123_probe(struct spi_device *spi) return -ENOMEM; spi->dev.platform_data = pdata; + pdata->map = devm_regmap_init_spi(spi, &pcf2123_regmap_config); + + if (IS_ERR(pdata->map)) { + dev_err(&spi->dev, "regmap init failed.\n"); + goto kfree_exit; + } + ret = pcf2123_rtc_read_time(&spi->dev, &tm); if (ret < 0) { ret = pcf2123_reset(&spi->dev); @@ -405,47 +418,31 @@ static int pcf2123_probe(struct spi_device *spi) pdata->rtc = rtc; - for (i = 0; i < 16; i++) { - sysfs_attr_init(&pdata->regs[i].attr.attr); - sprintf(pdata->regs[i].name, "%1x", i); - pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR; - pdata->regs[i].attr.attr.name = pdata->regs[i].name; - pdata->regs[i].attr.show = pcf2123_show; - pdata->regs[i].attr.store = pcf2123_store; - ret = device_create_file(&spi->dev, &pdata->regs[i].attr); - if (ret) { - dev_err(&spi->dev, "Unable to create sysfs %s\n", - pdata->regs[i].name); - goto sysfs_exit; - } + /* Register alarm irq */ + if (spi->irq > 0) { + ret = devm_request_threaded_irq(&spi->dev, spi->irq, NULL, + pcf2123_rtc_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + pcf2123_driver.driver.name, &spi->dev); + if (!ret) + device_init_wakeup(&spi->dev, true); + else + dev_err(&spi->dev, "could not request irq.\n"); } - return 0; + /* The PCF2123's alarm only has minute accuracy. Must add timer + * support to this driver to generate interrupts more than once + * per minute. 
+ */ + pdata->rtc->uie_unsupported = 1; -sysfs_exit: - for (i--; i >= 0; i--) - device_remove_file(&spi->dev, &pdata->regs[i].attr); + return 0; kfree_exit: spi->dev.platform_data = NULL; return ret; } -static int pcf2123_remove(struct spi_device *spi) -{ - struct pcf2123_plat_data *pdata = dev_get_platdata(&spi->dev); - int i; - - if (pdata) { - for (i = 0; i < 16; i++) - if (pdata->regs[i].name[0]) - device_remove_file(&spi->dev, - &pdata->regs[i].attr); - } - - return 0; -} - #ifdef CONFIG_OF static const struct of_device_id pcf2123_dt_ids[] = { { .compatible = "nxp,rtc-pcf2123", }, @@ -461,7 +458,6 @@ static struct spi_driver pcf2123_driver = { .of_match_table = of_match_ptr(pcf2123_dt_ids), }, .probe = pcf2123_probe, - .remove = pcf2123_remove, }; module_spi_driver(pcf2123_driver); diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index c569dfe8c2ae..ac159d24286d 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -560,7 +560,6 @@ static int pcf8563_probe(struct i2c_client *client, struct pcf8563 *pcf8563; int err; unsigned char buf; - unsigned char alm_pending; dev_dbg(&client->dev, "%s\n", __func__); @@ -584,13 +583,13 @@ static int pcf8563_probe(struct i2c_client *client, return err; } - err = pcf8563_get_alarm_mode(client, NULL, &alm_pending); - if (err) { - dev_err(&client->dev, "%s: read error\n", __func__); + /* Clear flags and disable interrupts */ + buf = 0; + err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, &buf); + if (err < 0) { + dev_err(&client->dev, "%s: write error\n", __func__); return err; } - if (alm_pending) - pcf8563_set_alarm_mode(client, 0); pcf8563->rtc = devm_rtc_device_register(&client->dev, pcf8563_driver.driver.name, @@ -602,7 +601,7 @@ static int pcf8563_probe(struct i2c_client *client, if (client->irq > 0) { err = devm_request_threaded_irq(&client->dev, client->irq, NULL, pcf8563_irq, - IRQF_SHARED|IRQF_ONESHOT|IRQF_TRIGGER_FALLING, + IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW, pcf8563_driver.driver.name, client); if (err) { dev_err(&client->dev, "unable to request IRQ %d\n", diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 0b102c3cf5a4..fc5243400108 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -517,7 +517,7 @@ static int rx8900_trickle_charger_init(struct rv8803_data *rv8803) static int rv8803_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; struct rv8803_data *rv8803; int err, flags; struct nvmem_config nvmem_cfg = { diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index b15ad8e10938..8102469e27c0 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -433,7 +433,7 @@ static struct rtc_class_ops rx8010_rtc_ops = { static int rx8010_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; struct rx8010_data *rx8010; int err = 0; diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index cb082ad19471..b9bda10589e0 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -501,7 +501,7 @@ static void rx8025_sysfs_unregister(struct device *dev) static int rx8025_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct i2c_adapter *adapter = client->adapter; 
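Several of the RTC conversions above (pcf2123, pcf8563, and wm831x later in this series) settle on the same alarm-interrupt pattern: a threaded handler requested with devm_request_threaded_irq(), IRQF_ONESHOT, and a level trigger, with wakeup enabled only when the request succeeds. A minimal sketch of that pattern follows; the foo_* names and registers are placeholders, not code from any of these drivers.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/rtc.h>

#define FOO_REG_CTRL2	0x01	/* hypothetical control/status register */
#define FOO_AF		BIT(3)	/* hypothetical alarm flag */

struct foo_rtc {
	struct rtc_device *rtc;
	struct regmap *map;
};

static irqreturn_t foo_rtc_alarm_irq(int irq, void *dev_id)
{
	struct foo_rtc *foo = dev_id;

	/* Clear the alarm flag so the level-triggered line deasserts. */
	regmap_update_bits(foo->map, FOO_REG_CTRL2, FOO_AF, 0);
	rtc_update_irq(foo->rtc, 1, RTC_IRQF | RTC_AF);

	return IRQ_HANDLED;
}

static int foo_rtc_request_alarm_irq(struct device *dev, struct foo_rtc *foo,
				     int irq)
{
	int ret;

	ret = devm_request_threaded_irq(dev, irq, NULL, foo_rtc_alarm_irq,
					IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					dev_name(dev), foo);
	if (ret)
		dev_err(dev, "could not request alarm irq %d: %d\n", irq, ret);
	else
		device_init_wakeup(dev, true);

	return ret;
}

With a NULL hard handler and IRQF_ONESHOT the line stays masked until the threaded handler has cleared the flag, which is generally why a level trigger is the safer choice here.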
struct rx8025_data *rx8025; int err = 0; diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c index 8c37acb4a007..84806ff763cf 100644 --- a/drivers/rtc/rtc-s35390a.c +++ b/drivers/rtc/rtc-s35390a.c @@ -32,21 +32,22 @@ #define S35390A_ALRM_BYTE_MINS 2 /* flags for STATUS1 */ -#define S35390A_FLAG_POC 0x01 -#define S35390A_FLAG_BLD 0x02 -#define S35390A_FLAG_INT2 0x04 -#define S35390A_FLAG_24H 0x40 -#define S35390A_FLAG_RESET 0x80 +#define S35390A_FLAG_POC BIT(0) +#define S35390A_FLAG_BLD BIT(1) +#define S35390A_FLAG_INT2 BIT(2) +#define S35390A_FLAG_24H BIT(6) +#define S35390A_FLAG_RESET BIT(7) /* flag for STATUS2 */ -#define S35390A_FLAG_TEST 0x01 - -#define S35390A_INT2_MODE_MASK 0xF0 +#define S35390A_FLAG_TEST BIT(0) +/* INT2 pin output mode */ +#define S35390A_INT2_MODE_MASK 0x0E #define S35390A_INT2_MODE_NOINTR 0x00 -#define S35390A_INT2_MODE_FREQ 0x10 -#define S35390A_INT2_MODE_ALARM 0x40 -#define S35390A_INT2_MODE_PMIN_EDG 0x20 +#define S35390A_INT2_MODE_ALARM BIT(1) /* INT2AE */ +#define S35390A_INT2_MODE_PMIN_EDG BIT(2) /* INT2ME */ +#define S35390A_INT2_MODE_FREQ BIT(3) /* INT2FE */ +#define S35390A_INT2_MODE_PMIN (BIT(3) | BIT(2)) /* INT2FE | INT2ME */ static const struct i2c_device_id s35390a_id[] = { { "s35390a", 0 }, @@ -284,6 +285,9 @@ static int s35390a_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) alm->time.tm_min, alm->time.tm_hour, alm->time.tm_mday, alm->time.tm_mon, alm->time.tm_year, alm->time.tm_wday); + if (alm->time.tm_sec != 0) + dev_warn(&client->dev, "Alarms are only supported on a per minute basis!\n"); + /* disable interrupt (which deasserts the irq line) */ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); if (err < 0) @@ -299,9 +303,6 @@ static int s35390a_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) else sts = S35390A_INT2_MODE_NOINTR; - /* This chip expects the bits of each byte to be in reverse order */ - sts = bitrev8(sts); - /* set interupt mode*/ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts)); if (err < 0) @@ -339,7 +340,7 @@ static int s35390a_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) if (err < 0) return err; - if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) { + if ((sts & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) { /* * When the alarm isn't enabled, the register to configure * the alarm time isn't accessible. 
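The s35390a hunks above replace the magic STATUS2 constants with BIT()-based definitions laid out the way the register actually reads back, which is why the bitrev8() call on the INT2 mode byte can be dropped and the mode mask shrinks to 0x0E. A small sketch of how the mode byte is composed and checked with those definitions; the helper names are hypothetical, the macros are the ones defined in the hunk above.

/* Sketch only: build and test the STATUS2 INT2 output mode using the
 * BIT()-based definitions above; no bit reversal is required. */
static u8 s35390a_compose_int2_mode(bool alarm, bool per_minute_edge)
{
	u8 sts = S35390A_INT2_MODE_NOINTR;

	if (alarm)
		sts |= S35390A_INT2_MODE_ALARM;		/* INT2AE */
	else if (per_minute_edge)
		sts |= S35390A_INT2_MODE_PMIN_EDG;	/* INT2ME */

	return sts;
}

static bool s35390a_int2_mode_is_alarm(u8 sts)
{
	return (sts & S35390A_INT2_MODE_MASK) == S35390A_INT2_MODE_ALARM;
}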
@@ -431,14 +432,14 @@ static int s35390a_probe(struct i2c_client *client, unsigned int i; struct s35390a *s35390a; char buf, status1; + struct device *dev = &client->dev; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { err = -ENODEV; goto exit; } - s35390a = devm_kzalloc(&client->dev, sizeof(struct s35390a), - GFP_KERNEL); + s35390a = devm_kzalloc(dev, sizeof(struct s35390a), GFP_KERNEL); if (!s35390a) { err = -ENOMEM; goto exit; @@ -452,8 +453,8 @@ static int s35390a_probe(struct i2c_client *client, s35390a->client[i] = i2c_new_dummy(client->adapter, client->addr + i); if (!s35390a->client[i]) { - dev_err(&client->dev, "Address %02x unavailable\n", - client->addr + i); + dev_err(dev, "Address %02x unavailable\n", + client->addr + i); err = -EBUSY; goto exit_dummy; } @@ -462,7 +463,7 @@ static int s35390a_probe(struct i2c_client *client, err_read = s35390a_read_status(s35390a, &status1); if (err_read < 0) { err = err_read; - dev_err(&client->dev, "error resetting chip\n"); + dev_err(dev, "error resetting chip\n"); goto exit_dummy; } @@ -476,28 +477,30 @@ static int s35390a_probe(struct i2c_client *client, buf = 0; err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1); if (err < 0) { - dev_err(&client->dev, "error disabling alarm"); + dev_err(dev, "error disabling alarm"); goto exit_dummy; } } else { err = s35390a_disable_test_mode(s35390a); if (err < 0) { - dev_err(&client->dev, "error disabling test mode\n"); + dev_err(dev, "error disabling test mode\n"); goto exit_dummy; } } - device_set_wakeup_capable(&client->dev, 1); + device_set_wakeup_capable(dev, 1); - s35390a->rtc = devm_rtc_device_register(&client->dev, - s35390a_driver.driver.name, - &s35390a_rtc_ops, THIS_MODULE); + s35390a->rtc = devm_rtc_device_register(dev, s35390a_driver.driver.name, + &s35390a_rtc_ops, THIS_MODULE); if (IS_ERR(s35390a->rtc)) { err = PTR_ERR(s35390a->rtc); goto exit_dummy; } + /* supports per-minute alarms only, therefore set uie_unsupported */ + s35390a->rtc->uie_unsupported = 1; + if (status1 & S35390A_FLAG_INT2) rtc_update_irq(s35390a->rtc, 1, RTC_AF); diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index 5fe021821831..49474a31c66d 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -162,10 +162,6 @@ static int st_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) now_secs = rtc_tm_to_time64(&now); alarm_secs = rtc_tm_to_time64(&t->time); - /* Invalid alarm time */ - if (now_secs > alarm_secs) - return -EINVAL; - memcpy(&rtc->alarm, t, sizeof(struct rtc_wkalrm)); /* Now many secs to fire */ diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c index 8e6c9b3bcc29..773a1990b93f 100644 --- a/drivers/rtc/rtc-stm32.c +++ b/drivers/rtc/rtc-stm32.c @@ -519,11 +519,7 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) /* Write to Alarm register */ writel_relaxed(alrmar, rtc->base + regs->alrmar); - if (alrm->enabled) - stm32_rtc_alarm_irq_enable(dev, 1); - else - stm32_rtc_alarm_irq_enable(dev, 0); - + stm32_rtc_alarm_irq_enable(dev, alrm->enabled); end: stm32_rtc_wpr_lock(rtc); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 8128ec200ba2..c0e75c373605 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -672,6 +672,7 @@ static const struct of_device_id sun6i_rtc_dt_ids[] = { { .compatible = "allwinner,sun6i-a31-rtc" }, { .compatible = "allwinner,sun8i-a23-rtc" }, { .compatible = "allwinner,sun8i-h3-rtc" }, + { .compatible = "allwinner,sun8i-r40-rtc" }, { .compatible = 
"allwinner,sun8i-v3-rtc" }, { .compatible = "allwinner,sun50i-h5-rtc" }, { /* sentinel */ }, diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c index f0ce76865434..8fa1b3febf69 100644 --- a/drivers/rtc/rtc-tegra.c +++ b/drivers/rtc/rtc-tegra.c @@ -2,7 +2,7 @@ /* * An RTC driver for the NVIDIA Tegra 200 series internal RTC. * - * Copyright (c) 2010, NVIDIA Corporation. + * Copyright (c) 2010-2019, NVIDIA Corporation. */ #include <linux/clk.h> @@ -18,10 +18,10 @@ #include <linux/rtc.h> #include <linux/slab.h> -/* set to 1 = busy every eight 32kHz clocks during copy of sec+msec to AHB */ +/* Set to 1 = busy every eight 32 kHz clocks during copy of sec+msec to AHB. */ #define TEGRA_RTC_REG_BUSY 0x004 #define TEGRA_RTC_REG_SECONDS 0x008 -/* when msec is read, the seconds are buffered into shadow seconds. */ +/* When msec is read, the seconds are buffered into shadow seconds. */ #define TEGRA_RTC_REG_SHADOW_SECONDS 0x00c #define TEGRA_RTC_REG_MILLI_SECONDS 0x010 #define TEGRA_RTC_REG_SECONDS_ALARM0 0x014 @@ -46,44 +46,48 @@ #define TEGRA_RTC_INTR_STATUS_SEC_ALARM0 (1<<0) struct tegra_rtc_info { - struct platform_device *pdev; - struct rtc_device *rtc_dev; - void __iomem *rtc_base; /* NULL if not initialized. */ - struct clk *clk; - int tegra_rtc_irq; /* alarm and periodic irq */ - spinlock_t tegra_rtc_lock; + struct platform_device *pdev; + struct rtc_device *rtc; + void __iomem *base; /* NULL if not initialized */ + struct clk *clk; + int irq; /* alarm and periodic IRQ */ + spinlock_t lock; }; -/* RTC hardware is busy when it is updating its values over AHB once - * every eight 32kHz clocks (~250uS). - * outside of these updates the CPU is free to write. - * CPU is always free to read. +/* + * RTC hardware is busy when it is updating its values over AHB once every + * eight 32 kHz clocks (~250 us). Outside of these updates the CPU is free to + * write. CPU is always free to read. */ static inline u32 tegra_rtc_check_busy(struct tegra_rtc_info *info) { - return readl(info->rtc_base + TEGRA_RTC_REG_BUSY) & 1; + return readl(info->base + TEGRA_RTC_REG_BUSY) & 1; } -/* Wait for hardware to be ready for writing. - * This function tries to maximize the amount of time before the next update. - * It does this by waiting for the RTC to become busy with its periodic update, - * then returning once the RTC first becomes not busy. +/* + * Wait for hardware to be ready for writing. This function tries to maximize + * the amount of time before the next update. It does this by waiting for the + * RTC to become busy with its periodic update, then returning once the RTC + * first becomes not busy. + * * This periodic update (where the seconds and milliseconds are copied to the - * AHB side) occurs every eight 32kHz clocks (~250uS). - * The behavior of this function allows us to make some assumptions without - * introducing a race, because 250uS is plenty of time to read/write a value. + * AHB side) occurs every eight 32 kHz clocks (~250 us). The behavior of this + * function allows us to make some assumptions without introducing a race, + * because 250 us is plenty of time to read/write a value. */ static int tegra_rtc_wait_while_busy(struct device *dev) { struct tegra_rtc_info *info = dev_get_drvdata(dev); + int retries = 500; /* ~490 us is the worst case, ~250 us is best */ - int retries = 500; /* ~490 us is the worst case, ~250 us is best. */ - - /* first wait for the RTC to become busy. this is when it - * posts its updated seconds+msec registers to AHB side. 
*/ + /* + * First wait for the RTC to become busy. This is when it posts its + * updated seconds+msec registers to AHB side. + */ while (tegra_rtc_check_busy(info)) { if (!retries--) goto retry_failed; + udelay(1); } @@ -91,28 +95,30 @@ static int tegra_rtc_wait_while_busy(struct device *dev) return 0; retry_failed: - dev_err(dev, "write failed:retry count exceeded.\n"); + dev_err(dev, "write failed: retry count exceeded\n"); return -ETIMEDOUT; } static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct tegra_rtc_info *info = dev_get_drvdata(dev); - unsigned long sec, msec; - unsigned long sl_irq_flags; + unsigned long flags; + u32 sec, msec; - /* RTC hardware copies seconds to shadow seconds when a read - * of milliseconds occurs. use a lock to keep other threads out. */ - spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags); + /* + * RTC hardware copies seconds to shadow seconds when a read of + * milliseconds occurs. use a lock to keep other threads out. + */ + spin_lock_irqsave(&info->lock, flags); - msec = readl(info->rtc_base + TEGRA_RTC_REG_MILLI_SECONDS); - sec = readl(info->rtc_base + TEGRA_RTC_REG_SHADOW_SECONDS); + msec = readl(info->base + TEGRA_RTC_REG_MILLI_SECONDS); + sec = readl(info->base + TEGRA_RTC_REG_SHADOW_SECONDS); - spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags); + spin_unlock_irqrestore(&info->lock, flags); rtc_time64_to_tm(sec, tm); - dev_vdbg(dev, "time read as %lu. %ptR\n", sec, tm); + dev_vdbg(dev, "time read as %u, %ptR\n", sec, tm); return 0; } @@ -120,21 +126,21 @@ static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm) static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct tegra_rtc_info *info = dev_get_drvdata(dev); - unsigned long sec; + u32 sec; int ret; - /* convert tm to seconds. */ + /* convert tm to seconds */ sec = rtc_tm_to_time64(tm); - dev_vdbg(dev, "time set to %lu. %ptR\n", sec, tm); + dev_vdbg(dev, "time set to %u, %ptR\n", sec, tm); - /* seconds only written if wait succeeded. */ + /* seconds only written if wait succeeded */ ret = tegra_rtc_wait_while_busy(dev); if (!ret) - writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS); + writel(sec, info->base + TEGRA_RTC_REG_SECONDS); dev_vdbg(dev, "time read back as %d\n", - readl(info->rtc_base + TEGRA_RTC_REG_SECONDS)); + readl(info->base + TEGRA_RTC_REG_SECONDS)); return ret; } @@ -142,22 +148,21 @@ static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm) static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct tegra_rtc_info *info = dev_get_drvdata(dev); - unsigned long sec; - unsigned tmp; + u32 sec, value; - sec = readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0); + sec = readl(info->base + TEGRA_RTC_REG_SECONDS_ALARM0); if (sec == 0) { - /* alarm is disabled. */ + /* alarm is disabled */ alarm->enabled = 0; } else { - /* alarm is enabled. 
*/ + /* alarm is enabled */ alarm->enabled = 1; rtc_time64_to_tm(sec, &alarm->time); } - tmp = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS); - alarm->pending = (tmp & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) != 0; + value = readl(info->base + TEGRA_RTC_REG_INTR_STATUS); + alarm->pending = (value & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) != 0; return 0; } @@ -165,22 +170,22 @@ static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) static int tegra_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct tegra_rtc_info *info = dev_get_drvdata(dev); - unsigned status; - unsigned long sl_irq_flags; + unsigned long flags; + u32 status; tegra_rtc_wait_while_busy(dev); - spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags); + spin_lock_irqsave(&info->lock, flags); - /* read the original value, and OR in the flag. */ - status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_MASK); + /* read the original value, and OR in the flag */ + status = readl(info->base + TEGRA_RTC_REG_INTR_MASK); if (enabled) status |= TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* set it */ else status &= ~TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* clear it */ - writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_MASK); + writel(status, info->base + TEGRA_RTC_REG_INTR_MASK); - spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags); + spin_unlock_irqrestore(&info->lock, flags); return 0; } @@ -188,7 +193,7 @@ static int tegra_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct tegra_rtc_info *info = dev_get_drvdata(dev); - unsigned long sec; + u32 sec; if (alarm->enabled) sec = rtc_tm_to_time64(&alarm->time); @@ -196,16 +201,16 @@ static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) sec = 0; tegra_rtc_wait_while_busy(dev); - writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0); + writel(sec, info->base + TEGRA_RTC_REG_SECONDS_ALARM0); dev_vdbg(dev, "alarm read back as %d\n", - readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0)); + readl(info->base + TEGRA_RTC_REG_SECONDS_ALARM0)); /* if successfully written and alarm is enabled ... */ if (sec) { tegra_rtc_alarm_irq_enable(dev, 1); - dev_vdbg(dev, "alarm set as %lu. %ptR\n", sec, &alarm->time); + dev_vdbg(dev, "alarm set as %u, %ptR\n", sec, &alarm->time); } else { - /* disable alarm if 0 or write error. */ + /* disable alarm if 0 or write error */ dev_vdbg(dev, "alarm disabled\n"); tegra_rtc_alarm_irq_enable(dev, 0); } @@ -227,39 +232,39 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data) { struct device *dev = data; struct tegra_rtc_info *info = dev_get_drvdata(dev); - unsigned long events = 0; - unsigned status; - unsigned long sl_irq_flags; + unsigned long events = 0, flags; + u32 status; - status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS); + status = readl(info->base + TEGRA_RTC_REG_INTR_STATUS); if (status) { - /* clear the interrupt masks and status on any irq. 
*/ + /* clear the interrupt masks and status on any IRQ */ tegra_rtc_wait_while_busy(dev); - spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags); - writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK); - writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS); - spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags); + + spin_lock_irqsave(&info->lock, flags); + writel(0, info->base + TEGRA_RTC_REG_INTR_MASK); + writel(status, info->base + TEGRA_RTC_REG_INTR_STATUS); + spin_unlock_irqrestore(&info->lock, flags); } - /* check if Alarm */ - if ((status & TEGRA_RTC_INTR_STATUS_SEC_ALARM0)) + /* check if alarm */ + if (status & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) events |= RTC_IRQF | RTC_AF; - /* check if Periodic */ - if ((status & TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM)) + /* check if periodic */ + if (status & TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM) events |= RTC_IRQF | RTC_PF; - rtc_update_irq(info->rtc_dev, 1, events); + rtc_update_irq(info->rtc, 1, events); return IRQ_HANDLED; } static const struct rtc_class_ops tegra_rtc_ops = { - .read_time = tegra_rtc_read_time, - .set_time = tegra_rtc_set_time, - .read_alarm = tegra_rtc_read_alarm, - .set_alarm = tegra_rtc_set_alarm, - .proc = tegra_rtc_proc, + .read_time = tegra_rtc_read_time, + .set_time = tegra_rtc_set_time, + .read_alarm = tegra_rtc_read_alarm, + .set_alarm = tegra_rtc_set_alarm, + .proc = tegra_rtc_proc, .alarm_irq_enable = tegra_rtc_alarm_irq_enable, }; @@ -269,21 +274,20 @@ static const struct of_device_id tegra_rtc_dt_match[] = { }; MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match); -static int __init tegra_rtc_probe(struct platform_device *pdev) +static int tegra_rtc_probe(struct platform_device *pdev) { struct tegra_rtc_info *info; struct resource *res; int ret; - info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info), - GFP_KERNEL); + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - info->rtc_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(info->rtc_base)) - return PTR_ERR(info->rtc_base); + info->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(info->base)) + return PTR_ERR(info->base); ret = platform_get_irq(pdev, 0); if (ret <= 0) { @@ -291,14 +295,14 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) return ret; } - info->tegra_rtc_irq = ret; + info->irq = ret; - info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); - if (IS_ERR(info->rtc_dev)) - return PTR_ERR(info->rtc_dev); + info->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); - info->rtc_dev->ops = &tegra_rtc_ops; - info->rtc_dev->range_max = U32_MAX; + info->rtc->ops = &tegra_rtc_ops; + info->rtc->range_max = U32_MAX; info->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(info->clk)) @@ -308,33 +312,30 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) if (ret < 0) return ret; - /* set context info. */ + /* set context info */ info->pdev = pdev; - spin_lock_init(&info->tegra_rtc_lock); + spin_lock_init(&info->lock); platform_set_drvdata(pdev, info); - /* clear out the hardware. 
*/ - writel(0, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0); - writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS); - writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK); + /* clear out the hardware */ + writel(0, info->base + TEGRA_RTC_REG_SECONDS_ALARM0); + writel(0xffffffff, info->base + TEGRA_RTC_REG_INTR_STATUS); + writel(0, info->base + TEGRA_RTC_REG_INTR_MASK); device_init_wakeup(&pdev->dev, 1); - ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq, - tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH, - dev_name(&pdev->dev), &pdev->dev); + ret = devm_request_irq(&pdev->dev, info->irq, tegra_rtc_irq_handler, + IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), + &pdev->dev); if (ret) { - dev_err(&pdev->dev, - "Unable to request interrupt for device (err=%d).\n", - ret); + dev_err(&pdev->dev, "failed to request interrupt: %d\n", ret); goto disable_clk; } - ret = rtc_register_device(info->rtc_dev); + ret = rtc_register_device(info->rtc); if (ret) { - dev_err(&pdev->dev, "Unable to register device (err=%d).\n", - ret); + dev_err(&pdev->dev, "failed to register device: %d\n", ret); goto disable_clk; } @@ -363,20 +364,20 @@ static int tegra_rtc_suspend(struct device *dev) tegra_rtc_wait_while_busy(dev); - /* only use ALARM0 as a wake source. */ - writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS); + /* only use ALARM0 as a wake source */ + writel(0xffffffff, info->base + TEGRA_RTC_REG_INTR_STATUS); writel(TEGRA_RTC_INTR_STATUS_SEC_ALARM0, - info->rtc_base + TEGRA_RTC_REG_INTR_MASK); + info->base + TEGRA_RTC_REG_INTR_MASK); dev_vdbg(dev, "alarm sec = %d\n", - readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0)); + readl(info->base + TEGRA_RTC_REG_SECONDS_ALARM0)); - dev_vdbg(dev, "Suspend (device_may_wakeup=%d) irq:%d\n", - device_may_wakeup(dev), info->tegra_rtc_irq); + dev_vdbg(dev, "Suspend (device_may_wakeup=%d) IRQ:%d\n", + device_may_wakeup(dev), info->irq); - /* leave the alarms on as a wake source. */ + /* leave the alarms on as a wake source */ if (device_may_wakeup(dev)) - enable_irq_wake(info->tegra_rtc_irq); + enable_irq_wake(info->irq); return 0; } @@ -386,10 +387,11 @@ static int tegra_rtc_resume(struct device *dev) struct tegra_rtc_info *info = dev_get_drvdata(dev); dev_vdbg(dev, "Resume (device_may_wakeup=%d)\n", - device_may_wakeup(dev)); - /* alarms were left on as a wake source, turn them off. 
*/ + device_may_wakeup(dev)); + + /* alarms were left on as a wake source, turn them off */ if (device_may_wakeup(dev)) - disable_irq_wake(info->tegra_rtc_irq); + disable_irq_wake(info->irq); return 0; } @@ -399,22 +401,21 @@ static SIMPLE_DEV_PM_OPS(tegra_rtc_pm_ops, tegra_rtc_suspend, tegra_rtc_resume); static void tegra_rtc_shutdown(struct platform_device *pdev) { - dev_vdbg(&pdev->dev, "disabling interrupts.\n"); + dev_vdbg(&pdev->dev, "disabling interrupts\n"); tegra_rtc_alarm_irq_enable(&pdev->dev, 0); } -MODULE_ALIAS("platform:tegra_rtc"); static struct platform_driver tegra_rtc_driver = { - .remove = tegra_rtc_remove, - .shutdown = tegra_rtc_shutdown, - .driver = { - .name = "tegra_rtc", + .probe = tegra_rtc_probe, + .remove = tegra_rtc_remove, + .shutdown = tegra_rtc_shutdown, + .driver = { + .name = "tegra_rtc", .of_match_table = tegra_rtc_dt_match, - .pm = &tegra_rtc_pm_ops, + .pm = &tegra_rtc_pm_ops, }, }; - -module_platform_driver_probe(tegra_rtc_driver, tegra_rtc_probe); +module_platform_driver(tegra_rtc_driver); MODULE_AUTHOR("Jon Mayo <jmayo@nvidia.com>"); MODULE_DESCRIPTION("driver for Tegra internal RTC"); diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index b298e9902f45..74b3a0603b73 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c @@ -133,6 +133,7 @@ static int test_probe(struct platform_device *plat_dev) break; default: rtd->rtc->ops = &test_rtc_ops; + device_init_wakeup(&plat_dev->dev, 1); } timer_setup(&rtd->alarm, test_rtc_alarm_handler, 0); diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c index 8556d925e52a..7078f6da1cbc 100644 --- a/drivers/rtc/rtc-tps65910.c +++ b/drivers/rtc/rtc-tps65910.c @@ -143,7 +143,7 @@ static int tps65910_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) struct tps65910 *tps = dev_get_drvdata(dev->parent); int ret; - ret = regmap_bulk_read(tps->regmap, TPS65910_SECONDS, alarm_data, + ret = regmap_bulk_read(tps->regmap, TPS65910_ALARM_SECONDS, alarm_data, NUM_TIME_REGS); if (ret < 0) { dev_err(dev, "rtc_read_alarm error %d\n", ret); diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c index d2e8b21c90c4..ccef887d2690 100644 --- a/drivers/rtc/rtc-wm831x.c +++ b/drivers/rtc/rtc-wm831x.c @@ -435,7 +435,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(&pdev->dev, alm_irq, NULL, wm831x_alm_irq, - IRQF_TRIGGER_RISING, "RTC alarm", + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "RTC alarm", wm831x_rtc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n", diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 1705398b026a..297e1076e571 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -792,7 +792,7 @@ static int virtscsi_probe(struct virtio_device *vdev) num_targets = virtscsi_config_get(vdev, max_target) + 1; shost = scsi_host_alloc(&virtscsi_host_template, - sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues); + struct_size(vscsi, req_vqs, num_queues)); if (!shost) return -ENOMEM; diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index 9ca7d9484de0..24cd193dec55 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -66,6 +66,66 @@ ssize_t qcom_mdt_get_size(const struct firmware *fw) } EXPORT_SYMBOL_GPL(qcom_mdt_get_size); +/** + * qcom_mdt_read_metadata() - read header and metadata from mdt or mbn + * @fw: firmware of mdt header or mbn + * @data_len: length of the read metadata blob + * + * The mechanism that 
performs the authentication of the loading firmware + * expects an ELF header directly followed by the segment of hashes, with no + * padding inbetween. This function allocates a chunk of memory for this pair + * and copy the two pieces into the buffer. + * + * In the case of split firmware the hash is found directly following the ELF + * header, rather than at p_offset described by the second program header. + * + * The caller is responsible to free (kfree()) the returned pointer. + * + * Return: pointer to data, or ERR_PTR() + */ +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len) +{ + const struct elf32_phdr *phdrs; + const struct elf32_hdr *ehdr; + size_t hash_offset; + size_t hash_size; + size_t ehdr_size; + void *data; + + ehdr = (struct elf32_hdr *)fw->data; + phdrs = (struct elf32_phdr *)(ehdr + 1); + + if (ehdr->e_phnum < 2) + return ERR_PTR(-EINVAL); + + if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD) + return ERR_PTR(-EINVAL); + + if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH) + return ERR_PTR(-EINVAL); + + ehdr_size = phdrs[0].p_filesz; + hash_size = phdrs[1].p_filesz; + + data = kmalloc(ehdr_size + hash_size, GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + /* Is the header and hash already packed */ + if (ehdr_size + hash_size == fw->size) + hash_offset = phdrs[0].p_filesz; + else + hash_offset = phdrs[1].p_offset; + + memcpy(data, fw->data, ehdr_size); + memcpy(data + ehdr_size, fw->data + hash_offset, hash_size); + + *data_len = ehdr_size + hash_size; + + return data; +} +EXPORT_SYMBOL_GPL(qcom_mdt_read_metadata); + static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *firmware, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, @@ -78,12 +138,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, phys_addr_t mem_reloc; phys_addr_t min_addr = PHYS_ADDR_MAX; phys_addr_t max_addr = 0; + size_t metadata_len; size_t fw_name_len; ssize_t offset; + void *metadata; char *fw_name; bool relocate = false; void *ptr; - int ret; + int ret = 0; int i; if (!fw || !mem_region || !mem_phys || !mem_size) @@ -101,7 +163,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, return -ENOMEM; if (pas_init) { - ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size); + metadata = qcom_mdt_read_metadata(fw, &metadata_len); + if (IS_ERR(metadata)) { + ret = PTR_ERR(metadata); + goto out; + } + + ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len); + + kfree(metadata); if (ret) { dev_err(dev, "invalid firmware metadata\n"); goto out; @@ -162,7 +232,19 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ptr = mem_region + offset; - if (phdr->p_filesz) { + if (phdr->p_filesz && phdr->p_offset < fw->size) { + /* Firmware is large enough to be non-split */ + if (phdr->p_offset + phdr->p_filesz > fw->size) { + dev_err(dev, + "failed to load segment %d from truncated file %s\n", + i, firmware); + ret = -EINVAL; + break; + } + + memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); + } else if (phdr->p_filesz) { + /* Firmware not large enough, load split-out segments */ sprintf(fw_name + fw_name_len - 3, "b%02d", i); ret = request_firmware_into_buf(&seg_fw, fw_name, dev, ptr, phdr->p_filesz); diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c index 4bb16e9bc297..d4aef9c4a94c 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra20.c +++ 
b/drivers/soc/tegra/fuse/fuse-tegra20.c @@ -99,7 +99,7 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL); + fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL); if (!fuse->apbdma.chan) return -EPROBE_DEFER; diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 8ff109fb77e1..afd99f668c65 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -117,14 +117,4 @@ static struct thermal_governor thermal_gov_fair_share = { .name = "fair_share", .throttle = fair_share_throttle, }; - -int thermal_gov_fair_share_register(void) -{ - return thermal_register_governor(&thermal_gov_fair_share); -} - -void thermal_gov_fair_share_unregister(void) -{ - thermal_unregister_governor(&thermal_gov_fair_share); -} - +THERMAL_GOVERNOR_DECLARE(thermal_gov_fair_share); diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c index 632fb925e975..e0575d29023a 100644 --- a/drivers/thermal/gov_bang_bang.c +++ b/drivers/thermal/gov_bang_bang.c @@ -116,13 +116,4 @@ static struct thermal_governor thermal_gov_bang_bang = { .name = "bang_bang", .throttle = bang_bang_control, }; - -int thermal_gov_bang_bang_register(void) -{ - return thermal_register_governor(&thermal_gov_bang_bang); -} - -void thermal_gov_bang_bang_unregister(void) -{ - thermal_unregister_governor(&thermal_gov_bang_bang); -} +THERMAL_GOVERNOR_DECLARE(thermal_gov_bang_bang); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 53c84fa498ce..77dae1e7c3bf 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -443,6 +443,22 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +#ifdef CONFIG_PM_SLEEP +static int proc_thermal_resume(struct device *dev) +{ + struct proc_thermal_device *proc_dev; + + proc_dev = dev_get_drvdata(dev); + proc_thermal_read_ppcc(proc_dev); + + return 0; +} +#else +#define proc_thermal_resume NULL +#endif + +static SIMPLE_DEV_PM_OPS(proc_thermal_pm, NULL, proc_thermal_resume); + static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)}, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)}, @@ -465,6 +481,7 @@ static struct pci_driver proc_thermal_pci_driver = { .probe = proc_thermal_pci_probe, .remove = proc_thermal_pci_remove, .id_table = proc_thermal_pci_ids, + .driver.pm = &proc_thermal_pm, }; static const struct acpi_device_id int3401_device_ids[] = { @@ -479,6 +496,7 @@ static struct platform_driver int3401_driver = { .driver = { .name = "int3401 thermal", .acpi_match_table = int3401_device_ids, + .pm = &proc_thermal_pm, }, }; diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 3055f9a12a17..44636475b2a3 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -651,13 +651,4 @@ static struct thermal_governor thermal_gov_power_allocator = { .unbind_from_tz = power_allocator_unbind, .throttle = power_allocator_throttle, }; - -int thermal_gov_power_allocator_register(void) -{ - return thermal_register_governor(&thermal_gov_power_allocator); -} - -void thermal_gov_power_allocator_unregister(void) -{ - 
thermal_unregister_governor(&thermal_gov_power_allocator); -} +THERMAL_GOVERNOR_DECLARE(thermal_gov_power_allocator); diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index 81a183befd48..6e051cbd824f 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -206,13 +206,4 @@ static struct thermal_governor thermal_gov_step_wise = { .name = "step_wise", .throttle = step_wise_throttle, }; - -int thermal_gov_step_wise_register(void) -{ - return thermal_register_governor(&thermal_gov_step_wise); -} - -void thermal_gov_step_wise_unregister(void) -{ - thermal_unregister_governor(&thermal_gov_step_wise); -} +THERMAL_GOVERNOR_DECLARE(thermal_gov_step_wise); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 46cfb7de4eb2..6bab66e84eb5 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -243,36 +243,42 @@ int thermal_build_list_of_policies(char *buf) return count; } -static int __init thermal_register_governors(void) +static void __init thermal_unregister_governors(void) { - int result; + struct thermal_governor **governor; - result = thermal_gov_step_wise_register(); - if (result) - return result; + for_each_governor_table(governor) + thermal_unregister_governor(*governor); +} - result = thermal_gov_fair_share_register(); - if (result) - return result; +static int __init thermal_register_governors(void) +{ + int ret = 0; + struct thermal_governor **governor; - result = thermal_gov_bang_bang_register(); - if (result) - return result; + for_each_governor_table(governor) { + ret = thermal_register_governor(*governor); + if (ret) { + pr_err("Failed to register governor: '%s'", + (*governor)->name); + break; + } - result = thermal_gov_user_space_register(); - if (result) - return result; + pr_info("Registered thermal governor '%s'", + (*governor)->name); + } - return thermal_gov_power_allocator_register(); -} + if (ret) { + struct thermal_governor **gov; -static void __init thermal_unregister_governors(void) -{ - thermal_gov_step_wise_unregister(); - thermal_gov_fair_share_unregister(); - thermal_gov_bang_bang_unregister(); - thermal_gov_user_space_unregister(); - thermal_gov_power_allocator_unregister(); + for_each_governor_table(gov) { + if (gov == governor) + break; + thermal_unregister_governor(*gov); + } + } + + return ret; } /* diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 0df190ed82a7..207b0cda70da 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -15,6 +15,21 @@ /* Initial state of a cooling device during binding */ #define THERMAL_NO_TARGET -1UL +/* Init section thermal table */ +extern struct thermal_governor *__governor_thermal_table[]; +extern struct thermal_governor *__governor_thermal_table_end[]; + +#define THERMAL_TABLE_ENTRY(table, name) \ + static typeof(name) *__thermal_table_entry_##name \ + __used __section(__##table##_thermal_table) = &name + +#define THERMAL_GOVERNOR_DECLARE(name) THERMAL_TABLE_ENTRY(governor, name) + +#define for_each_governor_table(__governor) \ + for (__governor = __governor_thermal_table; \ + __governor < __governor_thermal_table_end; \ + __governor++) + /* * This structure is used to describe the behavior of * a certain cooling device on a certain trip point @@ -74,46 +89,6 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev, unsigned long new_state) {} #endif /* CONFIG_THERMAL_STATISTICS */ -#ifdef CONFIG_THERMAL_GOV_STEP_WISE -int 
thermal_gov_step_wise_register(void); -void thermal_gov_step_wise_unregister(void); -#else -static inline int thermal_gov_step_wise_register(void) { return 0; } -static inline void thermal_gov_step_wise_unregister(void) {} -#endif /* CONFIG_THERMAL_GOV_STEP_WISE */ - -#ifdef CONFIG_THERMAL_GOV_FAIR_SHARE -int thermal_gov_fair_share_register(void); -void thermal_gov_fair_share_unregister(void); -#else -static inline int thermal_gov_fair_share_register(void) { return 0; } -static inline void thermal_gov_fair_share_unregister(void) {} -#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */ - -#ifdef CONFIG_THERMAL_GOV_BANG_BANG -int thermal_gov_bang_bang_register(void); -void thermal_gov_bang_bang_unregister(void); -#else -static inline int thermal_gov_bang_bang_register(void) { return 0; } -static inline void thermal_gov_bang_bang_unregister(void) {} -#endif /* CONFIG_THERMAL_GOV_BANG_BANG */ - -#ifdef CONFIG_THERMAL_GOV_USER_SPACE -int thermal_gov_user_space_register(void); -void thermal_gov_user_space_unregister(void); -#else -static inline int thermal_gov_user_space_register(void) { return 0; } -static inline void thermal_gov_user_space_unregister(void) {} -#endif /* CONFIG_THERMAL_GOV_USER_SPACE */ - -#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR -int thermal_gov_power_allocator_register(void); -void thermal_gov_power_allocator_unregister(void); -#else -static inline int thermal_gov_power_allocator_register(void) { return 0; } -static inline void thermal_gov_power_allocator_unregister(void) {} -#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */ - /* device tree support */ #ifdef CONFIG_THERMAL_OF int of_parse_thermal_zones(void); diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c index d62a99fc60a6..962873fd9242 100644 --- a/drivers/thermal/user_space.c +++ b/drivers/thermal/user_space.c @@ -44,14 +44,4 @@ static struct thermal_governor thermal_gov_user_space = { .name = "user_space", .throttle = notify_user_space, }; - -int thermal_gov_user_space_register(void) -{ - return thermal_register_governor(&thermal_gov_user_space); -} - -void thermal_gov_user_space_unregister(void) -{ - thermal_unregister_governor(&thermal_gov_user_space); -} - +THERMAL_GOVERNOR_DECLARE(thermal_gov_user_space); diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c index ed8608763134..b558d4cfd082 100644 --- a/drivers/vfio/mdev/mdev_core.c +++ b/drivers/vfio/mdev/mdev_core.c @@ -143,6 +143,8 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) { int ret; struct mdev_parent *parent; + char *env_string = "MDEV_STATE=registered"; + char *envp[] = { env_string, NULL }; /* check for mandatory ops */ if (!ops || !ops->create || !ops->remove || !ops->supported_type_groups) @@ -194,6 +196,8 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) mutex_unlock(&parent_list_lock); dev_info(dev, "MDEV: Registered\n"); + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); + return 0; add_dev_err: @@ -217,6 +221,8 @@ EXPORT_SYMBOL(mdev_register_device); void mdev_unregister_device(struct device *dev) { struct mdev_parent *parent; + char *env_string = "MDEV_STATE=unregistered"; + char *envp[] = { env_string, NULL }; mutex_lock(&parent_list_lock); parent = __find_parent_device(dev); @@ -240,6 +246,9 @@ void mdev_unregister_device(struct device *dev) up_write(&parent->unreg_sem); mdev_put_parent(parent); + + /* We still have the caller's reference to use for the uevent */ + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); } 
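The mdev_core.c hunks just above let user space observe when a parent device gains or loses mediated-device support by emitting a KOBJ_CHANGE uevent that carries an MDEV_STATE variable. The same pattern applies to any driver that wants to signal a state transition without adding a new sysfs node; a minimal sketch, with hypothetical FOO_STATE naming:

#include <linux/device.h>
#include <linux/kobject.h>

/* Sketch: emit a custom KOBJ_CHANGE uevent with a state variable,
 * mirroring the MDEV_STATE=registered/unregistered strings above. */
static void foo_notify_state(struct device *dev, bool registered)
{
	char *env_string = registered ? "FOO_STATE=registered"
				      : "FOO_STATE=unregistered";
	char *envp[] = { env_string, NULL };

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}

On the consumer side (an assumption, not part of the patch), a udev rule can match ENV{FOO_STATE}, or a netlink uevent listener can read the variable directly.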
EXPORT_SYMBOL(mdev_unregister_device); diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index 50fe3c4f7feb..f2983f0f84be 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c @@ -161,8 +161,7 @@ static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev, atomic_inc(&data->mm->mm_count); ret = (int) mm_iommu_newdev(data->mm, data->useraddr, - (vma->vm_end - vma->vm_start) >> PAGE_SHIFT, - data->gpu_hpa, &data->mem); + vma_pages(vma), data->gpu_hpa, &data->mem); trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr, vma->vm_end - vma->vm_start, ret); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 7048c9198c21..8ce9ad21129f 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -19,6 +19,7 @@ #include <linux/vmalloc.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> +#include <linux/mm.h> #include <asm/iommu.h> #include <asm/tce.h> @@ -31,51 +32,6 @@ static void tce_iommu_detach_group(void *iommu_data, struct iommu_group *iommu_group); -static long try_increment_locked_vm(struct mm_struct *mm, long npages) -{ - long ret = 0, locked, lock_limit; - - if (WARN_ON_ONCE(!mm)) - return -EPERM; - - if (!npages) - return 0; - - down_write(&mm->mmap_sem); - locked = mm->locked_vm + npages; - lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - ret = -ENOMEM; - else - mm->locked_vm += npages; - - pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid, - npages << PAGE_SHIFT, - mm->locked_vm << PAGE_SHIFT, - rlimit(RLIMIT_MEMLOCK), - ret ? " - exceeded" : ""); - - up_write(&mm->mmap_sem); - - return ret; -} - -static void decrement_locked_vm(struct mm_struct *mm, long npages) -{ - if (!mm || !npages) - return; - - down_write(&mm->mmap_sem); - if (WARN_ON_ONCE(npages > mm->locked_vm)) - npages = mm->locked_vm; - mm->locked_vm -= npages; - pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid, - npages << PAGE_SHIFT, - mm->locked_vm << PAGE_SHIFT, - rlimit(RLIMIT_MEMLOCK)); - up_write(&mm->mmap_sem); -} - /* * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation * @@ -333,7 +289,7 @@ static int tce_iommu_enable(struct tce_container *container) return ret; locked = table_group->tce32_size >> PAGE_SHIFT; - ret = try_increment_locked_vm(container->mm, locked); + ret = account_locked_vm(container->mm, locked, true); if (ret) return ret; @@ -352,7 +308,7 @@ static void tce_iommu_disable(struct tce_container *container) container->enabled = false; BUG_ON(!container->mm); - decrement_locked_vm(container->mm, container->locked_pages); + account_locked_vm(container->mm, container->locked_pages, false); } static void *tce_iommu_open(unsigned long arg) @@ -656,7 +612,7 @@ static long tce_iommu_create_table(struct tce_container *container, if (!table_size) return -EINVAL; - ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT); + ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true); if (ret) return ret; @@ -675,7 +631,7 @@ static void tce_iommu_free_table(struct tce_container *container, unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT; iommu_tce_table_put(tbl); - decrement_locked_vm(container->mm, pages); + account_locked_vm(container->mm, pages, false); } static long tce_iommu_create_window(struct tce_container *container, diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 
add34adfadc7..054391f30fa8 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -272,21 +272,8 @@ static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) ret = down_write_killable(&mm->mmap_sem); if (!ret) { - if (npage > 0) { - if (!dma->lock_cap) { - unsigned long limit; - - limit = task_rlimit(dma->task, - RLIMIT_MEMLOCK) >> PAGE_SHIFT; - - if (mm->locked_vm + npage > limit) - ret = -ENOMEM; - } - } - - if (!ret) - mm->locked_vm += npage; - + ret = __account_locked_vm(mm, abs(npage), npage > 0, dma->task, + dma->lock_cap); up_write(&mm->mmap_sem); } diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 247e5585af5d..1a2dd53caade 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -956,7 +956,7 @@ static void handle_tx(struct vhost_net *net) if (!sock) goto out; - if (!vq_iotlb_prefetch(vq)) + if (!vq_meta_prefetch(vq)) goto out; vhost_disable_notify(&net->dev, vq); @@ -1125,7 +1125,7 @@ static void handle_rx(struct vhost_net *net) if (!sock) goto out; - if (!vq_iotlb_prefetch(vq)) + if (!vq_meta_prefetch(vq)) goto out; vhost_disable_notify(&net->dev, vq); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index ff8892c38666..0536f8526359 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -298,6 +298,160 @@ static void vhost_vq_meta_reset(struct vhost_dev *d) __vhost_vq_meta_reset(d->vqs[i]); } +#if VHOST_ARCH_CAN_ACCEL_UACCESS +static void vhost_map_unprefetch(struct vhost_map *map) +{ + kfree(map->pages); + map->pages = NULL; + map->npages = 0; + map->addr = NULL; +} + +static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) +{ + struct vhost_map *map[VHOST_NUM_ADDRS]; + int i; + + spin_lock(&vq->mmu_lock); + for (i = 0; i < VHOST_NUM_ADDRS; i++) { + map[i] = rcu_dereference_protected(vq->maps[i], + lockdep_is_held(&vq->mmu_lock)); + if (map[i]) + rcu_assign_pointer(vq->maps[i], NULL); + } + spin_unlock(&vq->mmu_lock); + + synchronize_rcu(); + + for (i = 0; i < VHOST_NUM_ADDRS; i++) + if (map[i]) + vhost_map_unprefetch(map[i]); + +} + +static void vhost_reset_vq_maps(struct vhost_virtqueue *vq) +{ + int i; + + vhost_uninit_vq_maps(vq); + for (i = 0; i < VHOST_NUM_ADDRS; i++) + vq->uaddrs[i].size = 0; +} + +static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, + unsigned long start, + unsigned long end) +{ + if (unlikely(!uaddr->size)) + return false; + + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); +} + +static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq, + int index, + unsigned long start, + unsigned long end) +{ + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; + struct vhost_map *map; + int i; + + if (!vhost_map_range_overlap(uaddr, start, end)) + return; + + spin_lock(&vq->mmu_lock); + ++vq->invalidate_count; + + map = rcu_dereference_protected(vq->maps[index], + lockdep_is_held(&vq->mmu_lock)); + if (map) { + if (uaddr->write) { + for (i = 0; i < map->npages; i++) + set_page_dirty(map->pages[i]); + } + rcu_assign_pointer(vq->maps[index], NULL); + } + spin_unlock(&vq->mmu_lock); + + if (map) { + synchronize_rcu(); + vhost_map_unprefetch(map); + } +} + +static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq, + int index, + unsigned long start, + unsigned long end) +{ + if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end)) + return; + + spin_lock(&vq->mmu_lock); + --vq->invalidate_count; + spin_unlock(&vq->mmu_lock); +} + +static int vhost_invalidate_range_start(struct mmu_notifier *mn, + const struct mmu_notifier_range 
*range) +{ + struct vhost_dev *dev = container_of(mn, struct vhost_dev, + mmu_notifier); + int i, j; + + if (!mmu_notifier_range_blockable(range)) + return -EAGAIN; + + for (i = 0; i < dev->nvqs; i++) { + struct vhost_virtqueue *vq = dev->vqs[i]; + + for (j = 0; j < VHOST_NUM_ADDRS; j++) + vhost_invalidate_vq_start(vq, j, + range->start, + range->end); + } + + return 0; +} + +static void vhost_invalidate_range_end(struct mmu_notifier *mn, + const struct mmu_notifier_range *range) +{ + struct vhost_dev *dev = container_of(mn, struct vhost_dev, + mmu_notifier); + int i, j; + + for (i = 0; i < dev->nvqs; i++) { + struct vhost_virtqueue *vq = dev->vqs[i]; + + for (j = 0; j < VHOST_NUM_ADDRS; j++) + vhost_invalidate_vq_end(vq, j, + range->start, + range->end); + } +} + +static const struct mmu_notifier_ops vhost_mmu_notifier_ops = { + .invalidate_range_start = vhost_invalidate_range_start, + .invalidate_range_end = vhost_invalidate_range_end, +}; + +static void vhost_init_maps(struct vhost_dev *dev) +{ + struct vhost_virtqueue *vq; + int i, j; + + dev->mmu_notifier.ops = &vhost_mmu_notifier_ops; + + for (i = 0; i < dev->nvqs; ++i) { + vq = dev->vqs[i]; + for (j = 0; j < VHOST_NUM_ADDRS; j++) + RCU_INIT_POINTER(vq->maps[j], NULL); + } +} +#endif + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -326,7 +480,11 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; + vq->invalidate_count = 0; __vhost_vq_meta_reset(vq); +#if VHOST_ARCH_CAN_ACCEL_UACCESS + vhost_reset_vq_maps(vq); +#endif } static int vhost_worker(void *data) @@ -427,6 +585,32 @@ bool vhost_exceeds_weight(struct vhost_virtqueue *vq, } EXPORT_SYMBOL_GPL(vhost_exceeds_weight); +static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, + unsigned int num) +{ + size_t event __maybe_unused = + vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; + + return sizeof(*vq->avail) + + sizeof(*vq->avail->ring) * num + event; +} + +static size_t vhost_get_used_size(struct vhost_virtqueue *vq, + unsigned int num) +{ + size_t event __maybe_unused = + vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 
2 : 0; + + return sizeof(*vq->used) + + sizeof(*vq->used->ring) * num + event; +} + +static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, + unsigned int num) +{ + return sizeof(*vq->desc) * num; +} + void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight) @@ -450,7 +634,9 @@ void vhost_dev_init(struct vhost_dev *dev, INIT_LIST_HEAD(&dev->read_list); INIT_LIST_HEAD(&dev->pending_list); spin_lock_init(&dev->iotlb_lock); - +#if VHOST_ARCH_CAN_ACCEL_UACCESS + vhost_init_maps(dev); +#endif for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; @@ -459,6 +645,7 @@ void vhost_dev_init(struct vhost_dev *dev, vq->heads = NULL; vq->dev = dev; mutex_init(&vq->mutex); + spin_lock_init(&vq->mmu_lock); vhost_vq_reset(dev, vq); if (vq->handle_kick) vhost_poll_init(&vq->poll, vq->handle_kick, @@ -538,7 +725,18 @@ long vhost_dev_set_owner(struct vhost_dev *dev) if (err) goto err_cgroup; +#if VHOST_ARCH_CAN_ACCEL_UACCESS + err = mmu_notifier_register(&dev->mmu_notifier, dev->mm); + if (err) + goto err_mmu_notifier; +#endif + return 0; + +#if VHOST_ARCH_CAN_ACCEL_UACCESS +err_mmu_notifier: + vhost_dev_free_iovecs(dev); +#endif err_cgroup: kthread_stop(worker); dev->worker = NULL; @@ -629,6 +827,107 @@ static void vhost_clear_msg(struct vhost_dev *dev) spin_unlock(&dev->iotlb_lock); } +#if VHOST_ARCH_CAN_ACCEL_UACCESS +static void vhost_setup_uaddr(struct vhost_virtqueue *vq, + int index, unsigned long uaddr, + size_t size, bool write) +{ + struct vhost_uaddr *addr = &vq->uaddrs[index]; + + addr->uaddr = uaddr; + addr->size = size; + addr->write = write; +} + +static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq) +{ + vhost_setup_uaddr(vq, VHOST_ADDR_DESC, + (unsigned long)vq->desc, + vhost_get_desc_size(vq, vq->num), + false); + vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL, + (unsigned long)vq->avail, + vhost_get_avail_size(vq, vq->num), + false); + vhost_setup_uaddr(vq, VHOST_ADDR_USED, + (unsigned long)vq->used, + vhost_get_used_size(vq, vq->num), + true); +} + +static int vhost_map_prefetch(struct vhost_virtqueue *vq, + int index) +{ + struct vhost_map *map; + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; + struct page **pages; + int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE); + int npinned; + void *vaddr, *v; + int err; + int i; + + spin_lock(&vq->mmu_lock); + + err = -EFAULT; + if (vq->invalidate_count) + goto err; + + err = -ENOMEM; + map = kmalloc(sizeof(*map), GFP_ATOMIC); + if (!map) + goto err; + + pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC); + if (!pages) + goto err_pages; + + err = EFAULT; + npinned = __get_user_pages_fast(uaddr->uaddr, npages, + uaddr->write, pages); + if (npinned > 0) + release_pages(pages, npinned); + if (npinned != npages) + goto err_gup; + + for (i = 0; i < npinned; i++) + if (PageHighMem(pages[i])) + goto err_gup; + + vaddr = v = page_address(pages[0]); + + /* For simplicity, fallback to userspace address if VA is not + * contigious. + */ + for (i = 1; i < npinned; i++) { + v += PAGE_SIZE; + if (v != page_address(pages[i])) + goto err_gup; + } + + map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1)); + map->npages = npages; + map->pages = pages; + + rcu_assign_pointer(vq->maps[index], map); + /* No need for a synchronize_rcu(). This function should be + * called by dev->worker so we are serialized with all + * readers. 
+ */ + spin_unlock(&vq->mmu_lock); + + return 0; + +err_gup: + kfree(pages); +err_pages: + kfree(map); +err: + spin_unlock(&vq->mmu_lock); + return err; +} +#endif + void vhost_dev_cleanup(struct vhost_dev *dev) { int i; @@ -658,8 +957,16 @@ void vhost_dev_cleanup(struct vhost_dev *dev) kthread_stop(dev->worker); dev->worker = NULL; } - if (dev->mm) + if (dev->mm) { +#if VHOST_ARCH_CAN_ACCEL_UACCESS + mmu_notifier_unregister(&dev->mmu_notifier, dev->mm); +#endif mmput(dev->mm); + } +#if VHOST_ARCH_CAN_ACCEL_UACCESS + for (i = 0; i < dev->nvqs; i++) + vhost_uninit_vq_maps(dev->vqs[i]); +#endif dev->mm = NULL; } EXPORT_SYMBOL_GPL(vhost_dev_cleanup); @@ -886,6 +1193,113 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, ret; \ }) +static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_used *used; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); + if (likely(map)) { + used = map->addr; + *((__virtio16 *)&used->ring[vq->num]) = + cpu_to_vhost16(vq, vq->avail_idx); + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), + vhost_avail_event(vq)); +} + +static inline int vhost_put_used(struct vhost_virtqueue *vq, + struct vring_used_elem *head, int idx, + int count) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_used *used; + size_t size; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); + if (likely(map)) { + used = map->addr; + size = count * sizeof(*head); + memcpy(used->ring + idx, head, size); + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_copy_to_user(vq, vq->used->ring + idx, head, + count * sizeof(*head)); +} + +static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) + +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_used *used; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); + if (likely(map)) { + used = map->addr; + used->flags = cpu_to_vhost16(vq, vq->used_flags); + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), + &vq->used->flags); +} + +static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) + +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_used *used; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); + if (likely(map)) { + used = map->addr; + used->idx = cpu_to_vhost16(vq, vq->last_used_idx); + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), + &vq->used->idx); +} + #define vhost_get_user(vq, x, ptr, type) \ ({ \ int ret; \ @@ -924,6 +1338,155 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d) mutex_unlock(&d->vqs[i]->mutex); } +static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, + __virtio16 *idx) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_avail *avail; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); + if (likely(map)) { + avail = map->addr; + *idx = avail->idx; + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_get_avail(vq, *idx, &vq->avail->idx); +} 
+ +static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, + __virtio16 *head, int idx) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_avail *avail; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); + if (likely(map)) { + avail = map->addr; + *head = avail->ring[idx & (vq->num - 1)]; + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_get_avail(vq, *head, + &vq->avail->ring[idx & (vq->num - 1)]); +} + +static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, + __virtio16 *flags) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_avail *avail; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); + if (likely(map)) { + avail = map->addr; + *flags = avail->flags; + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_get_avail(vq, *flags, &vq->avail->flags); +} + +static inline int vhost_get_used_event(struct vhost_virtqueue *vq, + __virtio16 *event) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_avail *avail; + + if (!vq->iotlb) { + rcu_read_lock(); + map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]); + if (likely(map)) { + avail = map->addr; + *event = (__virtio16)avail->ring[vq->num]; + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + } +#endif + + return vhost_get_avail(vq, *event, vhost_used_event(vq)); +} + +static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, + __virtio16 *idx) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_used *used; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_USED]); + if (likely(map)) { + used = map->addr; + *idx = used->idx; + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_get_used(vq, *idx, &vq->used->idx); +} + +static inline int vhost_get_desc(struct vhost_virtqueue *vq, + struct vring_desc *desc, int idx) +{ +#if VHOST_ARCH_CAN_ACCEL_UACCESS + struct vhost_map *map; + struct vring_desc *d; + + if (!vq->iotlb) { + rcu_read_lock(); + + map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]); + if (likely(map)) { + d = map->addr; + *desc = *(d + idx); + rcu_read_unlock(); + return 0; + } + + rcu_read_unlock(); + } +#endif + + return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); +} + static int vhost_new_umem_range(struct vhost_umem *umem, u64 start, u64 size, u64 end, u64 userspace_addr, int perm) @@ -1209,13 +1772,9 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, struct vring_used __user *used) { - size_t s __maybe_unused = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 
2 : 0; - - return access_ok(desc, num * sizeof *desc) && - access_ok(avail, - sizeof *avail + num * sizeof *avail->ring + s) && - access_ok(used, - sizeof *used + num * sizeof *used->ring + s); + return access_ok(desc, vhost_get_desc_size(vq, num)) && + access_ok(avail, vhost_get_avail_size(vq, num)) && + access_ok(used, vhost_get_used_size(vq, num)); } static void vhost_vq_meta_update(struct vhost_virtqueue *vq, @@ -1265,26 +1824,42 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq, return true; } -int vq_iotlb_prefetch(struct vhost_virtqueue *vq) +#if VHOST_ARCH_CAN_ACCEL_UACCESS +static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq) +{ + struct vhost_map __rcu *map; + int i; + + for (i = 0; i < VHOST_NUM_ADDRS; i++) { + rcu_read_lock(); + map = rcu_dereference(vq->maps[i]); + rcu_read_unlock(); + if (unlikely(!map)) + vhost_map_prefetch(vq, i); + } +} +#endif + +int vq_meta_prefetch(struct vhost_virtqueue *vq) { - size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; unsigned int num = vq->num; - if (!vq->iotlb) + if (!vq->iotlb) { +#if VHOST_ARCH_CAN_ACCEL_UACCESS + vhost_vq_map_prefetch(vq); +#endif return 1; + } return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, - num * sizeof(*vq->desc), VHOST_ADDR_DESC) && + vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, - sizeof *vq->avail + - num * sizeof(*vq->avail->ring) + s, + vhost_get_avail_size(vq, num), VHOST_ADDR_AVAIL) && iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, - sizeof *vq->used + - num * sizeof(*vq->used->ring) + s, - VHOST_ADDR_USED); + vhost_get_used_size(vq, num), VHOST_ADDR_USED); } -EXPORT_SYMBOL_GPL(vq_iotlb_prefetch); +EXPORT_SYMBOL_GPL(vq_meta_prefetch); /* Can we log writes? */ /* Caller should have device mutex but not vq mutex */ @@ -1299,13 +1874,10 @@ EXPORT_SYMBOL_GPL(vhost_log_access_ok); static bool vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) { - size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; - return vq_memory_access_ok(log_base, vq->umem, vhost_has_feature(vq, VHOST_F_LOG_ALL)) && (!vq->log_used || log_access_ok(log_base, vq->log_addr, - sizeof *vq->used + - vq->num * sizeof *vq->used->ring + s)); + vhost_get_used_size(vq, vq->num))); } /* Can we start vq? */ @@ -1405,6 +1977,121 @@ err: return -EFAULT; } +static long vhost_vring_set_num(struct vhost_dev *d, + struct vhost_virtqueue *vq, + void __user *argp) +{ + struct vhost_vring_state s; + + /* Resizing ring with an active backend? + * You don't want to do that. */ + if (vq->private_data) + return -EBUSY; + + if (copy_from_user(&s, argp, sizeof s)) + return -EFAULT; + + if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) + return -EINVAL; + vq->num = s.num; + + return 0; +} + +static long vhost_vring_set_addr(struct vhost_dev *d, + struct vhost_virtqueue *vq, + void __user *argp) +{ + struct vhost_vring_addr a; + + if (copy_from_user(&a, argp, sizeof a)) + return -EFAULT; + if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) + return -EOPNOTSUPP; + + /* For 32bit, verify that the top 32bits of the user + data are set to zero. */ + if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || + (u64)(unsigned long)a.used_user_addr != a.used_user_addr || + (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) + return -EFAULT; + + /* Make sure it's safe to cast pointers to vring types. 
*/ + BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); + BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); + if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || + (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || + (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) + return -EINVAL; + + /* We only verify access here if backend is configured. + * If it is not, we don't as size might not have been setup. + * We will verify when backend is configured. */ + if (vq->private_data) { + if (!vq_access_ok(vq, vq->num, + (void __user *)(unsigned long)a.desc_user_addr, + (void __user *)(unsigned long)a.avail_user_addr, + (void __user *)(unsigned long)a.used_user_addr)) + return -EINVAL; + + /* Also validate log access for used ring if enabled. */ + if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) && + !log_access_ok(vq->log_base, a.log_guest_addr, + sizeof *vq->used + + vq->num * sizeof *vq->used->ring)) + return -EINVAL; + } + + vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); + vq->desc = (void __user *)(unsigned long)a.desc_user_addr; + vq->avail = (void __user *)(unsigned long)a.avail_user_addr; + vq->log_addr = a.log_guest_addr; + vq->used = (void __user *)(unsigned long)a.used_user_addr; + + return 0; +} + +static long vhost_vring_set_num_addr(struct vhost_dev *d, + struct vhost_virtqueue *vq, + unsigned int ioctl, + void __user *argp) +{ + long r; + + mutex_lock(&vq->mutex); + +#if VHOST_ARCH_CAN_ACCEL_UACCESS + /* Unregister MMU notifer to allow invalidation callback + * can access vq->uaddrs[] without holding a lock. + */ + if (d->mm) + mmu_notifier_unregister(&d->mmu_notifier, d->mm); + + vhost_uninit_vq_maps(vq); +#endif + + switch (ioctl) { + case VHOST_SET_VRING_NUM: + r = vhost_vring_set_num(d, vq, argp); + break; + case VHOST_SET_VRING_ADDR: + r = vhost_vring_set_addr(d, vq, argp); + break; + default: + BUG(); + } + +#if VHOST_ARCH_CAN_ACCEL_UACCESS + vhost_setup_vq_uaddr(vq); + + if (d->mm) + mmu_notifier_register(&d->mmu_notifier, d->mm); +#endif + + mutex_unlock(&vq->mutex); + + return r; +} long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL; @@ -1414,7 +2101,6 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg struct vhost_virtqueue *vq; struct vhost_vring_state s; struct vhost_vring_file f; - struct vhost_vring_addr a; u32 idx; long r; @@ -1427,26 +2113,14 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg idx = array_index_nospec(idx, d->nvqs); vq = d->vqs[idx]; + if (ioctl == VHOST_SET_VRING_NUM || + ioctl == VHOST_SET_VRING_ADDR) { + return vhost_vring_set_num_addr(d, vq, ioctl, argp); + } + mutex_lock(&vq->mutex); switch (ioctl) { - case VHOST_SET_VRING_NUM: - /* Resizing ring with an active backend? - * You don't want to do that. */ - if (vq->private_data) { - r = -EBUSY; - break; - } - if (copy_from_user(&s, argp, sizeof s)) { - r = -EFAULT; - break; - } - if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) { - r = -EINVAL; - break; - } - vq->num = s.num; - break; case VHOST_SET_VRING_BASE: /* Moving base with an active backend? * You don't want to do that. 
*/ @@ -1472,62 +2146,6 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg if (copy_to_user(argp, &s, sizeof s)) r = -EFAULT; break; - case VHOST_SET_VRING_ADDR: - if (copy_from_user(&a, argp, sizeof a)) { - r = -EFAULT; - break; - } - if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) { - r = -EOPNOTSUPP; - break; - } - /* For 32bit, verify that the top 32bits of the user - data are set to zero. */ - if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || - (u64)(unsigned long)a.used_user_addr != a.used_user_addr || - (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) { - r = -EFAULT; - break; - } - - /* Make sure it's safe to cast pointers to vring types. */ - BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); - BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); - if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || - (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || - (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) { - r = -EINVAL; - break; - } - - /* We only verify access here if backend is configured. - * If it is not, we don't as size might not have been setup. - * We will verify when backend is configured. */ - if (vq->private_data) { - if (!vq_access_ok(vq, vq->num, - (void __user *)(unsigned long)a.desc_user_addr, - (void __user *)(unsigned long)a.avail_user_addr, - (void __user *)(unsigned long)a.used_user_addr)) { - r = -EINVAL; - break; - } - - /* Also validate log access for used ring if enabled. */ - if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) && - !log_access_ok(vq->log_base, a.log_guest_addr, - sizeof *vq->used + - vq->num * sizeof *vq->used->ring)) { - r = -EINVAL; - break; - } - } - - vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); - vq->desc = (void __user *)(unsigned long)a.desc_user_addr; - vq->avail = (void __user *)(unsigned long)a.avail_user_addr; - vq->log_addr = a.log_guest_addr; - vq->used = (void __user *)(unsigned long)a.used_user_addr; - break; case VHOST_SET_VRING_KICK: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; @@ -1861,8 +2479,7 @@ EXPORT_SYMBOL_GPL(vhost_log_write); static int vhost_update_used_flags(struct vhost_virtqueue *vq) { void __user *used; - if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), - &vq->used->flags) < 0) + if (vhost_put_used_flags(vq)) return -EFAULT; if (unlikely(vq->log_used)) { /* Make sure the flag is seen before log. 
*/ @@ -1879,8 +2496,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) { - if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), - vhost_avail_event(vq))) + if (vhost_put_avail_event(vq)) return -EFAULT; if (unlikely(vq->log_used)) { void __user *used; @@ -1916,7 +2532,7 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq) r = -EFAULT; goto err; } - r = vhost_get_used(vq, last_used_idx, &vq->used->idx); + r = vhost_get_used_idx(vq, &last_used_idx); if (r) { vq_err(vq, "Can't access used idx at %p\n", &vq->used->idx); @@ -2115,7 +2731,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, last_avail_idx = vq->last_avail_idx; if (vq->avail_idx == vq->last_avail_idx) { - if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) { + if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { vq_err(vq, "Failed to access avail idx at %p\n", &vq->avail->idx); return -EFAULT; @@ -2142,8 +2758,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, /* Grab the next descriptor number they're advertising, and increment * the index we've seen. */ - if (unlikely(vhost_get_avail(vq, ring_head, - &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) { + if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, &vq->avail->ring[last_avail_idx % vq->num]); @@ -2178,8 +2793,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq, i, vq->num, head); return -EINVAL; } - ret = vhost_copy_from_user(vq, &desc, vq->desc + i, - sizeof desc); + ret = vhost_get_desc(vq, &desc, i); if (unlikely(ret)) { vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", i, vq->desc + i); @@ -2272,16 +2886,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, start = vq->last_used_idx & (vq->num - 1); used = vq->used->ring + start; - if (count == 1) { - if (vhost_put_user(vq, heads[0].id, &used->id)) { - vq_err(vq, "Failed to write used id"); - return -EFAULT; - } - if (vhost_put_user(vq, heads[0].len, &used->len)) { - vq_err(vq, "Failed to write used len"); - return -EFAULT; - } - } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) { + if (vhost_put_used(vq, heads, start, count)) { vq_err(vq, "Failed to write used"); return -EFAULT; } @@ -2323,8 +2928,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, /* Make sure buffer is written before we update index. 
*/ smp_wmb(); - if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), - &vq->used->idx)) { + if (vhost_put_used_idx(vq)) { vq_err(vq, "Failed to increment used idx"); return -EFAULT; } @@ -2357,7 +2961,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { __virtio16 flags; - if (vhost_get_avail(vq, flags, &vq->avail->flags)) { + if (vhost_get_avail_flags(vq, &flags)) { vq_err(vq, "Failed to get flags"); return true; } @@ -2371,7 +2975,7 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (unlikely(!v)) return true; - if (vhost_get_avail(vq, event, vhost_used_event(vq))) { + if (vhost_get_used_event(vq, &event)) { vq_err(vq, "Failed to get used event idx"); return true; } @@ -2416,7 +3020,7 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) if (vq->avail_idx != vq->last_avail_idx) return false; - r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail_idx(vq, &avail_idx); if (unlikely(r)) return false; vq->avail_idx = vhost16_to_cpu(vq, avail_idx); @@ -2452,7 +3056,7 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); - r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); + r = vhost_get_avail_idx(vq, &avail_idx); if (r) { vq_err(vq, "Failed to check avail idx at %p: %d\n", &vq->avail->idx, r); diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 27a78a9b8cc7..819296332913 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -12,6 +12,9 @@ #include <linux/virtio_config.h> #include <linux/virtio_ring.h> #include <linux/atomic.h> +#include <linux/pagemap.h> +#include <linux/mmu_notifier.h> +#include <asm/cacheflush.h> struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *work); @@ -80,6 +83,24 @@ enum vhost_uaddr_type { VHOST_NUM_ADDRS = 3, }; +struct vhost_map { + int npages; + void *addr; + struct page **pages; +}; + +struct vhost_uaddr { + unsigned long uaddr; + size_t size; + bool write; +}; + +#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 +#define VHOST_ARCH_CAN_ACCEL_UACCESS 1 +#else +#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 +#endif + /* The virtqueue structure describes a queue attached to a device. */ struct vhost_virtqueue { struct vhost_dev *dev; @@ -90,7 +111,22 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; + +#if VHOST_ARCH_CAN_ACCEL_UACCESS + /* Read by memory accessors, modified by meta data + * prefetching, MMU notifier and vring ioctl(). + * Synchonrized through mmu_lock (writers) and RCU (writers + * and readers). + */ + struct vhost_map __rcu *maps[VHOST_NUM_ADDRS]; + /* Read by MMU notifier, modified by vring ioctl(), + * synchronized through MMU notifier + * registering/unregistering. 
+ */ + struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS]; +#endif const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; + struct file *kick; struct eventfd_ctx *call_ctx; struct eventfd_ctx *error_ctx; @@ -145,6 +181,8 @@ struct vhost_virtqueue { bool user_be; #endif u32 busyloop_timeout; + spinlock_t mmu_lock; + int invalidate_count; }; struct vhost_msg_node { @@ -158,6 +196,9 @@ struct vhost_msg_node { struct vhost_dev { struct mm_struct *mm; +#ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier mmu_notifier; +#endif struct mutex mutex; struct vhost_virtqueue **vqs; int nvqs; @@ -212,7 +253,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len, struct iovec *iov, int count); -int vq_iotlb_prefetch(struct vhost_virtqueue *vq); +int vq_meta_prefetch(struct vhost_virtqueue *vq); struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); void vhost_enqueue_msg(struct vhost_dev *dev, diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index f363fbeb5ab0..e09edb5c5e06 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -463,9 +463,14 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct irq_affinity *desc) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - unsigned int irq = platform_get_irq(vm_dev->pdev, 0); + int irq = platform_get_irq(vm_dev->pdev, 0); int i, err, queue_idx = 0; + if (irq < 0) { + dev_err(&vdev->dev, "Cannot get IRQ resource\n"); + return irq; + } + err = request_irq(irq, vm_interrupt, IRQF_SHARED, dev_name(&vdev->dev), vm_dev); if (err) |
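The final hunk above changes vm_find_vqs() to keep platform_get_irq()'s return value in a signed int and to bail out before request_irq() when it is negative. A minimal probe-time sketch of that pattern follows; the driver, handler, and device names are made up for illustration and are not part of the patch.

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq_handler(int irq, void *opaque)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);    /* signed: -ENXIO etc. on failure */

        if (irq < 0) {
                dev_err(&pdev->dev, "Cannot get IRQ resource\n");
                return irq;                     /* propagate the errno */
        }

        return devm_request_irq(&pdev->dev, irq, demo_irq_handler,
                                IRQF_SHARED, dev_name(&pdev->dev), pdev);
}

Keeping the result in an unsigned int, as the old virtio_mmio code did, makes the "< 0" check impossible and can hand a huge bogus IRQ number to request_irq(), which is exactly what the hunk avoids.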