Diffstat (limited to 'drivers/clk')
84 files changed, 8415 insertions, 966 deletions
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 7641965d208d..6f56d3a4f010 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -65,10 +65,12 @@ config COMMON_CLK_SI570 clock generators. config COMMON_CLK_S2MPS11 - tristate "Clock driver for S2MPS11 MFD" + tristate "Clock driver for S2MPS11/S5M8767 MFD" depends on MFD_SEC_CORE ---help--- - This driver supports S2MPS11 crystal oscillator clock. + This driver supports S2MPS11/S5M8767 crystal oscillator clock. These + multi-function devices have 3 fixed-rate oscillators, clocked at + 32KHz each. config CLK_TWL6040 tristate "External McPDM functional clock from twl6040" @@ -111,4 +113,5 @@ source "drivers/clk/qcom/Kconfig" endmenu +source "drivers/clk/bcm/Kconfig" source "drivers/clk/mvebu/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index a367a9831717..5f8a28735c96 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o obj-$(CONFIG_MACH_LOONGSON1) += clk-ls1x.o obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o +obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o @@ -29,7 +30,9 @@ obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o obj-$(CONFIG_COMMON_CLK_AT91) += at91/ +obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/ obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/ +obj-$(CONFIG_ARCH_HIP04) += hisilicon/ obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ ifeq ($(CONFIG_COMMON_CLK), y) obj-$(CONFIG_ARCH_MMP) += mmp/ @@ -43,6 +46,7 @@ obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/ obj-$(CONFIG_ARCH_SIRF) += sirf/ obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/ obj-$(CONFIG_PLAT_SPEAR) += spear/ +obj-$(CONFIG_ARCH_STI) += st/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/ diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index bd313f7816a8..c1af80bcdf20 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, irq = irq_of_parse_and_map(np, 0); if (!irq) - return; + goto out_free_characteristics; clk = at91_clk_register_master(pmc, irq, name, num_parents, parent_names, layout, diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index fd792b203eaf..62e2509f9df1 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -13,12 +13,9 @@ #include <linux/clk/at91_pmc.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_irq.h> #include <linux/io.h> #include <linux/wait.h> #include <linux/sched.h> -#include <linux/interrupt.h> -#include <linux/irq.h> #include "pmc.h" @@ -38,104 +35,59 @@ struct clk_programmable_layout { struct clk_programmable { struct clk_hw hw; struct at91_pmc *pmc; - unsigned int irq; - wait_queue_head_t wait; u8 id; - u8 css; - u8 pres; - u8 slckmck; const struct clk_programmable_layout *layout; }; #define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw) - -static irqreturn_t clk_programmable_irq_handler(int irq, void *dev_id) -{ - struct clk_programmable *prog = (struct clk_programmable *)dev_id; - - wake_up(&prog->wait); - - return IRQ_HANDLED; -} - -static int clk_programmable_prepare(struct 
clk_hw *hw) +static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { - u32 tmp; + u32 pres; struct clk_programmable *prog = to_clk_programmable(hw); struct at91_pmc *pmc = prog->pmc; const struct clk_programmable_layout *layout = prog->layout; - u8 id = prog->id; - u32 mask = PROG_STATUS_MASK(id); - - tmp = prog->css | (prog->pres << layout->pres_shift); - if (layout->have_slck_mck && prog->slckmck) - tmp |= AT91_PMC_CSSMCK_MCK; - - pmc_write(pmc, AT91_PMC_PCKR(id), tmp); - - while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) - wait_event(prog->wait, pmc_read(pmc, AT91_PMC_SR) & mask); - return 0; + pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) & + PROG_PRES_MASK; + return parent_rate >> pres; } -static int clk_programmable_is_ready(struct clk_hw *hw) +static long clk_programmable_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_clk) { - struct clk_programmable *prog = to_clk_programmable(hw); - struct at91_pmc *pmc = prog->pmc; - - return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_PCKR(prog->id)); -} + struct clk *parent = NULL; + long best_rate = -EINVAL; + unsigned long parent_rate; + unsigned long tmp_rate; + int shift; + int i; -static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - u32 tmp; - struct clk_programmable *prog = to_clk_programmable(hw); - struct at91_pmc *pmc = prog->pmc; - const struct clk_programmable_layout *layout = prog->layout; + for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { + parent = clk_get_parent_by_index(hw->clk, i); + if (!parent) + continue; - tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)); - prog->pres = (tmp >> layout->pres_shift) & PROG_PRES_MASK; + parent_rate = __clk_get_rate(parent); + for (shift = 0; shift < PROG_PRES_MASK; shift++) { + tmp_rate = parent_rate >> shift; + if (tmp_rate <= rate) + break; + } - return parent_rate >> prog->pres; -} + if (tmp_rate > rate) + continue; -static long clk_programmable_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - unsigned long best_rate = *parent_rate; - unsigned long best_diff; - unsigned long new_diff; - unsigned long cur_rate; - int shift = shift; - - if (rate > *parent_rate) - return *parent_rate; - else - best_diff = *parent_rate - rate; - - if (!best_diff) - return best_rate; - - for (shift = 1; shift < PROG_PRES_MASK; shift++) { - cur_rate = *parent_rate >> shift; - - if (cur_rate > rate) - new_diff = cur_rate - rate; - else - new_diff = rate - cur_rate; - - if (!new_diff) - return cur_rate; - - if (new_diff < best_diff) { - best_diff = new_diff; - best_rate = cur_rate; + if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) { + best_rate = tmp_rate; + *best_parent_rate = parent_rate; + *best_parent_clk = parent; } - if (rate > cur_rate) + if (!best_rate) break; } @@ -146,17 +98,22 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index) { struct clk_programmable *prog = to_clk_programmable(hw); const struct clk_programmable_layout *layout = prog->layout; + struct at91_pmc *pmc = prog->pmc; + u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask; + + if (layout->have_slck_mck) + tmp &= AT91_PMC_CSSMCK_MCK; + if (index > layout->css_mask) { if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) { - prog->css = 0; - prog->slckmck = 1; + tmp |= AT91_PMC_CSSMCK_MCK; return 0; } else { return -EINVAL; } } - prog->css = index; + pmc_write(pmc, 
AT91_PMC_PCKR(prog->id), tmp | index); return 0; } @@ -169,13 +126,9 @@ static u8 clk_programmable_get_parent(struct clk_hw *hw) const struct clk_programmable_layout *layout = prog->layout; tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)); - prog->css = tmp & layout->css_mask; - ret = prog->css; - if (layout->have_slck_mck) { - prog->slckmck = !!(tmp & AT91_PMC_CSSMCK_MCK); - if (prog->slckmck && !ret) - ret = PROG_MAX_RM9200_CSS + 1; - } + ret = tmp & layout->css_mask; + if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret) + ret = PROG_MAX_RM9200_CSS + 1; return ret; } @@ -184,67 +137,47 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_programmable *prog = to_clk_programmable(hw); - unsigned long best_rate = parent_rate; - unsigned long best_diff; - unsigned long new_diff; - unsigned long cur_rate; + struct at91_pmc *pmc = prog->pmc; + const struct clk_programmable_layout *layout = prog->layout; + unsigned long div = parent_rate / rate; int shift = 0; + u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & + ~(PROG_PRES_MASK << layout->pres_shift); - if (rate > parent_rate) - return parent_rate; - else - best_diff = parent_rate - rate; - - if (!best_diff) { - prog->pres = shift; - return 0; - } + if (!div) + return -EINVAL; - for (shift = 1; shift < PROG_PRES_MASK; shift++) { - cur_rate = parent_rate >> shift; + shift = fls(div) - 1; - if (cur_rate > rate) - new_diff = cur_rate - rate; - else - new_diff = rate - cur_rate; + if (div != (1<<shift)) + return -EINVAL; - if (!new_diff) - break; + if (shift >= PROG_PRES_MASK) + return -EINVAL; - if (new_diff < best_diff) { - best_diff = new_diff; - best_rate = cur_rate; - } + pmc_write(pmc, AT91_PMC_PCKR(prog->id), + tmp | (shift << layout->pres_shift)); - if (rate > cur_rate) - break; - } - - prog->pres = shift; return 0; } static const struct clk_ops programmable_ops = { - .prepare = clk_programmable_prepare, - .is_prepared = clk_programmable_is_ready, .recalc_rate = clk_programmable_recalc_rate, - .round_rate = clk_programmable_round_rate, + .determine_rate = clk_programmable_determine_rate, .get_parent = clk_programmable_get_parent, .set_parent = clk_programmable_set_parent, .set_rate = clk_programmable_set_rate, }; static struct clk * __init -at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq, +at91_clk_register_programmable(struct at91_pmc *pmc, const char *name, const char **parent_names, u8 num_parents, u8 id, const struct clk_programmable_layout *layout) { - int ret; struct clk_programmable *prog; struct clk *clk = NULL; struct clk_init_data init; - char irq_name[11]; if (id > PROG_ID_MAX) return ERR_PTR(-EINVAL); @@ -263,14 +196,6 @@ at91_clk_register_programmable(struct at91_pmc *pmc, unsigned int irq, prog->layout = layout; prog->hw.init = &init; prog->pmc = pmc; - prog->irq = irq; - init_waitqueue_head(&prog->wait); - irq_set_status_flags(prog->irq, IRQ_NOAUTOEN); - snprintf(irq_name, sizeof(irq_name), "clk-prog%d", id); - ret = request_irq(prog->irq, clk_programmable_irq_handler, - IRQF_TRIGGER_HIGH, irq_name, prog); - if (ret) - return ERR_PTR(ret); clk = clk_register(NULL, &prog->hw); if (IS_ERR(clk)) @@ -304,7 +229,6 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, int num; u32 id; int i; - unsigned int irq; struct clk *clk; int num_parents; const char *parent_names[PROG_SOURCE_MAX]; @@ -332,11 +256,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, if (of_property_read_string(np, 
"clock-output-names", &name)) name = progclknp->name; - irq = irq_of_parse_and_map(progclknp, 0); - if (!irq) - continue; - - clk = at91_clk_register_programmable(pmc, irq, name, + clk = at91_clk_register_programmable(pmc, name, parent_names, num_parents, id, layout); if (IS_ERR(clk)) diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c index 8f7c0434a09f..8c96307d7363 100644 --- a/drivers/clk/at91/clk-system.c +++ b/drivers/clk/at91/clk-system.c @@ -14,6 +14,11 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> +#include <linux/irq.h> +#include <linux/of_irq.h> +#include <linux/interrupt.h> +#include <linux/wait.h> +#include <linux/sched.h> #include "pmc.h" @@ -25,19 +30,48 @@ struct clk_system { struct clk_hw hw; struct at91_pmc *pmc; + unsigned int irq; + wait_queue_head_t wait; u8 id; }; -static int clk_system_enable(struct clk_hw *hw) +static inline int is_pck(int id) +{ + return (id >= 8) && (id <= 15); +} +static irqreturn_t clk_system_irq_handler(int irq, void *dev_id) +{ + struct clk_system *sys = (struct clk_system *)dev_id; + + wake_up(&sys->wait); + disable_irq_nosync(sys->irq); + + return IRQ_HANDLED; +} + +static int clk_system_prepare(struct clk_hw *hw) { struct clk_system *sys = to_clk_system(hw); struct at91_pmc *pmc = sys->pmc; + u32 mask = 1 << sys->id; - pmc_write(pmc, AT91_PMC_SCER, 1 << sys->id); + pmc_write(pmc, AT91_PMC_SCER, mask); + + if (!is_pck(sys->id)) + return 0; + + while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) { + if (sys->irq) { + enable_irq(sys->irq); + wait_event(sys->wait, + pmc_read(pmc, AT91_PMC_SR) & mask); + } else + cpu_relax(); + } return 0; } -static void clk_system_disable(struct clk_hw *hw) +static void clk_system_unprepare(struct clk_hw *hw) { struct clk_system *sys = to_clk_system(hw); struct at91_pmc *pmc = sys->pmc; @@ -45,27 +79,34 @@ static void clk_system_disable(struct clk_hw *hw) pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id); } -static int clk_system_is_enabled(struct clk_hw *hw) +static int clk_system_is_prepared(struct clk_hw *hw) { struct clk_system *sys = to_clk_system(hw); struct at91_pmc *pmc = sys->pmc; - return !!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id)); + if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id))) + return 0; + + if (!is_pck(sys->id)) + return 1; + + return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id)); } static const struct clk_ops system_ops = { - .enable = clk_system_enable, - .disable = clk_system_disable, - .is_enabled = clk_system_is_enabled, + .prepare = clk_system_prepare, + .unprepare = clk_system_unprepare, + .is_prepared = clk_system_is_prepared, }; static struct clk * __init at91_clk_register_system(struct at91_pmc *pmc, const char *name, - const char *parent_name, u8 id) + const char *parent_name, u8 id, int irq) { struct clk_system *sys; struct clk *clk = NULL; struct clk_init_data init; + int ret; if (!parent_name || id > SYSTEM_MAX_ID) return ERR_PTR(-EINVAL); @@ -84,11 +125,20 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name, * (see drivers/memory) which would request and enable the ddrck clock. * When this is done we will be able to remove CLK_IGNORE_UNUSED flag. 
*/ - init.flags = CLK_IGNORE_UNUSED; + init.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED; sys->id = id; sys->hw.init = &init; sys->pmc = pmc; + sys->irq = irq; + if (irq) { + init_waitqueue_head(&sys->wait); + irq_set_status_flags(sys->irq, IRQ_NOAUTOEN); + ret = request_irq(sys->irq, clk_system_irq_handler, + IRQF_TRIGGER_HIGH, name, sys); + if (ret) + return ERR_PTR(ret); + } clk = clk_register(NULL, &sys->hw); if (IS_ERR(clk)) @@ -101,6 +151,7 @@ static void __init of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc) { int num; + int irq = 0; u32 id; struct clk *clk; const char *name; @@ -118,9 +169,12 @@ of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc) if (of_property_read_string(np, "clock-output-names", &name)) name = sysclknp->name; + if (is_pck(id)) + irq = irq_of_parse_and_map(sysclknp, 0); + parent_name = of_clk_get_parent_name(sysclknp, 0); - clk = at91_clk_register_system(pmc, name, parent_name, id); + clk = at91_clk_register_system(pmc, name, parent_name, id, irq); if (IS_ERR(clk)) continue; diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig new file mode 100644 index 000000000000..a7262fb8ce55 --- /dev/null +++ b/drivers/clk/bcm/Kconfig @@ -0,0 +1,9 @@ +config CLK_BCM_KONA + bool "Broadcom Kona CCU clock support" + depends on ARCH_BCM_MOBILE + depends on COMMON_CLK + default y + help + Enable common clock framework support for Broadcom SoCs + using "Kona" style clock control units, including those + in the BCM281xx family. diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile new file mode 100644 index 000000000000..cf93359aa862 --- /dev/null +++ b/drivers/clk/bcm/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o +obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o +obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o diff --git a/drivers/clk/bcm/clk-bcm281xx.c b/drivers/clk/bcm/clk-bcm281xx.c new file mode 100644 index 000000000000..3c66de696aeb --- /dev/null +++ b/drivers/clk/bcm/clk-bcm281xx.c @@ -0,0 +1,416 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "clk-kona.h" +#include "dt-bindings/clock/bcm281xx.h" + +/* bcm11351 CCU device tree "compatible" strings */ +#define BCM11351_DT_ROOT_CCU_COMPAT "brcm,bcm11351-root-ccu" +#define BCM11351_DT_AON_CCU_COMPAT "brcm,bcm11351-aon-ccu" +#define BCM11351_DT_HUB_CCU_COMPAT "brcm,bcm11351-hub-ccu" +#define BCM11351_DT_MASTER_CCU_COMPAT "brcm,bcm11351-master-ccu" +#define BCM11351_DT_SLAVE_CCU_COMPAT "brcm,bcm11351-slave-ccu" + +/* Root CCU clocks */ + +static struct peri_clk_data frac_1m_data = { + .gate = HW_SW_GATE(0x214, 16, 0, 1), + .trig = TRIGGER(0x0e04, 0), + .div = FRAC_DIVIDER(0x0e00, 0, 22, 16), + .clocks = CLOCKS("ref_crystal"), +}; + +/* AON CCU clocks */ + +static struct peri_clk_data hub_timer_data = { + .gate = HW_SW_GATE(0x0414, 16, 0, 1), + .clocks = CLOCKS("bbl_32k", + "frac_1m", + "dft_19_5m"), + .sel = SELECTOR(0x0a10, 0, 2), + .trig = TRIGGER(0x0a40, 4), +}; + +static struct peri_clk_data pmu_bsc_data = { + .gate = HW_SW_GATE(0x0418, 16, 0, 1), + .clocks = CLOCKS("ref_crystal", + "pmu_bsc_var", + "bbl_32k"), + .sel = SELECTOR(0x0a04, 0, 2), + .div = DIVIDER(0x0a04, 3, 4), + .trig = TRIGGER(0x0a40, 0), +}; + +static struct peri_clk_data pmu_bsc_var_data = { + .clocks = CLOCKS("var_312m", + "ref_312m"), + .sel = SELECTOR(0x0a00, 0, 2), + .div = DIVIDER(0x0a00, 4, 5), + .trig = TRIGGER(0x0a40, 2), +}; + +/* Hub CCU clocks */ + +static struct peri_clk_data tmon_1m_data = { + .gate = HW_SW_GATE(0x04a4, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "frac_1m"), + .sel = SELECTOR(0x0e74, 0, 2), + .trig = TRIGGER(0x0e84, 1), +}; + +/* Master CCU clocks */ + +static struct peri_clk_data sdio1_data = { + .gate = HW_SW_GATE(0x0358, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a28, 0, 3), + .div = DIVIDER(0x0a28, 4, 14), + .trig = TRIGGER(0x0afc, 9), +}; + +static struct peri_clk_data sdio2_data = { + .gate = HW_SW_GATE(0x035c, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a2c, 0, 3), + .div = DIVIDER(0x0a2c, 4, 14), + .trig = TRIGGER(0x0afc, 10), +}; + +static struct peri_clk_data sdio3_data = { + .gate = HW_SW_GATE(0x0364, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a34, 0, 3), + .div = DIVIDER(0x0a34, 4, 14), + .trig = TRIGGER(0x0afc, 12), +}; + +static struct peri_clk_data sdio4_data = { + .gate = HW_SW_GATE(0x0360, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_52m", + "ref_52m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a30, 0, 3), + .div = DIVIDER(0x0a30, 4, 14), + .trig = TRIGGER(0x0afc, 11), +}; + +static struct peri_clk_data usb_ic_data = { + .gate = HW_SW_GATE(0x0354, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_96m", + "ref_96m"), + .div = FIXED_DIVIDER(2), + .sel = SELECTOR(0x0a24, 0, 2), + .trig = TRIGGER(0x0afc, 7), +}; + +/* also called usbh_48m */ +static struct peri_clk_data hsic2_48m_data = { + .gate = HW_SW_GATE(0x0370, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a38, 0, 2), + .div = FIXED_DIVIDER(2), + .trig = TRIGGER(0x0afc, 5), +}; + +/* also called usbh_12m */ +static struct peri_clk_data hsic2_12m_data = { + .gate = HW_SW_GATE(0x0370, 20, 4, 5), + .div = DIVIDER(0x0a38, 12, 2), + .clocks = CLOCKS("ref_crystal", + "var_96m", + "ref_96m"), + .pre_div = FIXED_DIVIDER(2), + .sel = SELECTOR(0x0a38, 0, 2), + .trig = TRIGGER(0x0afc, 5), +}; + +/* Slave CCU clocks */ + 
+static struct peri_clk_data uartb_data = { + .gate = HW_SW_GATE(0x0400, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a10, 0, 2), + .div = FRAC_DIVIDER(0x0a10, 4, 12, 8), + .trig = TRIGGER(0x0afc, 2), +}; + +static struct peri_clk_data uartb2_data = { + .gate = HW_SW_GATE(0x0404, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a14, 0, 2), + .div = FRAC_DIVIDER(0x0a14, 4, 12, 8), + .trig = TRIGGER(0x0afc, 3), +}; + +static struct peri_clk_data uartb3_data = { + .gate = HW_SW_GATE(0x0408, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a18, 0, 2), + .div = FRAC_DIVIDER(0x0a18, 4, 12, 8), + .trig = TRIGGER(0x0afc, 4), +}; + +static struct peri_clk_data uartb4_data = { + .gate = HW_SW_GATE(0x0408, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_156m", + "ref_156m"), + .sel = SELECTOR(0x0a1c, 0, 2), + .div = FRAC_DIVIDER(0x0a1c, 4, 12, 8), + .trig = TRIGGER(0x0afc, 5), +}; + +static struct peri_clk_data ssp0_data = { + .gate = HW_SW_GATE(0x0410, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a20, 0, 3), + .div = DIVIDER(0x0a20, 4, 14), + .trig = TRIGGER(0x0afc, 6), +}; + +static struct peri_clk_data ssp2_data = { + .gate = HW_SW_GATE(0x0418, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_96m", + "ref_96m"), + .sel = SELECTOR(0x0a28, 0, 3), + .div = DIVIDER(0x0a28, 4, 14), + .trig = TRIGGER(0x0afc, 8), +}; + +static struct peri_clk_data bsc1_data = { + .gate = HW_SW_GATE(0x0458, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_13m", + "ref_13m"), + .sel = SELECTOR(0x0a64, 0, 3), + .trig = TRIGGER(0x0afc, 23), +}; + +static struct peri_clk_data bsc2_data = { + .gate = HW_SW_GATE(0x045c, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_13m", + "ref_13m"), + .sel = SELECTOR(0x0a68, 0, 3), + .trig = TRIGGER(0x0afc, 24), +}; + +static struct peri_clk_data bsc3_data = { + .gate = HW_SW_GATE(0x0484, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m", + "ref_104m", + "var_13m", + "ref_13m"), + .sel = SELECTOR(0x0a84, 0, 3), + .trig = TRIGGER(0x0b00, 2), +}; + +static struct peri_clk_data pwm_data = { + .gate = HW_SW_GATE(0x0468, 18, 2, 3), + .clocks = CLOCKS("ref_crystal", + "var_104m"), + .sel = SELECTOR(0x0a70, 0, 2), + .div = DIVIDER(0x0a70, 4, 3), + .trig = TRIGGER(0x0afc, 15), +}; + +/* + * CCU setup routines + * + * These are called from kona_dt_ccu_setup() to initialize the array + * of clocks provided by the CCU. Once allocated, the entries in + * the array are initialized by calling kona_clk_setup() with the + * initialization data for each clock. They return 0 if successful + * or an error code otherwise. 
+ */ +static int __init bcm281xx_root_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_ROOT_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate root clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_ROOT_CCU_FRAC_1M, frac_1m); + + return 0; +} + +static int __init bcm281xx_aon_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_AON_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate aon clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_HUB_TIMER, hub_timer); + PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC, pmu_bsc); + PERI_CLK_SETUP(clks, ccu, BCM281XX_AON_CCU_PMU_BSC_VAR, pmu_bsc_var); + + return 0; +} + +static int __init bcm281xx_hub_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_HUB_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate hub clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_HUB_CCU_TMON_1M, tmon_1m); + + return 0; +} + +static int __init bcm281xx_master_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_MASTER_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate master clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO1, sdio1); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO2, sdio2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO3, sdio3); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_SDIO4, sdio4); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_USB_IC, usb_ic); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_48M, hsic2_48m); + PERI_CLK_SETUP(clks, ccu, BCM281XX_MASTER_CCU_HSIC2_12M, hsic2_12m); + + return 0; +} + +static int __init bcm281xx_slave_ccu_clks_setup(struct ccu_data *ccu) +{ + struct clk **clks; + size_t count = BCM281XX_SLAVE_CCU_CLOCK_COUNT; + + clks = kzalloc(count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + pr_err("%s: failed to allocate slave clocks\n", __func__); + return -ENOMEM; + } + ccu->data.clks = clks; + ccu->data.clk_num = count; + + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB, uartb); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB2, uartb2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB3, uartb3); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_UARTB4, uartb4); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP0, ssp0); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_SSP2, ssp2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC1, bsc1); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC2, bsc2); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_BSC3, bsc3); + PERI_CLK_SETUP(clks, ccu, BCM281XX_SLAVE_CCU_PWM, pwm); + + return 0; +} + +/* Device tree match table callback functions */ + +static void __init kona_dt_root_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_root_ccu_clks_setup); +} + +static void __init kona_dt_aon_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, 
bcm281xx_aon_ccu_clks_setup); +} + +static void __init kona_dt_hub_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_hub_ccu_clks_setup); +} + +static void __init kona_dt_master_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_master_ccu_clks_setup); +} + +static void __init kona_dt_slave_ccu_setup(struct device_node *node) +{ + kona_dt_ccu_setup(node, bcm281xx_slave_ccu_clks_setup); +} + +CLK_OF_DECLARE(bcm11351_root_ccu, BCM11351_DT_ROOT_CCU_COMPAT, + kona_dt_root_ccu_setup); +CLK_OF_DECLARE(bcm11351_aon_ccu, BCM11351_DT_AON_CCU_COMPAT, + kona_dt_aon_ccu_setup); +CLK_OF_DECLARE(bcm11351_hub_ccu, BCM11351_DT_HUB_CCU_COMPAT, + kona_dt_hub_ccu_setup); +CLK_OF_DECLARE(bcm11351_master_ccu, BCM11351_DT_MASTER_CCU_COMPAT, + kona_dt_master_ccu_setup); +CLK_OF_DECLARE(bcm11351_slave_ccu, BCM11351_DT_SLAVE_CCU_COMPAT, + kona_dt_slave_ccu_setup); diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c new file mode 100644 index 000000000000..c7607feb18dd --- /dev/null +++ b/drivers/clk/bcm/clk-kona-setup.c @@ -0,0 +1,769 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/io.h> +#include <linux/of_address.h> + +#include "clk-kona.h" + +/* These are used when a selector or trigger is found to be unneeded */ +#define selector_clear_exists(sel) ((sel)->width = 0) +#define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS) + +LIST_HEAD(ccu_list); /* The list of set up CCUs */ + +/* Validity checking */ + +static bool clk_requires_trigger(struct kona_clk *bcm_clk) +{ + struct peri_clk_data *peri = bcm_clk->peri; + struct bcm_clk_sel *sel; + struct bcm_clk_div *div; + + if (bcm_clk->type != bcm_clk_peri) + return false; + + sel = &peri->sel; + if (sel->parent_count && selector_exists(sel)) + return true; + + div = &peri->div; + if (!divider_exists(div)) + return false; + + /* Fixed dividers don't need triggers */ + if (!divider_is_fixed(div)) + return true; + + div = &peri->pre_div; + + return divider_exists(div) && !divider_is_fixed(div); +} + +static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) +{ + struct peri_clk_data *peri; + struct bcm_clk_gate *gate; + struct bcm_clk_div *div; + struct bcm_clk_sel *sel; + struct bcm_clk_trig *trig; + const char *name; + u32 range; + u32 limit; + + BUG_ON(bcm_clk->type != bcm_clk_peri); + peri = bcm_clk->peri; + name = bcm_clk->name; + range = bcm_clk->ccu->range; + + limit = range - sizeof(u32); + limit = round_down(limit, sizeof(u32)); + + gate = &peri->gate; + if (gate_exists(gate)) { + if (gate->offset > limit) { + pr_err("%s: bad gate offset for %s (%u > %u)\n", + __func__, name, gate->offset, limit); + return false; + } + } + + div = &peri->div; + if (divider_exists(div)) { + if (div->offset > limit) { + pr_err("%s: bad divider offset for %s (%u > %u)\n", + __func__, name, div->offset, limit); + return false; + } + } + + div = &peri->pre_div; + if (divider_exists(div)) { + if (div->offset > limit) { + pr_err("%s: bad pre-divider offset for %s " + "(%u > %u)\n", + 
__func__, name, div->offset, limit); + return false; + } + } + + sel = &peri->sel; + if (selector_exists(sel)) { + if (sel->offset > limit) { + pr_err("%s: bad selector offset for %s (%u > %u)\n", + __func__, name, sel->offset, limit); + return false; + } + } + + trig = &peri->trig; + if (trigger_exists(trig)) { + if (trig->offset > limit) { + pr_err("%s: bad trigger offset for %s (%u > %u)\n", + __func__, name, trig->offset, limit); + return false; + } + } + + trig = &peri->pre_trig; + if (trigger_exists(trig)) { + if (trig->offset > limit) { + pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n", + __func__, name, trig->offset, limit); + return false; + } + } + + return true; +} + +/* A bit position must be less than the number of bits in a 32-bit register. */ +static bool bit_posn_valid(u32 bit_posn, const char *field_name, + const char *clock_name) +{ + u32 limit = BITS_PER_BYTE * sizeof(u32) - 1; + + if (bit_posn > limit) { + pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__, + field_name, clock_name, bit_posn, limit); + return false; + } + return true; +} + +/* + * A bitfield must be at least 1 bit wide. Both the low-order and + * high-order bits must lie within a 32-bit register. We require + * fields to be less than 32 bits wide, mainly because we use + * shifting to produce field masks, and shifting a full word width + * is not well-defined by the C standard. + */ +static bool bitfield_valid(u32 shift, u32 width, const char *field_name, + const char *clock_name) +{ + u32 limit = BITS_PER_BYTE * sizeof(u32); + + if (!width) { + pr_err("%s: bad %s field width 0 for %s\n", __func__, + field_name, clock_name); + return false; + } + if (shift + width > limit) { + pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__, + field_name, clock_name, shift, width, limit); + return false; + } + return true; +} + +/* + * All gates, if defined, have a status bit, and for hardware-only + * gates, that's it. Gates that can be software controlled also + * have an enable bit. And a gate that can be hardware or software + * controlled will have a hardware/software select bit. + */ +static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name, + const char *clock_name) +{ + if (!bit_posn_valid(gate->status_bit, "gate status", clock_name)) + return false; + + if (gate_is_sw_controllable(gate)) { + if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name)) + return false; + + if (gate_is_hw_controllable(gate)) { + if (!bit_posn_valid(gate->hw_sw_sel_bit, + "gate hw/sw select", + clock_name)) + return false; + } + } else { + BUG_ON(!gate_is_hw_controllable(gate)); + } + + return true; +} + +/* + * A selector bitfield must be valid. Its parent_sel array must + * also be reasonable for the field. + */ +static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name, + const char *clock_name) +{ + if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name)) + return false; + + if (sel->parent_count) { + u32 max_sel; + u32 limit; + + /* + * Make sure the selector field can hold all the + * selector values we expect to be able to use. A + * clock only needs to have a selector defined if it + * has more than one parent. And in that case the + * highest selector value will be in the last entry + * in the array. 
+ */ + max_sel = sel->parent_sel[sel->parent_count - 1]; + limit = (1 << sel->width) - 1; + if (max_sel > limit) { + pr_err("%s: bad selector for %s " + "(%u needs > %u bits)\n", + __func__, clock_name, max_sel, + sel->width); + return false; + } + } else { + pr_warn("%s: ignoring selector for %s (no parents)\n", + __func__, clock_name); + selector_clear_exists(sel); + kfree(sel->parent_sel); + sel->parent_sel = NULL; + } + + return true; +} + +/* + * A fixed divider just needs to be non-zero. A variable divider + * has to have a valid divider bitfield, and if it has a fraction, + * the width of the fraction must not be no more than the width of + * the divider as a whole. + */ +static bool div_valid(struct bcm_clk_div *div, const char *field_name, + const char *clock_name) +{ + if (divider_is_fixed(div)) { + /* Any fixed divider value but 0 is OK */ + if (div->fixed == 0) { + pr_err("%s: bad %s fixed value 0 for %s\n", __func__, + field_name, clock_name); + return false; + } + return true; + } + if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) + return false; + + if (divider_has_fraction(div)) + if (div->frac_width > div->width) { + pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", + __func__, field_name, clock_name, + div->frac_width, div->width); + return false; + } + + return true; +} + +/* + * If a clock has two dividers, the combined number of fractional + * bits must be representable in a 32-bit unsigned value. This + * is because we scale up a dividend using both dividers before + * dividing to improve accuracy, and we need to avoid overflow. + */ +static bool kona_dividers_valid(struct kona_clk *bcm_clk) +{ + struct peri_clk_data *peri = bcm_clk->peri; + struct bcm_clk_div *div; + struct bcm_clk_div *pre_div; + u32 limit; + + BUG_ON(bcm_clk->type != bcm_clk_peri); + + if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div)) + return true; + + div = &peri->div; + pre_div = &peri->pre_div; + if (divider_is_fixed(div) || divider_is_fixed(pre_div)) + return true; + + limit = BITS_PER_BYTE * sizeof(u32); + + return div->frac_width + pre_div->frac_width <= limit; +} + + +/* A trigger just needs to represent a valid bit position */ +static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name, + const char *clock_name) +{ + return bit_posn_valid(trig->bit, field_name, clock_name); +} + +/* Determine whether the set of peripheral clock registers are valid. */ +static bool +peri_clk_data_valid(struct kona_clk *bcm_clk) +{ + struct peri_clk_data *peri; + struct bcm_clk_gate *gate; + struct bcm_clk_sel *sel; + struct bcm_clk_div *div; + struct bcm_clk_div *pre_div; + struct bcm_clk_trig *trig; + const char *name; + + BUG_ON(bcm_clk->type != bcm_clk_peri); + + /* + * First validate register offsets. This is the only place + * where we need something from the ccu, so we do these + * together. 
+ */ + if (!peri_clk_data_offsets_valid(bcm_clk)) + return false; + + peri = bcm_clk->peri; + name = bcm_clk->name; + gate = &peri->gate; + if (gate_exists(gate) && !gate_valid(gate, "gate", name)) + return false; + + sel = &peri->sel; + if (selector_exists(sel)) { + if (!sel_valid(sel, "selector", name)) + return false; + + } else if (sel->parent_count > 1) { + pr_err("%s: multiple parents but no selector for %s\n", + __func__, name); + + return false; + } + + div = &peri->div; + pre_div = &peri->pre_div; + if (divider_exists(div)) { + if (!div_valid(div, "divider", name)) + return false; + + if (divider_exists(pre_div)) + if (!div_valid(pre_div, "pre-divider", name)) + return false; + } else if (divider_exists(pre_div)) { + pr_err("%s: pre-divider but no divider for %s\n", __func__, + name); + return false; + } + + trig = &peri->trig; + if (trigger_exists(trig)) { + if (!trig_valid(trig, "trigger", name)) + return false; + + if (trigger_exists(&peri->pre_trig)) { + if (!trig_valid(trig, "pre-trigger", name)) { + return false; + } + } + if (!clk_requires_trigger(bcm_clk)) { + pr_warn("%s: ignoring trigger for %s (not needed)\n", + __func__, name); + trigger_clear_exists(trig); + } + } else if (trigger_exists(&peri->pre_trig)) { + pr_err("%s: pre-trigger but no trigger for %s\n", __func__, + name); + return false; + } else if (clk_requires_trigger(bcm_clk)) { + pr_err("%s: required trigger missing for %s\n", __func__, + name); + return false; + } + + return kona_dividers_valid(bcm_clk); +} + +static bool kona_clk_valid(struct kona_clk *bcm_clk) +{ + switch (bcm_clk->type) { + case bcm_clk_peri: + if (!peri_clk_data_valid(bcm_clk)) + return false; + break; + default: + pr_err("%s: unrecognized clock type (%d)\n", __func__, + (int)bcm_clk->type); + return false; + } + return true; +} + +/* + * Scan an array of parent clock names to determine whether there + * are any entries containing BAD_CLK_NAME. Such entries are + * placeholders for non-supported clocks. Keep track of the + * position of each clock name in the original array. + * + * Allocates an array of pointers to to hold the names of all + * non-null entries in the original array, and returns a pointer to + * that array in *names. This will be used for registering the + * clock with the common clock code. On successful return, + * *count indicates how many entries are in that names array. + * + * If there is more than one entry in the resulting names array, + * another array is allocated to record the parent selector value + * for each (defined) parent clock. This is the value that + * represents this parent clock in the clock's source selector + * register. The position of the clock in the original parent array + * defines that selector value. The number of entries in this array + * is the same as the number of entries in the parent names array. + * + * The array of selector values is returned. If the clock has no + * parents, no selector is required and a null pointer is returned. + * + * Returns a null pointer if the clock names array supplied was + * null. (This is not an error.) + * + * Returns a pointer-coded error if an error occurs. 
+ */ +static u32 *parent_process(const char *clocks[], + u32 *count, const char ***names) +{ + static const char **parent_names; + static u32 *parent_sel; + const char **clock; + u32 parent_count; + u32 bad_count = 0; + u32 orig_count; + u32 i; + u32 j; + + *count = 0; /* In case of early return */ + *names = NULL; + if (!clocks) + return NULL; + + /* + * Count the number of names in the null-terminated array, + * and find out how many of those are actually clock names. + */ + for (clock = clocks; *clock; clock++) + if (*clock == BAD_CLK_NAME) + bad_count++; + orig_count = (u32)(clock - clocks); + parent_count = orig_count - bad_count; + + /* If all clocks are unsupported, we treat it as no clock */ + if (!parent_count) + return NULL; + + /* Avoid exceeding our parent clock limit */ + if (parent_count > PARENT_COUNT_MAX) { + pr_err("%s: too many parents (%u > %u)\n", __func__, + parent_count, PARENT_COUNT_MAX); + return ERR_PTR(-EINVAL); + } + + /* + * There is one parent name for each defined parent clock. + * We also maintain an array containing the selector value + * for each defined clock. If there's only one clock, the + * selector is not required, but we allocate space for the + * array anyway to keep things simple. + */ + parent_names = kmalloc(parent_count * sizeof(parent_names), GFP_KERNEL); + if (!parent_names) { + pr_err("%s: error allocating %u parent names\n", __func__, + parent_count); + return ERR_PTR(-ENOMEM); + } + + /* There is at least one parent, so allocate a selector array */ + + parent_sel = kmalloc(parent_count * sizeof(*parent_sel), GFP_KERNEL); + if (!parent_sel) { + pr_err("%s: error allocating %u parent selectors\n", __func__, + parent_count); + kfree(parent_names); + + return ERR_PTR(-ENOMEM); + } + + /* Now fill in the parent names and selector arrays */ + for (i = 0, j = 0; i < orig_count; i++) { + if (clocks[i] != BAD_CLK_NAME) { + parent_names[j] = clocks[i]; + parent_sel[j] = i; + j++; + } + } + *names = parent_names; + *count = parent_count; + + return parent_sel; +} + +static int +clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel, + struct clk_init_data *init_data) +{ + const char **parent_names = NULL; + u32 parent_count = 0; + u32 *parent_sel; + + /* + * If a peripheral clock has multiple parents, the value + * used by the hardware to select that parent is represented + * by the parent clock's position in the "clocks" list. Some + * values don't have defined or supported clocks; these will + * have BAD_CLK_NAME entries in the parents[] array. The + * list is terminated by a NULL entry. + * + * We need to supply (only) the names of defined parent + * clocks when registering a clock though, so we use an + * array of parent selector values to map between the + * indexes the common clock code uses and the selector + * values we need. 
+ */ + parent_sel = parent_process(clocks, &parent_count, &parent_names); + if (IS_ERR(parent_sel)) { + int ret = PTR_ERR(parent_sel); + + pr_err("%s: error processing parent clocks for %s (%d)\n", + __func__, init_data->name, ret); + + return ret; + } + + init_data->parent_names = parent_names; + init_data->num_parents = parent_count; + + sel->parent_count = parent_count; + sel->parent_sel = parent_sel; + + return 0; +} + +static void clk_sel_teardown(struct bcm_clk_sel *sel, + struct clk_init_data *init_data) +{ + kfree(sel->parent_sel); + sel->parent_sel = NULL; + sel->parent_count = 0; + + init_data->num_parents = 0; + kfree(init_data->parent_names); + init_data->parent_names = NULL; +} + +static void peri_clk_teardown(struct peri_clk_data *data, + struct clk_init_data *init_data) +{ + clk_sel_teardown(&data->sel, init_data); + init_data->ops = NULL; +} + +/* + * Caller is responsible for freeing the parent_names[] and + * parent_sel[] arrays in the peripheral clock's "data" structure + * that can be assigned if the clock has one or more parent clocks + * associated with it. + */ +static int peri_clk_setup(struct ccu_data *ccu, struct peri_clk_data *data, + struct clk_init_data *init_data) +{ + init_data->ops = &kona_peri_clk_ops; + init_data->flags = CLK_IGNORE_UNUSED; + + return clk_sel_setup(data->clocks, &data->sel, init_data); +} + +static void bcm_clk_teardown(struct kona_clk *bcm_clk) +{ + switch (bcm_clk->type) { + case bcm_clk_peri: + peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); + break; + default: + break; + } + bcm_clk->data = NULL; + bcm_clk->type = bcm_clk_none; +} + +static void kona_clk_teardown(struct clk *clk) +{ + struct clk_hw *hw; + struct kona_clk *bcm_clk; + + if (!clk) + return; + + hw = __clk_get_hw(clk); + if (!hw) { + pr_err("%s: clk %p has null hw pointer\n", __func__, clk); + return; + } + clk_unregister(clk); + + bcm_clk = to_kona_clk(hw); + bcm_clk_teardown(bcm_clk); +} + +struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name, + enum bcm_clk_type type, void *data) +{ + struct kona_clk *bcm_clk; + struct clk_init_data *init_data; + struct clk *clk = NULL; + + bcm_clk = kzalloc(sizeof(*bcm_clk), GFP_KERNEL); + if (!bcm_clk) { + pr_err("%s: failed to allocate bcm_clk for %s\n", __func__, + name); + return NULL; + } + bcm_clk->ccu = ccu; + bcm_clk->name = name; + + init_data = &bcm_clk->init_data; + init_data->name = name; + switch (type) { + case bcm_clk_peri: + if (peri_clk_setup(ccu, data, init_data)) + goto out_free; + break; + default: + data = NULL; + break; + } + bcm_clk->type = type; + bcm_clk->data = data; + + /* Make sure everything makes sense before we set it up */ + if (!kona_clk_valid(bcm_clk)) { + pr_err("%s: clock data invalid for %s\n", __func__, name); + goto out_teardown; + } + + bcm_clk->hw.init = init_data; + clk = clk_register(NULL, &bcm_clk->hw); + if (IS_ERR(clk)) { + pr_err("%s: error registering clock %s (%ld)\n", __func__, + name, PTR_ERR(clk)); + goto out_teardown; + } + BUG_ON(!clk); + + return clk; +out_teardown: + bcm_clk_teardown(bcm_clk); +out_free: + kfree(bcm_clk); + + return NULL; +} + +static void ccu_clks_teardown(struct ccu_data *ccu) +{ + u32 i; + + for (i = 0; i < ccu->data.clk_num; i++) + kona_clk_teardown(ccu->data.clks[i]); + kfree(ccu->data.clks); +} + +static void kona_ccu_teardown(struct ccu_data *ccu) +{ + if (!ccu) + return; + + if (!ccu->base) + goto done; + + of_clk_del_provider(ccu->node); /* safe if never added */ + ccu_clks_teardown(ccu); + list_del(&ccu->links); + 
of_node_put(ccu->node); + iounmap(ccu->base); +done: + kfree(ccu->name); + kfree(ccu); +} + +/* + * Set up a CCU. Call the provided ccu_clks_setup callback to + * initialize the array of clocks provided by the CCU. + */ +void __init kona_dt_ccu_setup(struct device_node *node, + int (*ccu_clks_setup)(struct ccu_data *)) +{ + struct ccu_data *ccu; + struct resource res = { 0 }; + resource_size_t range; + int ret; + + ccu = kzalloc(sizeof(*ccu), GFP_KERNEL); + if (ccu) + ccu->name = kstrdup(node->name, GFP_KERNEL); + if (!ccu || !ccu->name) { + pr_err("%s: unable to allocate CCU struct for %s\n", + __func__, node->name); + kfree(ccu); + + return; + } + + ret = of_address_to_resource(node, 0, &res); + if (ret) { + pr_err("%s: no valid CCU registers found for %s\n", __func__, + node->name); + goto out_err; + } + + range = resource_size(&res); + if (range > (resource_size_t)U32_MAX) { + pr_err("%s: address range too large for %s\n", __func__, + node->name); + goto out_err; + } + + ccu->range = (u32)range; + ccu->base = ioremap(res.start, ccu->range); + if (!ccu->base) { + pr_err("%s: unable to map CCU registers for %s\n", __func__, + node->name); + goto out_err; + } + + spin_lock_init(&ccu->lock); + INIT_LIST_HEAD(&ccu->links); + ccu->node = of_node_get(node); + + list_add_tail(&ccu->links, &ccu_list); + + /* Set up clocks array (in ccu->data) */ + if (ccu_clks_setup(ccu)) + goto out_err; + + ret = of_clk_add_provider(node, of_clk_src_onecell_get, &ccu->data); + if (ret) { + pr_err("%s: error adding ccu %s as provider (%d)\n", __func__, + node->name, ret); + goto out_err; + } + + if (!kona_ccu_init(ccu)) + pr_err("Broadcom %s initialization had errors\n", node->name); + + return; +out_err: + kona_ccu_teardown(ccu); + pr_err("Broadcom %s setup aborted\n", node->name); +} diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c new file mode 100644 index 000000000000..e3d339e08309 --- /dev/null +++ b/drivers/clk/bcm/clk-kona.c @@ -0,0 +1,1033 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "clk-kona.h" + +#include <linux/delay.h> + +#define CCU_ACCESS_PASSWORD 0xA5A500 +#define CLK_GATE_DELAY_LOOP 2000 + +/* Bitfield operations */ + +/* Produces a mask of set bits covering a range of a 32-bit value */ +static inline u32 bitfield_mask(u32 shift, u32 width) +{ + return ((1 << width) - 1) << shift; +} + +/* Extract the value of a bitfield found within a given register value */ +static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width) +{ + return (reg_val & bitfield_mask(shift, width)) >> shift; +} + +/* Replace the value of a bitfield found within a given register value */ +static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val) +{ + u32 mask = bitfield_mask(shift, width); + + return (reg_val & ~mask) | (val << shift); +} + +/* Divider and scaling helpers */ + +/* + * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values + * unsigned. 
Note that unlike do_div(), the remainder is discarded + * and the return value is the quotient (not the remainder). + */ +u64 do_div_round_closest(u64 dividend, unsigned long divisor) +{ + u64 result; + + result = dividend + ((u64)divisor >> 1); + (void)do_div(result, divisor); + + return result; +} + +/* Convert a divider into the scaled divisor value it represents. */ +static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) +{ + return (u64)reg_div + ((u64)1 << div->frac_width); +} + +/* + * Build a scaled divider value as close as possible to the + * given whole part (div_value) and fractional part (expressed + * in billionths). + */ +u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) +{ + u64 combined; + + BUG_ON(!div_value); + BUG_ON(billionths >= BILLION); + + combined = (u64)div_value * BILLION + billionths; + combined <<= div->frac_width; + + return do_div_round_closest(combined, BILLION); +} + +/* The scaled minimum divisor representable by a divider */ +static inline u64 +scaled_div_min(struct bcm_clk_div *div) +{ + if (divider_is_fixed(div)) + return (u64)div->fixed; + + return scaled_div_value(div, 0); +} + +/* The scaled maximum divisor representable by a divider */ +u64 scaled_div_max(struct bcm_clk_div *div) +{ + u32 reg_div; + + if (divider_is_fixed(div)) + return (u64)div->fixed; + + reg_div = ((u32)1 << div->width) - 1; + + return scaled_div_value(div, reg_div); +} + +/* + * Convert a scaled divisor into its divider representation as + * stored in a divider register field. + */ +static inline u32 +divider(struct bcm_clk_div *div, u64 scaled_div) +{ + BUG_ON(scaled_div < scaled_div_min(div)); + BUG_ON(scaled_div > scaled_div_max(div)); + + return (u32)(scaled_div - ((u64)1 << div->frac_width)); +} + +/* Return a rate scaled for use when dividing by a scaled divisor. */ +static inline u64 +scale_rate(struct bcm_clk_div *div, u32 rate) +{ + if (divider_is_fixed(div)) + return (u64)rate; + + return (u64)rate << div->frac_width; +} + +/* CCU access */ + +/* Read a 32-bit register value from a CCU's address space. */ +static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset) +{ + return readl(ccu->base + reg_offset); +} + +/* Write a 32-bit register value into a CCU's address space. */ +static inline void +__ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val) +{ + writel(reg_val, ccu->base + reg_offset); +} + +static inline unsigned long ccu_lock(struct ccu_data *ccu) +{ + unsigned long flags; + + spin_lock_irqsave(&ccu->lock, flags); + + return flags; +} +static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags) +{ + spin_unlock_irqrestore(&ccu->lock, flags); +} + +/* + * Enable/disable write access to CCU protected registers. The + * WR_ACCESS register for all CCUs is at offset 0. + */ +static inline void __ccu_write_enable(struct ccu_data *ccu) +{ + if (ccu->write_enabled) { + pr_err("%s: access already enabled for %s\n", __func__, + ccu->name); + return; + } + ccu->write_enabled = true; + __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1); +} + +static inline void __ccu_write_disable(struct ccu_data *ccu) +{ + if (!ccu->write_enabled) { + pr_err("%s: access wasn't enabled for %s\n", __func__, + ccu->name); + return; + } + + __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD); + ccu->write_enabled = false; +} + +/* + * Poll a register in a CCU's address space, returning when the + * specified bit in that register's value is set (or clear). Delay + * a microsecond after each read of the register. 
Returns true if + * successful, or false if we gave up trying. + * + * Caller must ensure the CCU lock is held. + */ +static inline bool +__ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want) +{ + unsigned int tries; + u32 bit_mask = 1 << bit; + + for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) { + u32 val; + bool bit_val; + + val = __ccu_read(ccu, reg_offset); + bit_val = (val & bit_mask) != 0; + if (bit_val == want) + return true; + udelay(1); + } + return false; +} + +/* Gate operations */ + +/* Determine whether a clock is gated. CCU lock must be held. */ +static bool +__is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + u32 bit_mask; + u32 reg_val; + + /* If there is no gate we can assume it's enabled. */ + if (!gate_exists(gate)) + return true; + + bit_mask = 1 << gate->status_bit; + reg_val = __ccu_read(ccu, gate->offset); + + return (reg_val & bit_mask) != 0; +} + +/* Determine whether a clock is gated. */ +static bool +is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + long flags; + bool ret; + + /* Avoid taking the lock if we can */ + if (!gate_exists(gate)) + return true; + + flags = ccu_lock(ccu); + ret = __is_clk_gate_enabled(ccu, gate); + ccu_unlock(ccu, flags); + + return ret; +} + +/* + * Commit our desired gate state to the hardware. + * Returns true if successful, false otherwise. + */ +static bool +__gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + u32 reg_val; + u32 mask; + bool enabled = false; + + BUG_ON(!gate_exists(gate)); + if (!gate_is_sw_controllable(gate)) + return true; /* Nothing we can change */ + + reg_val = __ccu_read(ccu, gate->offset); + + /* For a hardware/software gate, set which is in control */ + if (gate_is_hw_controllable(gate)) { + mask = (u32)1 << gate->hw_sw_sel_bit; + if (gate_is_sw_managed(gate)) + reg_val |= mask; + else + reg_val &= ~mask; + } + + /* + * If software is in control, enable or disable the gate. + * If hardware is, clear the enabled bit for good measure. + * If a software controlled gate can't be disabled, we're + * required to write a 0 into the enable bit (but the gate + * will be enabled). + */ + mask = (u32)1 << gate->en_bit; + if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) && + !gate_is_no_disable(gate)) + reg_val |= mask; + else + reg_val &= ~mask; + + __ccu_write(ccu, gate->offset, reg_val); + + /* For a hardware controlled gate, we're done */ + if (!gate_is_sw_managed(gate)) + return true; + + /* Otherwise wait for the gate to be in desired state */ + return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled); +} + +/* + * Initialize a gate. Our desired state (hardware/software select, + * and if software, its enable state) is committed to hardware + * without the usual checks to see if it's already set up that way. + * Returns true if successful, false otherwise. + */ +static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate) +{ + if (!gate_exists(gate)) + return true; + return __gate_commit(ccu, gate); +} + +/* + * Set a gate to enabled or disabled state. Does nothing if the + * gate is not currently under software control, or if it is already + * in the requested state. Returns true if successful, false + * otherwise. CCU lock must be held. 
+ */ +static bool +__clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable) +{ + bool ret; + + if (!gate_exists(gate) || !gate_is_sw_managed(gate)) + return true; /* Nothing to do */ + + if (!enable && gate_is_no_disable(gate)) { + pr_warn("%s: invalid gate disable request (ignoring)\n", + __func__); + return true; + } + + if (enable == gate_is_enabled(gate)) + return true; /* No change */ + + gate_flip_enabled(gate); + ret = __gate_commit(ccu, gate); + if (!ret) + gate_flip_enabled(gate); /* Revert the change */ + + return ret; +} + +/* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */ +static int clk_gate(struct ccu_data *ccu, const char *name, + struct bcm_clk_gate *gate, bool enable) +{ + unsigned long flags; + bool success; + + /* + * Avoid taking the lock if we can. We quietly ignore + * requests to change state that don't make sense. + */ + if (!gate_exists(gate) || !gate_is_sw_managed(gate)) + return 0; + if (!enable && gate_is_no_disable(gate)) + return 0; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + success = __clk_gate(ccu, gate, enable); + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + + if (success) + return 0; + + pr_err("%s: failed to %s gate for %s\n", __func__, + enable ? "enable" : "disable", name); + + return -EIO; +} + +/* Trigger operations */ + +/* + * Caller must ensure CCU lock is held and access is enabled. + * Returns true if successful, false otherwise. + */ +static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig) +{ + /* Trigger the clock and wait for it to finish */ + __ccu_write(ccu, trig->offset, 1 << trig->bit); + + return __ccu_wait_bit(ccu, trig->offset, trig->bit, false); +} + +/* Divider operations */ + +/* Read a divider value and return the scaled divisor it represents. */ +static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) +{ + unsigned long flags; + u32 reg_val; + u32 reg_div; + + if (divider_is_fixed(div)) + return (u64)div->fixed; + + flags = ccu_lock(ccu); + reg_val = __ccu_read(ccu, div->offset); + ccu_unlock(ccu, flags); + + /* Extract the full divider field from the register value */ + reg_div = bitfield_extract(reg_val, div->shift, div->width); + + /* Return the scaled divisor value it represents */ + return scaled_div_value(div, reg_div); +} + +/* + * Convert a divider's scaled divisor value into its recorded form + * and commit it into the hardware divider register. + * + * Returns 0 on success. Returns -EINVAL for invalid arguments. + * Returns -ENXIO if gating failed, and -EIO if a trigger failed. + */ +static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_div *div, struct bcm_clk_trig *trig) +{ + bool enabled; + u32 reg_div; + u32 reg_val; + int ret = 0; + + BUG_ON(divider_is_fixed(div)); + + /* + * If we're just initializing the divider, and no initial + * state was defined in the device tree, we just find out + * what its current value is rather than updating it. 
+ */ + if (div->scaled_div == BAD_SCALED_DIV_VALUE) { + reg_val = __ccu_read(ccu, div->offset); + reg_div = bitfield_extract(reg_val, div->shift, div->width); + div->scaled_div = scaled_div_value(div, reg_div); + + return 0; + } + + /* Convert the scaled divisor to the value we need to record */ + reg_div = divider(div, div->scaled_div); + + /* Clock needs to be enabled before changing the rate */ + enabled = __is_clk_gate_enabled(ccu, gate); + if (!enabled && !__clk_gate(ccu, gate, true)) { + ret = -ENXIO; + goto out; + } + + /* Replace the divider value and record the result */ + reg_val = __ccu_read(ccu, div->offset); + reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); + __ccu_write(ccu, div->offset, reg_val); + + /* If the trigger fails we still want to disable the gate */ + if (!__clk_trigger(ccu, trig)) + ret = -EIO; + + /* Disable the clock again if it was disabled to begin with */ + if (!enabled && !__clk_gate(ccu, gate, false)) + ret = ret ? ret : -ENXIO; /* return first error */ +out: + return ret; +} + +/* + * Initialize a divider by committing our desired state to hardware + * without the usual checks to see if it's already set up that way. + * Returns true if successful, false otherwise. + */ +static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_div *div, struct bcm_clk_trig *trig) +{ + if (!divider_exists(div) || divider_is_fixed(div)) + return true; + return !__div_commit(ccu, gate, div, trig); +} + +static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_div *div, struct bcm_clk_trig *trig, + u64 scaled_div) +{ + unsigned long flags; + u64 previous; + int ret; + + BUG_ON(divider_is_fixed(div)); + + previous = div->scaled_div; + if (previous == scaled_div) + return 0; /* No change */ + + div->scaled_div = scaled_div; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + ret = __div_commit(ccu, gate, div, trig); + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + + if (ret) + div->scaled_div = previous; /* Revert the change */ + + return ret; + +} + +/* Common clock rate helpers */ + +/* + * Implement the common clock framework recalc_rate method, taking + * into account a divider and an optional pre-divider. The + * pre-divider register pointer may be NULL. + */ +static unsigned long clk_recalc_rate(struct ccu_data *ccu, + struct bcm_clk_div *div, struct bcm_clk_div *pre_div, + unsigned long parent_rate) +{ + u64 scaled_parent_rate; + u64 scaled_div; + u64 result; + + if (!divider_exists(div)) + return parent_rate; + + if (parent_rate > (unsigned long)LONG_MAX) + return 0; /* actually this would be a caller bug */ + + /* + * If there is a pre-divider, divide the scaled parent rate + * by the pre-divider value first. In this case--to improve + * accuracy--scale the parent rate by *both* the pre-divider + * value and the divider before actually computing the + * result of the pre-divider. + * + * If there's only one divider, just scale the parent rate. + */ + if (pre_div && divider_exists(pre_div)) { + u64 scaled_rate; + + scaled_rate = scale_rate(pre_div, parent_rate); + scaled_rate = scale_rate(div, scaled_rate); + scaled_div = divider_read_scaled(ccu, pre_div); + scaled_parent_rate = do_div_round_closest(scaled_rate, + scaled_div); + } else { + scaled_parent_rate = scale_rate(div, parent_rate); + } + + /* + * Get the scaled divisor value, and divide the scaled + * parent rate by that to determine this clock's resulting + * rate. 
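+ *
+ * Worked example (illustrative numbers): with no pre-divider, a
+ * 100 MHz parent, frac_width of 4 and a divider field reading 0x2c,
+ * the scaled divisor is 0x2c + 0x10 = 60 (i.e. divide by 3.75), so
+ *
+ *	scaled_parent_rate = 100000000 << 4 = 1600000000
+ *	rate = 1600000000 / 60, rounded to closest = 26666667 Hz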
+ */ + scaled_div = divider_read_scaled(ccu, div); + result = do_div_round_closest(scaled_parent_rate, scaled_div); + + return (unsigned long)result; +} + +/* + * Compute the output rate produced when a given parent rate is fed + * into two dividers. The pre-divider can be NULL, and even if it's + * non-null it may be nonexistent. It's also OK for the divider to + * be nonexistent, and in that case the pre-divider is also ignored. + * + * If scaled_div is non-null, it is used to return the scaled divisor + * value used by the (downstream) divider to produce that rate. + */ +static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div, + struct bcm_clk_div *pre_div, + unsigned long rate, unsigned long parent_rate, + u64 *scaled_div) +{ + u64 scaled_parent_rate; + u64 min_scaled_div; + u64 max_scaled_div; + u64 best_scaled_div; + u64 result; + + BUG_ON(!divider_exists(div)); + BUG_ON(!rate); + BUG_ON(parent_rate > (u64)LONG_MAX); + + /* + * If there is a pre-divider, divide the scaled parent rate + * by the pre-divider value first. In this case--to improve + * accuracy--scale the parent rate by *both* the pre-divider + * value and the divider before actually computing the + * result of the pre-divider. + * + * If there's only one divider, just scale the parent rate. + * + * For simplicity we treat the pre-divider as fixed (for now). + */ + if (divider_exists(pre_div)) { + u64 scaled_rate; + u64 scaled_pre_div; + + scaled_rate = scale_rate(pre_div, parent_rate); + scaled_rate = scale_rate(div, scaled_rate); + scaled_pre_div = divider_read_scaled(ccu, pre_div); + scaled_parent_rate = do_div_round_closest(scaled_rate, + scaled_pre_div); + } else { + scaled_parent_rate = scale_rate(div, parent_rate); + } + + /* + * Compute the best possible divider and ensure it is in + * range. A fixed divider can't be changed, so just report + * the best we can do. + */ + if (!divider_is_fixed(div)) { + best_scaled_div = do_div_round_closest(scaled_parent_rate, + rate); + min_scaled_div = scaled_div_min(div); + max_scaled_div = scaled_div_max(div); + if (best_scaled_div > max_scaled_div) + best_scaled_div = max_scaled_div; + else if (best_scaled_div < min_scaled_div) + best_scaled_div = min_scaled_div; + } else { + best_scaled_div = divider_read_scaled(ccu, div); + } + + /* OK, figure out the resulting rate */ + result = do_div_round_closest(scaled_parent_rate, best_scaled_div); + + if (scaled_div) + *scaled_div = best_scaled_div; + + return (long)result; +} + +/* Common clock parent helpers */ + +/* + * For a given parent selector (register field) value, find the + * index into a selector's parent_sel array that contains it. + * Returns the index, or BAD_CLK_INDEX if it's not found. + */ +static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel) +{ + u8 i; + + BUG_ON(sel->parent_count > (u32)U8_MAX); + for (i = 0; i < sel->parent_count; i++) + if (sel->parent_sel[i] == parent_sel) + return i; + return BAD_CLK_INDEX; +} + +/* + * Fetch the current value of the selector, and translate that into + * its corresponding index in the parent array we registered with + * the clock framework. + * + * Returns parent array index that corresponds with the value found, + * or BAD_CLK_INDEX if the found value is out of range. 
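+ *
+ * For example (illustrative values), with parent_sel[] = { 1, 2, 4 }
+ * a selector field that reads back 4 maps to parent index 2, while a
+ * reset-default field value of 0 is not in the array and yields
+ * BAD_CLK_INDEX.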
+ */ +static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel) +{ + unsigned long flags; + u32 reg_val; + u32 parent_sel; + u8 index; + + /* If there's no selector, there's only one parent */ + if (!selector_exists(sel)) + return 0; + + /* Get the value in the selector register */ + flags = ccu_lock(ccu); + reg_val = __ccu_read(ccu, sel->offset); + ccu_unlock(ccu, flags); + + parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); + + /* Look up that selector's parent array index and return it */ + index = parent_index(sel, parent_sel); + if (index == BAD_CLK_INDEX) + pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n", + __func__, parent_sel, ccu->name, sel->offset); + + return index; +} + +/* + * Commit our desired selector value to the hardware. + * + * Returns 0 on success. Returns -EINVAL for invalid arguments. + * Returns -ENXIO if gating failed, and -EIO if a trigger failed. + */ +static int +__sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) +{ + u32 parent_sel; + u32 reg_val; + bool enabled; + int ret = 0; + + BUG_ON(!selector_exists(sel)); + + /* + * If we're just initializing the selector, and no initial + * state was defined in the device tree, we just find out + * what its current value is rather than updating it. + */ + if (sel->clk_index == BAD_CLK_INDEX) { + u8 index; + + reg_val = __ccu_read(ccu, sel->offset); + parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); + index = parent_index(sel, parent_sel); + if (index == BAD_CLK_INDEX) + return -EINVAL; + sel->clk_index = index; + + return 0; + } + + BUG_ON((u32)sel->clk_index >= sel->parent_count); + parent_sel = sel->parent_sel[sel->clk_index]; + + /* Clock needs to be enabled before changing the parent */ + enabled = __is_clk_gate_enabled(ccu, gate); + if (!enabled && !__clk_gate(ccu, gate, true)) + return -ENXIO; + + /* Replace the selector value and record the result */ + reg_val = __ccu_read(ccu, sel->offset); + reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel); + __ccu_write(ccu, sel->offset, reg_val); + + /* If the trigger fails we still want to disable the gate */ + if (!__clk_trigger(ccu, trig)) + ret = -EIO; + + /* Disable the clock again if it was disabled to begin with */ + if (!enabled && !__clk_gate(ccu, gate, false)) + ret = ret ? ret : -ENXIO; /* return first error */ + + return ret; +} + +/* + * Initialize a selector by committing our desired state to hardware + * without the usual checks to see if it's already set up that way. + * Returns true if successful, false otherwise. + */ +static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) +{ + if (!selector_exists(sel)) + return true; + return !__sel_commit(ccu, gate, sel, trig); +} + +/* + * Write a new value into a selector register to switch to a + * different parent clock. Returns 0 on success, or an error code + * (from __sel_commit()) otherwise. 
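+ * If the commit fails, the previously selected clk_index is restored,
+ * mirroring the revert done in divider_write() above.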
+ */ +static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, + struct bcm_clk_sel *sel, struct bcm_clk_trig *trig, + u8 index) +{ + unsigned long flags; + u8 previous; + int ret; + + previous = sel->clk_index; + if (previous == index) + return 0; /* No change */ + + sel->clk_index = index; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + ret = __sel_commit(ccu, gate, sel, trig); + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + + if (ret) + sel->clk_index = previous; /* Revert the change */ + + return ret; +} + +/* Clock operations */ + +static int kona_peri_clk_enable(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + + return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); +} + +static void kona_peri_clk_disable(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + + (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); +} + +static int kona_peri_clk_is_enabled(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + + return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; +} + +static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + + return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, + parent_rate); +} + +static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct bcm_clk_div *div = &bcm_clk->peri->div; + + if (!divider_exists(div)) + return __clk_get_rate(hw->clk); + + /* Quietly avoid a zero rate */ + return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, + rate ? rate : 1, *parent_rate, NULL); +} + +static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + struct bcm_clk_sel *sel = &data->sel; + struct bcm_clk_trig *trig; + int ret; + + BUG_ON(index >= sel->parent_count); + + /* If there's only one parent we don't require a selector */ + if (!selector_exists(sel)) + return 0; + + /* + * The regular trigger is used by default, but if there's a + * pre-trigger we want to use that instead. + */ + trig = trigger_exists(&data->pre_trig) ? &data->pre_trig + : &data->trig; + + ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index); + if (ret == -ENXIO) { + pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name); + ret = -EIO; /* Don't proliferate weird errors */ + } else if (ret == -EIO) { + pr_err("%s: %strigger failed for %s\n", __func__, + trig == &data->pre_trig ? "pre-" : "", + bcm_clk->name); + } + + return ret; +} + +static u8 kona_peri_clk_get_parent(struct clk_hw *hw) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + u8 index; + + index = selector_read_index(bcm_clk->ccu, &data->sel); + + /* Not all callers would handle an out-of-range value gracefully */ + return index == BAD_CLK_INDEX ? 
0 : index; +} + +static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct kona_clk *bcm_clk = to_kona_clk(hw); + struct peri_clk_data *data = bcm_clk->peri; + struct bcm_clk_div *div = &data->div; + u64 scaled_div = 0; + int ret; + + if (parent_rate > (unsigned long)LONG_MAX) + return -EINVAL; + + if (rate == __clk_get_rate(hw->clk)) + return 0; + + if (!divider_exists(div)) + return rate == parent_rate ? 0 : -EINVAL; + + /* + * A fixed divider can't be changed. (Nor can a fixed + * pre-divider be, but for now we never actually try to + * change that.) Tolerate a request for a no-op change. + */ + if (divider_is_fixed(&data->div)) + return rate == parent_rate ? 0 : -EINVAL; + + /* + * Get the scaled divisor value needed to achieve a clock + * rate as close as possible to what was requested, given + * the parent clock rate supplied. + */ + (void)round_rate(bcm_clk->ccu, div, &data->pre_div, + rate ? rate : 1, parent_rate, &scaled_div); + + /* + * We aren't updating any pre-divider at this point, so + * we'll use the regular trigger. + */ + ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, + &data->trig, scaled_div); + if (ret == -ENXIO) { + pr_err("%s: gating failure for %s\n", __func__, bcm_clk->name); + ret = -EIO; /* Don't proliferate weird errors */ + } else if (ret == -EIO) { + pr_err("%s: trigger failed for %s\n", __func__, bcm_clk->name); + } + + return ret; +} + +struct clk_ops kona_peri_clk_ops = { + .enable = kona_peri_clk_enable, + .disable = kona_peri_clk_disable, + .is_enabled = kona_peri_clk_is_enabled, + .recalc_rate = kona_peri_clk_recalc_rate, + .round_rate = kona_peri_clk_round_rate, + .set_parent = kona_peri_clk_set_parent, + .get_parent = kona_peri_clk_get_parent, + .set_rate = kona_peri_clk_set_rate, +}; + +/* Put a peripheral clock into its initial state */ +static bool __peri_clk_init(struct kona_clk *bcm_clk) +{ + struct ccu_data *ccu = bcm_clk->ccu; + struct peri_clk_data *peri = bcm_clk->peri; + const char *name = bcm_clk->name; + struct bcm_clk_trig *trig; + + BUG_ON(bcm_clk->type != bcm_clk_peri); + + if (!gate_init(ccu, &peri->gate)) { + pr_err("%s: error initializing gate for %s\n", __func__, name); + return false; + } + if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) { + pr_err("%s: error initializing divider for %s\n", __func__, + name); + return false; + } + + /* + * For the pre-divider and selector, the pre-trigger is used + * if it's present, otherwise we just use the regular trigger. + */ + trig = trigger_exists(&peri->pre_trig) ? 
&peri->pre_trig + : &peri->trig; + + if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) { + pr_err("%s: error initializing pre-divider for %s\n", __func__, + name); + return false; + } + + if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) { + pr_err("%s: error initializing selector for %s\n", __func__, + name); + return false; + } + + return true; +} + +static bool __kona_clk_init(struct kona_clk *bcm_clk) +{ + switch (bcm_clk->type) { + case bcm_clk_peri: + return __peri_clk_init(bcm_clk); + default: + BUG(); + } + return -EINVAL; +} + +/* Set a CCU and all its clocks into their desired initial state */ +bool __init kona_ccu_init(struct ccu_data *ccu) +{ + unsigned long flags; + unsigned int which; + struct clk **clks = ccu->data.clks; + bool success = true; + + flags = ccu_lock(ccu); + __ccu_write_enable(ccu); + + for (which = 0; which < ccu->data.clk_num; which++) { + struct kona_clk *bcm_clk; + + if (!clks[which]) + continue; + bcm_clk = to_kona_clk(__clk_get_hw(clks[which])); + success &= __kona_clk_init(bcm_clk); + } + + __ccu_write_disable(ccu); + ccu_unlock(ccu, flags); + return success; +} diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h new file mode 100644 index 000000000000..5e139adc3dc5 --- /dev/null +++ b/drivers/clk/bcm/clk-kona.h @@ -0,0 +1,410 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * Copyright 2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CLK_KONA_H +#define _CLK_KONA_H + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/clk-provider.h> + +#define BILLION 1000000000 + +/* The common clock framework uses u8 to represent a parent index */ +#define PARENT_COUNT_MAX ((u32)U8_MAX) + +#define BAD_CLK_INDEX U8_MAX /* Can't ever be valid */ +#define BAD_CLK_NAME ((const char *)-1) + +#define BAD_SCALED_DIV_VALUE U64_MAX + +/* + * Utility macros for object flag management. If possible, flags + * should be defined such that 0 is the desired default value. 
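+ *
+ * For example, FLAG(GATE, EXISTS) expands to BCM_CLK_GATE_FLAGS_EXISTS,
+ * so gate_exists(gate), defined below as FLAG_TEST(gate, GATE, EXISTS),
+ * simply tests that bit in gate->flags.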
+ */ +#define FLAG(type, flag) BCM_CLK_ ## type ## _FLAGS_ ## flag +#define FLAG_SET(obj, type, flag) ((obj)->flags |= FLAG(type, flag)) +#define FLAG_CLEAR(obj, type, flag) ((obj)->flags &= ~(FLAG(type, flag))) +#define FLAG_FLIP(obj, type, flag) ((obj)->flags ^= FLAG(type, flag)) +#define FLAG_TEST(obj, type, flag) (!!((obj)->flags & FLAG(type, flag))) + +/* Clock field state tests */ + +#define gate_exists(gate) FLAG_TEST(gate, GATE, EXISTS) +#define gate_is_enabled(gate) FLAG_TEST(gate, GATE, ENABLED) +#define gate_is_hw_controllable(gate) FLAG_TEST(gate, GATE, HW) +#define gate_is_sw_controllable(gate) FLAG_TEST(gate, GATE, SW) +#define gate_is_sw_managed(gate) FLAG_TEST(gate, GATE, SW_MANAGED) +#define gate_is_no_disable(gate) FLAG_TEST(gate, GATE, NO_DISABLE) + +#define gate_flip_enabled(gate) FLAG_FLIP(gate, GATE, ENABLED) + +#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) +#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) +#define divider_has_fraction(div) (!divider_is_fixed(div) && \ + (div)->frac_width > 0) + +#define selector_exists(sel) ((sel)->width != 0) +#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) + +/* Clock type, used to tell common block what it's part of */ +enum bcm_clk_type { + bcm_clk_none, /* undefined clock type */ + bcm_clk_bus, + bcm_clk_core, + bcm_clk_peri +}; + +/* + * Each CCU defines a mapped area of memory containing registers + * used to manage clocks implemented by the CCU. Access to memory + * within the CCU's space is serialized by a spinlock. Before any + * (other) address can be written, a special access "password" value + * must be written to its WR_ACCESS register (located at the base + * address of the range). We keep track of the name of each CCU as + * it is set up, and maintain them in a list. + */ +struct ccu_data { + void __iomem *base; /* base of mapped address space */ + spinlock_t lock; /* serialization lock */ + bool write_enabled; /* write access is currently enabled */ + struct list_head links; /* for ccu_list */ + struct device_node *node; + struct clk_onecell_data data; + const char *name; + u32 range; /* byte range of address space */ +}; + +/* + * Gating control and status is managed by a 32-bit gate register. + * + * There are several types of gating available: + * - (no gate) + * A clock with no gate is assumed to be always enabled. + * - hardware-only gating (auto-gating) + * Enabling or disabling clocks with this type of gate is + * managed automatically by the hardware. Such clocks can be + * considered by the software to be enabled. The current status + * of auto-gated clocks can be read from the gate status bit. + * - software-only gating + * Auto-gating is not available for this type of clock. + * Instead, software manages whether it's enabled by setting or + * clearing the enable bit. The current gate status of a gate + * under software control can be read from the gate status bit. + * To ensure a change to the gating status is complete, the + * status bit can be polled to verify that the gate has entered + * the desired state. + * - selectable hardware or software gating + * Gating for this type of clock can be configured to be either + * under software or hardware control. Which type is in use is + * determined by the hw_sw_sel bit of the gate register. 
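+ * Software takes over such a gate by setting that bit; __gate_commit()
+ * in clk-kona.c does this for gates marked SW_MANAGED before it
+ * updates the enable bit.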
+ */ +struct bcm_clk_gate { + u32 offset; /* gate register offset */ + u32 status_bit; /* 0: gate is disabled; 1: gate is enabled */ + u32 en_bit; /* 0: disable; 1: enable */ + u32 hw_sw_sel_bit; /* 0: hardware gating; 1: software gating */ + u32 flags; /* BCM_CLK_GATE_FLAGS_* below */ +}; + +/* + * Gate flags: + * HW means this gate can be auto-gated + * SW means the state of this gate can be software controlled + * NO_DISABLE means this gate is (only) enabled if under software control + * SW_MANAGED means the status of this gate is under software control + * ENABLED means this software-managed gate is *supposed* to be enabled + */ +#define BCM_CLK_GATE_FLAGS_EXISTS ((u32)1 << 0) /* Gate is valid */ +#define BCM_CLK_GATE_FLAGS_HW ((u32)1 << 1) /* Can auto-gate */ +#define BCM_CLK_GATE_FLAGS_SW ((u32)1 << 2) /* Software control */ +#define BCM_CLK_GATE_FLAGS_NO_DISABLE ((u32)1 << 3) /* HW or enabled */ +#define BCM_CLK_GATE_FLAGS_SW_MANAGED ((u32)1 << 4) /* SW now in control */ +#define BCM_CLK_GATE_FLAGS_ENABLED ((u32)1 << 5) /* If SW_MANAGED */ + +/* + * Gate initialization macros. + * + * Any gate initially under software control will be enabled. + */ + +/* A hardware/software gate initially under software control */ +#define HW_SW_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \ + { \ + .offset = (_offset), \ + .status_bit = (_status_bit), \ + .en_bit = (_en_bit), \ + .hw_sw_sel_bit = (_hw_sw_sel_bit), \ + .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \ + FLAG(GATE, SW_MANAGED)|FLAG(GATE, ENABLED)| \ + FLAG(GATE, EXISTS), \ + } + +/* A hardware/software gate initially under hardware control */ +#define HW_SW_GATE_AUTO(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \ + { \ + .offset = (_offset), \ + .status_bit = (_status_bit), \ + .en_bit = (_en_bit), \ + .hw_sw_sel_bit = (_hw_sw_sel_bit), \ + .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \ + FLAG(GATE, EXISTS), \ + } + +/* A hardware-or-enabled gate (enabled if not under hardware control) */ +#define HW_ENABLE_GATE(_offset, _status_bit, _en_bit, _hw_sw_sel_bit) \ + { \ + .offset = (_offset), \ + .status_bit = (_status_bit), \ + .en_bit = (_en_bit), \ + .hw_sw_sel_bit = (_hw_sw_sel_bit), \ + .flags = FLAG(GATE, HW)|FLAG(GATE, SW)| \ + FLAG(GATE, NO_DISABLE)|FLAG(GATE, EXISTS), \ + } + +/* A software-only gate */ +#define SW_ONLY_GATE(_offset, _status_bit, _en_bit) \ + { \ + .offset = (_offset), \ + .status_bit = (_status_bit), \ + .en_bit = (_en_bit), \ + .flags = FLAG(GATE, SW)|FLAG(GATE, SW_MANAGED)| \ + FLAG(GATE, ENABLED)|FLAG(GATE, EXISTS), \ + } + +/* A hardware-only gate */ +#define HW_ONLY_GATE(_offset, _status_bit) \ + { \ + .offset = (_offset), \ + .status_bit = (_status_bit), \ + .flags = FLAG(GATE, HW)|FLAG(GATE, EXISTS), \ + } + +/* + * Each clock can have zero, one, or two dividers which change the + * output rate of the clock. Each divider can be either fixed or + * variable. If there are two dividers, they are the "pre-divider" + * and the "regular" or "downstream" divider. If there is only one, + * there is no pre-divider. + * + * A fixed divider is any non-zero (positive) value, and it + * indicates how the input rate is affected by the divider. + * + * The value of a variable divider is maintained in a sub-field of a + * 32-bit divider register. The position of the field in the + * register is defined by its offset and width. The value recorded + * in this field is always 1 less than the value it represents.
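+ * (So a field value of 7 in a divider with no fractional bits selects
+ * a divide-by-8.)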
+ * + * In addition, a variable divider can indicate that some subset + * of its bits represent a "fractional" part of the divider. Such + * bits comprise the low-order portion of the divider field, and can + * be viewed as representing the portion of the divider that lies to + * the right of the decimal point. Most variable dividers have zero + * fractional bits. Variable dividers with non-zero fraction width + * still record a value 1 less than the value they represent; the + * added 1 does *not* affect the low-order bit in this case, it + * affects the bits above the fractional part only. (Often in this + * code a divider field value is distinguished from the value it + * represents by referring to the latter as a "divisor".) + * + * In order to avoid dealing with fractions, divider arithmetic is + * performed using "scaled" values. A scaled value is one that's + * been left-shifted by the fractional width of a divider. Dividing + * a scaled value by a scaled divisor produces the desired quotient + * without loss of precision and without any other special handling + * for fractions. + * + * The recorded value of a variable divider can be modified. To + * modify either divider (or both), a clock must be enabled (i.e., + * using its gate). In addition, a trigger register (described + * below) must be used to commit the change, and polled to verify + * the change is complete. + */ +struct bcm_clk_div { + union { + struct { /* variable divider */ + u32 offset; /* divider register offset */ + u32 shift; /* field shift */ + u32 width; /* field width */ + u32 frac_width; /* field fraction width */ + + u64 scaled_div; /* scaled divider value */ + }; + u32 fixed; /* non-zero fixed divider value */ + }; + u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ +}; + +/* + * Divider flags: + * EXISTS means this divider exists + * FIXED means it is a fixed-rate divider + */ +#define BCM_CLK_DIV_FLAGS_EXISTS ((u32)1 << 0) /* Divider is valid */ +#define BCM_CLK_DIV_FLAGS_FIXED ((u32)1 << 1) /* Fixed-value */ + +/* Divider initialization macros */ + +/* A fixed (non-zero) divider */ +#define FIXED_DIVIDER(_value) \ + { \ + .fixed = (_value), \ + .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ + } + +/* A divider with an integral divisor */ +#define DIVIDER(_offset, _shift, _width) \ + { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .scaled_div = BAD_SCALED_DIV_VALUE, \ + .flags = FLAG(DIV, EXISTS), \ + } + +/* A divider whose divisor has an integer and fractional part */ +#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ + { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .frac_width = (_frac_width), \ + .scaled_div = BAD_SCALED_DIV_VALUE, \ + .flags = FLAG(DIV, EXISTS), \ + } + +/* + * Clocks may have multiple "parent" clocks. If there is more than + * one, a selector must be specified to define which of the parent + * clocks is currently in use. The selected clock is indicated in a + * sub-field of a 32-bit selector register. The range of + * representable selector values typically exceeds the number of + * available parent clocks. Occasionally the reset value of a + * selector field is explicitly set to a (specific) value that does + * not correspond to a defined input clock. + * + * We register all known parent clocks with the common clock code + * using a packed array (i.e., no empty slots) of (parent) clock + * names, and refer to them later using indexes into that array. 
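+ * (These indexes are the parent indexes that the common clock
+ * framework passes to the set_parent method and expects back from
+ * get_parent.)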
+ * We maintain an array of selector values indexed by common clock + * index values in order to map between these common clock indexes + * and the selector values used by the hardware. + * + * Like dividers, a selector can be modified, but to do so a clock + * must be enabled, and a trigger must be used to commit the change. + */ +struct bcm_clk_sel { + u32 offset; /* selector register offset */ + u32 shift; /* field shift */ + u32 width; /* field width */ + + u32 parent_count; /* number of entries in parent_sel[] */ + u32 *parent_sel; /* array of parent selector values */ + u8 clk_index; /* current selected index in parent_sel[] */ +}; + +/* Selector initialization macro */ +#define SELECTOR(_offset, _shift, _width) \ + { \ + .offset = (_offset), \ + .shift = (_shift), \ + .width = (_width), \ + .clk_index = BAD_CLK_INDEX, \ + } + +/* + * Making changes to a variable divider or a selector for a clock + * requires the use of a trigger. A trigger is defined by a single + * bit within a register. To signal a change, a 1 is written into + * that bit. To determine when the change has been completed, that + * trigger bit is polled; the read value will be 1 while the change + * is in progress, and 0 when it is complete. + * + * Occasionally a clock will have more than one trigger. In this + * case, the "pre-trigger" will be used when changing a clock's + * selector and/or its pre-divider. + */ +struct bcm_clk_trig { + u32 offset; /* trigger register offset */ + u32 bit; /* trigger bit */ + u32 flags; /* BCM_CLK_TRIG_FLAGS_* below */ +}; + +/* + * Trigger flags: + * EXISTS means this trigger exists + */ +#define BCM_CLK_TRIG_FLAGS_EXISTS ((u32)1 << 0) /* Trigger is valid */ + +/* Trigger initialization macro */ +#define TRIGGER(_offset, _bit) \ + { \ + .offset = (_offset), \ + .bit = (_bit), \ + .flags = FLAG(TRIG, EXISTS), \ + } + +struct peri_clk_data { + struct bcm_clk_gate gate; + struct bcm_clk_trig pre_trig; + struct bcm_clk_div pre_div; + struct bcm_clk_trig trig; + struct bcm_clk_div div; + struct bcm_clk_sel sel; + const char *clocks[]; /* must be last; use CLOCKS() to declare */ +}; +#define CLOCKS(...) 
{ __VA_ARGS__, NULL, } +#define NO_CLOCKS { NULL, } /* Must use of no parent clocks */ + +struct kona_clk { + struct clk_hw hw; + struct clk_init_data init_data; + const char *name; /* name of this clock */ + struct ccu_data *ccu; /* ccu this clock is associated with */ + enum bcm_clk_type type; + union { + void *data; + struct peri_clk_data *peri; + }; +}; +#define to_kona_clk(_hw) \ + container_of(_hw, struct kona_clk, hw) + +/* Exported globals */ + +extern struct clk_ops kona_peri_clk_ops; + +/* Help functions */ + +#define PERI_CLK_SETUP(clks, ccu, id, name) \ + clks[id] = kona_clk_setup(ccu, #name, bcm_clk_peri, &name ## _data) + +/* Externally visible functions */ + +extern u64 do_div_round_closest(u64 dividend, unsigned long divisor); +extern u64 scaled_div_max(struct bcm_clk_div *div); +extern u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, + u32 billionths); + +extern struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name, + enum bcm_clk_type type, void *data); +extern void __init kona_dt_ccu_setup(struct device_node *node, + int (*ccu_clks_setup)(struct ccu_data *)); +extern bool __init kona_ccu_init(struct ccu_data *ccu); + +#endif /* _CLK_KONA_H */ diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c index 8137327847c3..1127ee46b802 100644 --- a/drivers/clk/clk-axi-clkgen.c +++ b/drivers/clk/clk-axi-clkgen.c @@ -17,23 +17,75 @@ #include <linux/module.h> #include <linux/err.h> -#define AXI_CLKGEN_REG_UPDATE_ENABLE 0x04 -#define AXI_CLKGEN_REG_CLK_OUT1 0x08 -#define AXI_CLKGEN_REG_CLK_OUT2 0x0c -#define AXI_CLKGEN_REG_CLK_DIV 0x10 -#define AXI_CLKGEN_REG_CLK_FB1 0x14 -#define AXI_CLKGEN_REG_CLK_FB2 0x18 -#define AXI_CLKGEN_REG_LOCK1 0x1c -#define AXI_CLKGEN_REG_LOCK2 0x20 -#define AXI_CLKGEN_REG_LOCK3 0x24 -#define AXI_CLKGEN_REG_FILTER1 0x28 -#define AXI_CLKGEN_REG_FILTER2 0x2c +#define AXI_CLKGEN_V1_REG_UPDATE_ENABLE 0x04 +#define AXI_CLKGEN_V1_REG_CLK_OUT1 0x08 +#define AXI_CLKGEN_V1_REG_CLK_OUT2 0x0c +#define AXI_CLKGEN_V1_REG_CLK_DIV 0x10 +#define AXI_CLKGEN_V1_REG_CLK_FB1 0x14 +#define AXI_CLKGEN_V1_REG_CLK_FB2 0x18 +#define AXI_CLKGEN_V1_REG_LOCK1 0x1c +#define AXI_CLKGEN_V1_REG_LOCK2 0x20 +#define AXI_CLKGEN_V1_REG_LOCK3 0x24 +#define AXI_CLKGEN_V1_REG_FILTER1 0x28 +#define AXI_CLKGEN_V1_REG_FILTER2 0x2c + +#define AXI_CLKGEN_V2_REG_RESET 0x40 +#define AXI_CLKGEN_V2_REG_DRP_CNTRL 0x70 +#define AXI_CLKGEN_V2_REG_DRP_STATUS 0x74 + +#define AXI_CLKGEN_V2_RESET_MMCM_ENABLE BIT(1) +#define AXI_CLKGEN_V2_RESET_ENABLE BIT(0) + +#define AXI_CLKGEN_V2_DRP_CNTRL_SEL BIT(29) +#define AXI_CLKGEN_V2_DRP_CNTRL_READ BIT(28) + +#define AXI_CLKGEN_V2_DRP_STATUS_BUSY BIT(16) + +#define MMCM_REG_CLKOUT0_1 0x08 +#define MMCM_REG_CLKOUT0_2 0x09 +#define MMCM_REG_CLK_FB1 0x14 +#define MMCM_REG_CLK_FB2 0x15 +#define MMCM_REG_CLK_DIV 0x16 +#define MMCM_REG_LOCK1 0x18 +#define MMCM_REG_LOCK2 0x19 +#define MMCM_REG_LOCK3 0x1a +#define MMCM_REG_FILTER1 0x4e +#define MMCM_REG_FILTER2 0x4f + +struct axi_clkgen; + +struct axi_clkgen_mmcm_ops { + void (*enable)(struct axi_clkgen *axi_clkgen, bool enable); + int (*write)(struct axi_clkgen *axi_clkgen, unsigned int reg, + unsigned int val, unsigned int mask); + int (*read)(struct axi_clkgen *axi_clkgen, unsigned int reg, + unsigned int *val); +}; struct axi_clkgen { void __iomem *base; + const struct axi_clkgen_mmcm_ops *mmcm_ops; struct clk_hw clk_hw; }; +static void axi_clkgen_mmcm_enable(struct axi_clkgen *axi_clkgen, + bool enable) +{ + axi_clkgen->mmcm_ops->enable(axi_clkgen, enable); +} + +static int 
axi_clkgen_mmcm_write(struct axi_clkgen *axi_clkgen, + unsigned int reg, unsigned int val, unsigned int mask) +{ + return axi_clkgen->mmcm_ops->write(axi_clkgen, reg, val, mask); +} + +static int axi_clkgen_mmcm_read(struct axi_clkgen *axi_clkgen, + unsigned int reg, unsigned int *val) +{ + return axi_clkgen->mmcm_ops->read(axi_clkgen, reg, val); +} + static uint32_t axi_clkgen_lookup_filter(unsigned int m) { switch (m) { @@ -156,6 +208,148 @@ static void axi_clkgen_read(struct axi_clkgen *axi_clkgen, *val = readl(axi_clkgen->base + reg); } +static unsigned int axi_clkgen_v1_map_mmcm_reg(unsigned int reg) +{ + switch (reg) { + case MMCM_REG_CLKOUT0_1: + return AXI_CLKGEN_V1_REG_CLK_OUT1; + case MMCM_REG_CLKOUT0_2: + return AXI_CLKGEN_V1_REG_CLK_OUT2; + case MMCM_REG_CLK_FB1: + return AXI_CLKGEN_V1_REG_CLK_FB1; + case MMCM_REG_CLK_FB2: + return AXI_CLKGEN_V1_REG_CLK_FB2; + case MMCM_REG_CLK_DIV: + return AXI_CLKGEN_V1_REG_CLK_DIV; + case MMCM_REG_LOCK1: + return AXI_CLKGEN_V1_REG_LOCK1; + case MMCM_REG_LOCK2: + return AXI_CLKGEN_V1_REG_LOCK2; + case MMCM_REG_LOCK3: + return AXI_CLKGEN_V1_REG_LOCK3; + case MMCM_REG_FILTER1: + return AXI_CLKGEN_V1_REG_FILTER1; + case MMCM_REG_FILTER2: + return AXI_CLKGEN_V1_REG_FILTER2; + default: + return 0; + } +} + +static int axi_clkgen_v1_mmcm_write(struct axi_clkgen *axi_clkgen, + unsigned int reg, unsigned int val, unsigned int mask) +{ + reg = axi_clkgen_v1_map_mmcm_reg(reg); + if (reg == 0) + return -EINVAL; + + axi_clkgen_write(axi_clkgen, reg, val); + + return 0; +} + +static int axi_clkgen_v1_mmcm_read(struct axi_clkgen *axi_clkgen, + unsigned int reg, unsigned int *val) +{ + reg = axi_clkgen_v1_map_mmcm_reg(reg); + if (reg == 0) + return -EINVAL; + + axi_clkgen_read(axi_clkgen, reg, val); + + return 0; +} + +static void axi_clkgen_v1_mmcm_enable(struct axi_clkgen *axi_clkgen, + bool enable) +{ + axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V1_REG_UPDATE_ENABLE, enable); +} + +static const struct axi_clkgen_mmcm_ops axi_clkgen_v1_mmcm_ops = { + .write = axi_clkgen_v1_mmcm_write, + .read = axi_clkgen_v1_mmcm_read, + .enable = axi_clkgen_v1_mmcm_enable, +}; + +static int axi_clkgen_wait_non_busy(struct axi_clkgen *axi_clkgen) +{ + unsigned int timeout = 10000; + unsigned int val; + + do { + axi_clkgen_read(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_STATUS, &val); + } while ((val & AXI_CLKGEN_V2_DRP_STATUS_BUSY) && --timeout); + + if (val & AXI_CLKGEN_V2_DRP_STATUS_BUSY) + return -EIO; + + return val & 0xffff; +} + +static int axi_clkgen_v2_mmcm_read(struct axi_clkgen *axi_clkgen, + unsigned int reg, unsigned int *val) +{ + unsigned int reg_val; + int ret; + + ret = axi_clkgen_wait_non_busy(axi_clkgen); + if (ret < 0) + return ret; + + reg_val = AXI_CLKGEN_V2_DRP_CNTRL_SEL | AXI_CLKGEN_V2_DRP_CNTRL_READ; + reg_val |= (reg << 16); + + axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val); + + ret = axi_clkgen_wait_non_busy(axi_clkgen); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int axi_clkgen_v2_mmcm_write(struct axi_clkgen *axi_clkgen, + unsigned int reg, unsigned int val, unsigned int mask) +{ + unsigned int reg_val = 0; + int ret; + + ret = axi_clkgen_wait_non_busy(axi_clkgen); + if (ret < 0) + return ret; + + if (mask != 0xffff) { + axi_clkgen_v2_mmcm_read(axi_clkgen, reg, ®_val); + reg_val &= ~mask; + } + + reg_val |= AXI_CLKGEN_V2_DRP_CNTRL_SEL | (reg << 16) | (val & mask); + + axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_DRP_CNTRL, reg_val); + + return 0; +} + +static void axi_clkgen_v2_mmcm_enable(struct 
axi_clkgen *axi_clkgen, + bool enable) +{ + unsigned int val = AXI_CLKGEN_V2_RESET_ENABLE; + + if (enable) + val |= AXI_CLKGEN_V2_RESET_MMCM_ENABLE; + + axi_clkgen_write(axi_clkgen, AXI_CLKGEN_V2_REG_RESET, val); +} + +static const struct axi_clkgen_mmcm_ops axi_clkgen_v2_mmcm_ops = { + .write = axi_clkgen_v2_mmcm_write, + .read = axi_clkgen_v2_mmcm_read, + .enable = axi_clkgen_v2_mmcm_enable, +}; + static struct axi_clkgen *clk_hw_to_axi_clkgen(struct clk_hw *clk_hw) { return container_of(clk_hw, struct axi_clkgen, clk_hw); @@ -184,33 +378,29 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw, filter = axi_clkgen_lookup_filter(m - 1); lock = axi_clkgen_lookup_lock(m - 1); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 0); - axi_clkgen_calc_clk_params(dout, &low, &high, &edge, &nocount); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, - (high << 6) | low); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT2, - (edge << 7) | (nocount << 6)); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_1, + (high << 6) | low, 0xefff); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLKOUT0_2, + (edge << 7) | (nocount << 6), 0x03ff); axi_clkgen_calc_clk_params(d, &low, &high, &edge, &nocount); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, - (edge << 13) | (nocount << 12) | (high << 6) | low); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_DIV, + (edge << 13) | (nocount << 12) | (high << 6) | low, 0x3fff); axi_clkgen_calc_clk_params(m, &low, &high, &edge, &nocount); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, - (high << 6) | low); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_CLK_FB2, - (edge << 7) | (nocount << 6)); - - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK1, lock & 0x3ff); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK2, - (((lock >> 16) & 0x1f) << 10) | 0x1); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_LOCK3, - (((lock >> 24) & 0x1f) << 10) | 0x3e9); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER1, filter >> 16); - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_FILTER2, filter); - - axi_clkgen_write(axi_clkgen, AXI_CLKGEN_REG_UPDATE_ENABLE, 1); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB1, + (high << 6) | low, 0xefff); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_CLK_FB2, + (edge << 7) | (nocount << 6), 0x03ff); + + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK1, lock & 0x3ff, 0x3ff); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK2, + (((lock >> 16) & 0x1f) << 10) | 0x1, 0x7fff); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_LOCK3, + (((lock >> 24) & 0x1f) << 10) | 0x3e9, 0x7fff); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER1, filter >> 16, 0x9900); + axi_clkgen_mmcm_write(axi_clkgen, MMCM_REG_FILTER2, filter, 0x9900); return 0; } @@ -236,11 +426,11 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw, unsigned int reg; unsigned long long tmp; - axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_OUT1, ®); + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, ®); dout = (reg & 0x3f) + ((reg >> 6) & 0x3f); - axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_DIV, ®); + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, ®); d = (reg & 0x3f) + ((reg >> 6) & 0x3f); - axi_clkgen_read(axi_clkgen, AXI_CLKGEN_REG_CLK_FB1, ®); + axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, ®); m = (reg & 0x3f) + ((reg >> 6) & 0x3f); if (d == 0 || dout == 0) @@ -255,14 +445,45 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw, return tmp; } +static int axi_clkgen_enable(struct clk_hw *clk_hw) +{ + struct 
axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw); + + axi_clkgen_mmcm_enable(axi_clkgen, true); + + return 0; +} + +static void axi_clkgen_disable(struct clk_hw *clk_hw) +{ + struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw); + + axi_clkgen_mmcm_enable(axi_clkgen, false); +} + static const struct clk_ops axi_clkgen_ops = { .recalc_rate = axi_clkgen_recalc_rate, .round_rate = axi_clkgen_round_rate, .set_rate = axi_clkgen_set_rate, + .enable = axi_clkgen_enable, + .disable = axi_clkgen_disable, }; +static const struct of_device_id axi_clkgen_ids[] = { + { + .compatible = "adi,axi-clkgen-1.00.a", + .data = &axi_clkgen_v1_mmcm_ops + }, { + .compatible = "adi,axi-clkgen-2.00.a", + .data = &axi_clkgen_v2_mmcm_ops, + }, + { }, +}; +MODULE_DEVICE_TABLE(of, axi_clkgen_ids); + static int axi_clkgen_probe(struct platform_device *pdev) { + const struct of_device_id *id; struct axi_clkgen *axi_clkgen; struct clk_init_data init; const char *parent_name; @@ -270,10 +491,19 @@ static int axi_clkgen_probe(struct platform_device *pdev) struct resource *mem; struct clk *clk; + if (!pdev->dev.of_node) + return -ENODEV; + + id = of_match_node(axi_clkgen_ids, pdev->dev.of_node); + if (!id) + return -ENODEV; + axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL); if (!axi_clkgen) return -ENOMEM; + axi_clkgen->mmcm_ops = id->data; + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(axi_clkgen->base)) @@ -289,10 +519,12 @@ static int axi_clkgen_probe(struct platform_device *pdev) init.name = clk_name; init.ops = &axi_clkgen_ops; - init.flags = 0; + init.flags = CLK_SET_RATE_GATE; init.parent_names = &parent_name; init.num_parents = 1; + axi_clkgen_mmcm_enable(axi_clkgen, false); + axi_clkgen->clk_hw.init = &init; clk = devm_clk_register(&pdev->dev, &axi_clkgen->clk_hw); if (IS_ERR(clk)) @@ -309,12 +541,6 @@ static int axi_clkgen_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id axi_clkgen_ids[] = { - { .compatible = "adi,axi-clkgen-1.00.a" }, - { }, -}; -MODULE_DEVICE_TABLE(of, axi_clkgen_ids); - static struct platform_driver axi_clkgen_driver = { .driver = { .name = "adi-axi-clkgen", diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 5543b7df8e16..ec22112e569f 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -24,7 +24,7 @@ * Traits of this clock: * prepare - clk_prepare only ensures that parents are prepared * enable - clk_enable only ensures that parents are enabled - * rate - rate is adjustable. clk->rate = parent->rate / divisor + * rate - rate is adjustable. clk->rate = DIV_ROUND_UP(parent->rate / divisor) * parent - fixed parent. 
No clk_set_parent support */ @@ -115,7 +115,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, return parent_rate; } - return parent_rate / div; + return DIV_ROUND_UP(parent_rate, div); } /* @@ -185,7 +185,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, } parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), MULT_ROUND_UP(rate, i)); - now = parent_rate / i; + now = DIV_ROUND_UP(parent_rate, i); if (now <= rate && now > best) { bestdiv = i; best = now; @@ -207,7 +207,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, int div; div = clk_divider_bestdiv(hw, rate, prate); - return *prate / div; + return DIV_ROUND_UP(*prate, div); } static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, @@ -218,7 +218,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags = 0; u32 val; - div = parent_rate / rate; + div = DIV_ROUND_UP(parent_rate, rate); value = _get_val(divider, div); if (value > div_mask(divider)) diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c new file mode 100644 index 000000000000..30a3b6999e10 --- /dev/null +++ b/drivers/clk/clk-moxart.c @@ -0,0 +1,97 @@ +/* + * MOXA ART SoCs clock driver. + * + * Copyright (C) 2013 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/clkdev.h> + +void __init moxart_of_pll_clk_init(struct device_node *node) +{ + static void __iomem *base; + struct clk *clk, *ref_clk; + unsigned int mul; + const char *name = node->name; + const char *parent_name; + + of_property_read_string(node, "clock-output-names", &name); + parent_name = of_clk_get_parent_name(node, 0); + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s: of_iomap failed\n", node->full_name); + return; + } + + mul = readl(base + 0x30) >> 3 & 0x3f; + iounmap(base); + + ref_clk = of_clk_get(node, 0); + if (IS_ERR(ref_clk)) { + pr_err("%s: of_clk_get failed\n", node->full_name); + return; + } + + clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mul, 1); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock\n", node->full_name); + return; + } + + clk_register_clkdev(clk, NULL, name); + of_clk_add_provider(node, of_clk_src_simple_get, clk); +} +CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock", + moxart_of_pll_clk_init); + +void __init moxart_of_apb_clk_init(struct device_node *node) +{ + static void __iomem *base; + struct clk *clk, *pll_clk; + unsigned int div, val; + unsigned int div_idx[] = { 2, 3, 4, 6, 8}; + const char *name = node->name; + const char *parent_name; + + of_property_read_string(node, "clock-output-names", &name); + parent_name = of_clk_get_parent_name(node, 0); + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s: of_iomap failed\n", node->full_name); + return; + } + + val = readl(base + 0xc) >> 4 & 0x7; + iounmap(base); + + if (val > 4) + val = 0; + div = div_idx[val] * 2; + + pll_clk = of_clk_get(node, 0); + if (IS_ERR(pll_clk)) { + pr_err("%s: of_clk_get failed\n", node->full_name); + return; + } + + clk = clk_register_fixed_factor(NULL, name, parent_name, 0, 1, div); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock\n", node->full_name); + return; + } + + clk_register_clkdev(clk, NULL, 
name); + of_clk_add_provider(node, of_clk_src_simple_get, clk); +} +CLK_OF_DECLARE(moxart_apb_clock, "moxa,moxart-apb-clock", + moxart_of_apb_clk_init); diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index 6a934a5296bd..05e04ce0f148 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = { static int __init nomadik_src_clk_init_debugfs(void) { + /* Vital for multiplatform */ + if (!src_base) + return -ENODEV; src_pcksr0_boot = readl(src_base + SRC_PCKSR0); src_pcksr1_boot = readl(src_base + SRC_PCKSR1); debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO, diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c index c4f76ed914b0..8b284be4efa4 100644 --- a/drivers/clk/clk-ppc-corenet.c +++ b/drivers/clk/clk-ppc-corenet.c @@ -27,7 +27,6 @@ struct cmux_clk { #define CLKSEL_ADJUST BIT(0) #define to_cmux_clk(p) container_of(p, struct cmux_clk, hw) -static void __iomem *base; static unsigned int clocks_per_pll; static int cmux_set_parent(struct clk_hw *hw, u8 idx) @@ -100,7 +99,11 @@ static void __init core_mux_init(struct device_node *np) pr_err("%s: could not allocate cmux_clk\n", __func__); goto err_name; } - cmux_clk->reg = base + offset; + cmux_clk->reg = of_iomap(np, 0); + if (!cmux_clk->reg) { + pr_err("%s: could not map register\n", __func__); + goto err_clk; + } node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen"); if (node && (offset >= 0x80)) @@ -143,38 +146,39 @@ err_name: static void __init core_pll_init(struct device_node *np) { - u32 offset, mult; + u32 mult; int i, rc, count; const char *clk_name, *parent_name; struct clk_onecell_data *onecell_data; struct clk **subclks; + void __iomem *base; - rc = of_property_read_u32(np, "reg", &offset); - if (rc) { - pr_err("%s: could not get reg property\n", np->name); + base = of_iomap(np, 0); + if (!base) { + pr_err("clk-ppc: iomap error\n"); return; } /* get the multiple of PLL */ - mult = ioread32be(base + offset); + mult = ioread32be(base); /* check if this PLL is disabled */ if (mult & PLL_KILL) { pr_debug("PLL:%s is disabled\n", np->name); - return; + goto err_map; } mult = (mult >> 1) & 0x3f; parent_name = of_clk_get_parent_name(np, 0); if (!parent_name) { pr_err("PLL: %s must have a parent\n", np->name); - return; + goto err_map; } count = of_property_count_strings(np, "clock-output-names"); if (count < 0 || count > 4) { pr_err("%s: clock is not supported\n", np->name); - return; + goto err_map; } /* output clock number per PLL */ @@ -183,7 +187,7 @@ static void __init core_pll_init(struct device_node *np) subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); if (!subclks) { pr_err("%s: could not allocate subclks\n", __func__); - return; + goto err_map; } onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); @@ -230,30 +234,52 @@ static void __init core_pll_init(struct device_node *np) goto err_cell; } + iounmap(base); return; err_cell: kfree(onecell_data); err_clks: kfree(subclks); +err_map: + iounmap(base); +} + +static void __init sysclk_init(struct device_node *node) +{ + struct clk *clk; + const char *clk_name = node->name; + struct device_node *np = of_get_parent(node); + u32 rate; + + if (!np) { + pr_err("ppc-clk: could not get parent node\n"); + return; + } + + if (of_property_read_u32(np, "clock-frequency", &rate)) { + of_node_put(node); + return; + } + + of_property_read_string(np, "clock-output-names", &clk_name); + + clk = 
clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate); + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); } static const struct of_device_id clk_match[] __initconst = { - { .compatible = "fixed-clock", .data = of_fixed_clk_setup, }, - { .compatible = "fsl,core-pll-clock", .data = core_pll_init, }, - { .compatible = "fsl,core-mux-clock", .data = core_mux_init, }, + { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, }, + { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, }, + { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, }, + { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, }, + { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, }, + { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, }, {} }; static int __init ppc_corenet_clk_probe(struct platform_device *pdev) { - struct device_node *np; - - np = pdev->dev.of_node; - base = of_iomap(np, 0); - if (!base) { - dev_err(&pdev->dev, "iomap error\n"); - return -ENOMEM; - } of_clk_init(clk_match); return 0; diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c index 00a3abe103a5..f2f62a1bf61a 100644 --- a/drivers/clk/clk-s2mps11.c +++ b/drivers/clk/clk-s2mps11.c @@ -27,6 +27,7 @@ #include <linux/clk-provider.h> #include <linux/platform_device.h> #include <linux/mfd/samsung/s2mps11.h> +#include <linux/mfd/samsung/s5m8767.h> #include <linux/mfd/samsung/core.h> #define s2mps11_name(a) (a->hw.init->name) @@ -48,6 +49,7 @@ struct s2mps11_clk { struct clk_lookup *lookup; u32 mask; bool enabled; + unsigned int reg; }; static struct s2mps11_clk *to_s2mps11_clk(struct clk_hw *hw) @@ -61,7 +63,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw) int ret; ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, - S2MPS11_REG_RTC_CTRL, + s2mps11->reg, s2mps11->mask, s2mps11->mask); if (!ret) s2mps11->enabled = true; @@ -74,7 +76,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw) struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw); int ret; - ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL, + ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg, s2mps11->mask, ~s2mps11->mask); if (!ret) @@ -130,9 +132,9 @@ static struct device_node *s2mps11_clk_parse_dt(struct platform_device *pdev) int i; if (!iodev->dev->of_node) - return NULL; + return ERR_PTR(-EINVAL); - clk_np = of_find_node_by_name(iodev->dev->of_node, "clocks"); + clk_np = of_get_child_by_name(iodev->dev->of_node, "clocks"); if (!clk_np) { dev_err(&pdev->dev, "could not find clock sub-node\n"); return ERR_PTR(-EINVAL); @@ -155,6 +157,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev) struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct s2mps11_clk *s2mps11_clks, *s2mps11_clk; struct device_node *clk_np = NULL; + unsigned int s2mps11_reg; int i, ret = 0; u32 val; @@ -169,13 +172,26 @@ static int s2mps11_clk_probe(struct platform_device *pdev) if (IS_ERR(clk_np)) return PTR_ERR(clk_np); + switch(platform_get_device_id(pdev)->driver_data) { + case S2MPS11X: + s2mps11_reg = S2MPS11_REG_RTC_CTRL; + break; + case S5M8767X: + s2mps11_reg = S5M8767_REG_CTRL1; + break; + default: + dev_err(&pdev->dev, "Invalid device type\n"); + return -EINVAL; + }; + for (i = 0; i < S2MPS11_CLKS_NUM; i++, s2mps11_clk++) { s2mps11_clk->iodev = iodev; s2mps11_clk->hw.init = &s2mps11_clks_init[i]; s2mps11_clk->mask = 1 << i; + s2mps11_clk->reg = s2mps11_reg; ret = regmap_read(s2mps11_clk->iodev->regmap_pmic, - 
S2MPS11_REG_RTC_CTRL, &val); + s2mps11_clk->reg, &val); if (ret < 0) goto err_reg; @@ -241,7 +257,8 @@ static int s2mps11_clk_remove(struct platform_device *pdev) } static const struct platform_device_id s2mps11_clk_id[] = { - { "s2mps11-clk", 0}, + { "s2mps11-clk", S2MPS11X}, + { "s5m8767-clk", S5M8767X}, { }, }; MODULE_DEVICE_TABLE(platform, s2mps11_clk_id); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5517944495d8..dff0373f53c1 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -277,6 +277,10 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) if (!d) goto err_out; + if (clk->ops->debug_init) + if (clk->ops->debug_init(clk->hw, clk->dentry)) + goto err_out; + ret = 0; goto out; @@ -1339,8 +1343,11 @@ static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate) if (clk->notifier_count) ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate); - if (ret & NOTIFY_STOP_MASK) + if (ret & NOTIFY_STOP_MASK) { + pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", + __func__, clk->name, ret); goto out; + } hlist_for_each_entry(child, &clk->children, child_node) { ret = __clk_speculate_rates(child, new_rate); @@ -1588,7 +1595,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate) /* notify that we are about to change rates */ fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); if (fail_clk) { - pr_warn("%s: failed to set %s rate\n", __func__, + pr_debug("%s: failed to set %s rate\n", __func__, fail_clk->name); clk_propagate_rate_change(top, ABORT_RATE_CHANGE); ret = -EBUSY; @@ -2226,24 +2233,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister); */ int __clk_get(struct clk *clk) { - if (clk && !try_module_get(clk->owner)) - return 0; + if (clk) { + if (!try_module_get(clk->owner)) + return 0; - kref_get(&clk->ref); + kref_get(&clk->ref); + } return 1; } void __clk_put(struct clk *clk) { - if (WARN_ON_ONCE(IS_ERR(clk))) + if (!clk || WARN_ON_ONCE(IS_ERR(clk))) return; clk_prepare_lock(); kref_put(&clk->ref, __clk_release); clk_prepare_unlock(); - if (clk) - module_put(clk->owner); + module_put(clk->owner); } /*** clk rate change notifiers ***/ @@ -2259,20 +2267,11 @@ void __clk_put(struct clk *clk) * re-enter into the clk framework by calling any top-level clk APIs; * this will cause a nested prepare_lock mutex. * - * Pre-change notifier callbacks will be passed the current, pre-change - * rate of the clk via struct clk_notifier_data.old_rate. The new, - * post-change rate of the clk is passed via struct + * In all notification cases cases (pre, post and abort rate change) the + * original clock rate is passed to the callback via struct + * clk_notifier_data.old_rate and the new frequency is passed via struct * clk_notifier_data.new_rate. * - * Post-change notifiers will pass the now-current, post-change rate of - * the clk in both struct clk_notifier_data.old_rate and struct - * clk_notifier_data.new_rate. - * - * Abort-change notifiers are effectively the opposite of pre-change - * notifiers: the original pre-change clk rate is passed in via struct - * clk_notifier_data.new_rate and the failed post-change rate is passed - * in via struct clk_notifier_data.old_rate. - * * clk_notifier_register() must be called from non-atomic context. 
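 *
 * For example, a driver that registers for these notifications can
 * compare clk_notifier_data.old_rate with clk_notifier_data.new_rate
 * in its PRE_RATE_CHANGE callback and abort the change by returning a
 * value with NOTIFY_STOP_MASK set (see __clk_speculate_rates()).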
* Returns -EINVAL if called with null arguments, -ENOMEM upon * allocation failure; otherwise, passes along the return value of @@ -2472,7 +2471,7 @@ EXPORT_SYMBOL_GPL(of_clk_del_provider); struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec) { struct of_clk_provider *provider; - struct clk *clk = ERR_PTR(-ENOENT); + struct clk *clk = ERR_PTR(-EPROBE_DEFER); /* Check if we have such a provider in our array */ list_for_each_entry(provider, &of_clk_providers, link) { @@ -2505,8 +2504,12 @@ EXPORT_SYMBOL_GPL(of_clk_get_parent_count); const char *of_clk_get_parent_name(struct device_node *np, int index) { struct of_phandle_args clkspec; + struct property *prop; const char *clk_name; + const __be32 *vp; + u32 pv; int rc; + int count; if (index < 0) return NULL; @@ -2516,8 +2519,22 @@ const char *of_clk_get_parent_name(struct device_node *np, int index) if (rc) return NULL; + index = clkspec.args_count ? clkspec.args[0] : 0; + count = 0; + + /* if there is an indices property, use it to transfer the index + * specified into an array offset for the clock-output-names property. + */ + of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) { + if (index == pv) { + index = count; + break; + } + count++; + } + if (of_property_read_string_index(clkspec.np, "clock-output-names", - clkspec.args_count ? clkspec.args[0] : 0, + index, &clk_name) < 0) clk_name = clkspec.np->name; @@ -2526,24 +2543,99 @@ const char *of_clk_get_parent_name(struct device_node *np, int index) } EXPORT_SYMBOL_GPL(of_clk_get_parent_name); +struct clock_provider { + of_clk_init_cb_t clk_init_cb; + struct device_node *np; + struct list_head node; +}; + +static LIST_HEAD(clk_provider_list); + +/* + * This function looks for a parent clock. If there is one, then it + * checks that the provider for this parent clock was initialized, in + * this case the parent clock will be ready. + */ +static int parent_ready(struct device_node *np) +{ + int i = 0; + + while (true) { + struct clk *clk = of_clk_get(np, i); + + /* this parent is ready we can check the next one */ + if (!IS_ERR(clk)) { + clk_put(clk); + i++; + continue; + } + + /* at least one parent is not ready, we exit now */ + if (PTR_ERR(clk) == -EPROBE_DEFER) + return 0; + + /* + * Here we make assumption that the device tree is + * written correctly. So an error means that there is + * no more parent. As we didn't exit yet, then the + * previous parent are ready. If there is no clock + * parent, no need to wait for them, then we can + * consider their absence as being ready + */ + return 1; + } +} + /** * of_clk_init() - Scan and init clock providers from the DT * @matches: array of compatible values and init functions for providers. * - * This function scans the device tree for matching clock providers and - * calls their initialization functions + * This function scans the device tree for matching clock providers + * and calls their initialization functions. It also does it by trying + * to follow the dependencies. 
*/ void __init of_clk_init(const struct of_device_id *matches) { const struct of_device_id *match; struct device_node *np; + struct clock_provider *clk_provider, *next; + bool is_init_done; + bool force = false; if (!matches) matches = &__clk_of_table; + /* First prepare the list of the clocks providers */ for_each_matching_node_and_match(np, matches, &match) { - of_clk_init_cb_t clk_init_cb = match->data; - clk_init_cb(np); + struct clock_provider *parent = + kzalloc(sizeof(struct clock_provider), GFP_KERNEL); + + parent->clk_init_cb = match->data; + parent->np = np; + list_add_tail(&parent->node, &clk_provider_list); + } + + while (!list_empty(&clk_provider_list)) { + is_init_done = false; + list_for_each_entry_safe(clk_provider, next, + &clk_provider_list, node) { + if (force || parent_ready(clk_provider->np)) { + clk_provider->clk_init_cb(clk_provider->np); + list_del(&clk_provider->node); + kfree(clk_provider); + is_init_done = true; + } + } + + /* + * We didn't manage to initialize any of the + * remaining providers during the last loop, so now we + * initialize all the remaining ones unconditionally + * in case the clock parent was not mandatory + */ + if (!is_init_done) + force = true; + } } #endif diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 48f67218247c..a360b2eca5cb 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -167,6 +167,8 @@ struct clk *clk_get(struct device *dev, const char *con_id) clk = of_clk_get_by_name(dev->of_node, con_id); if (!IS_ERR(clk)) return clk; + if (PTR_ERR(clk) == -EPROBE_DEFER) + return clk; } return clk_get_sys(dev_id, con_id); diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index a049108341fc..40b33c6a8257 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile @@ -2,4 +2,7 @@ # Hisilicon Clock specific Makefile # -obj-y += clk.o clkgate-separated.o clk-hi3620.o +obj-y += clk.o clkgate-separated.o + +obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o +obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c index f24ad6a3a797..339945d2503b 100644 --- a/drivers/clk/hisilicon/clk-hi3620.c +++ b/drivers/clk/hisilicon/clk-hi3620.c @@ -210,33 +210,297 @@ static struct hisi_gate_clock hi3620_seperated_gate_clks[] __initdata = { static void __init hi3620_clk_init(struct device_node *np) { - void __iomem *base; + struct hisi_clock_data *clk_data; - if (np) { - base = of_iomap(np, 0); - if (!base) { - pr_err("failed to map Hi3620 clock registers\n"); - return; - } - } else { - pr_err("failed to find Hi3620 clock node in DTS\n"); + clk_data = hisi_clk_init(np, HI3620_NR_CLKS); + if (!clk_data) return; - } - - hisi_clk_init(np, HI3620_NR_CLKS); hisi_clk_register_fixed_rate(hi3620_fixed_rate_clks, ARRAY_SIZE(hi3620_fixed_rate_clks), - base); + clk_data); hisi_clk_register_fixed_factor(hi3620_fixed_factor_clks, ARRAY_SIZE(hi3620_fixed_factor_clks), - base); + clk_data); hisi_clk_register_mux(hi3620_mux_clks, ARRAY_SIZE(hi3620_mux_clks), - base); + clk_data); hisi_clk_register_divider(hi3620_div_clks, ARRAY_SIZE(hi3620_div_clks), - base); + clk_data); hisi_clk_register_gate_sep(hi3620_seperated_gate_clks, ARRAY_SIZE(hi3620_seperated_gate_clks), - base); + clk_data); } CLK_OF_DECLARE(hi3620_clk, "hisilicon,hi3620-clock", hi3620_clk_init); + +struct hisi_mmc_clock { + unsigned int id; + const char *name; + const char *parent_name; + unsigned long flags; + u32 clken_reg; + u32 clken_bit; + u32 div_reg; + u32 div_off; + u32 
div_bits; + u32 drv_reg; + u32 drv_off; + u32 drv_bits; + u32 sam_reg; + u32 sam_off; + u32 sam_bits; +}; + +struct clk_mmc { + struct clk_hw hw; + u32 id; + void __iomem *clken_reg; + u32 clken_bit; + void __iomem *div_reg; + u32 div_off; + u32 div_bits; + void __iomem *drv_reg; + u32 drv_off; + u32 drv_bits; + void __iomem *sam_reg; + u32 sam_off; + u32 sam_bits; +}; + +#define to_mmc(_hw) container_of(_hw, struct clk_mmc, hw) + +static struct hisi_mmc_clock hi3620_mmc_clks[] __initdata = { + { HI3620_SD_CIUCLK, "sd_bclk1", "sd_clk", CLK_SET_RATE_PARENT, 0x1f8, 0, 0x1f8, 1, 3, 0x1f8, 4, 4, 0x1f8, 8, 4}, + { HI3620_MMC_CIUCLK1, "mmc_bclk1", "mmc_clk1", CLK_SET_RATE_PARENT, 0x1f8, 12, 0x1f8, 13, 3, 0x1f8, 16, 4, 0x1f8, 20, 4}, + { HI3620_MMC_CIUCLK2, "mmc_bclk2", "mmc_clk2", CLK_SET_RATE_PARENT, 0x1f8, 24, 0x1f8, 25, 3, 0x1f8, 28, 4, 0x1fc, 0, 4}, + { HI3620_MMC_CIUCLK3, "mmc_bclk3", "mmc_clk3", CLK_SET_RATE_PARENT, 0x1fc, 4, 0x1fc, 5, 3, 0x1fc, 8, 4, 0x1fc, 12, 4}, +}; + +static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + switch (parent_rate) { + case 26000000: + return 13000000; + case 180000000: + return 25000000; + case 360000000: + return 50000000; + case 720000000: + return 100000000; + case 1440000000: + return 180000000; + default: + return parent_rate; + } +} + +static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk **best_parent_p) +{ + struct clk_mmc *mclk = to_mmc(hw); + unsigned long best = 0; + + if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) { + rate = 13000000; + best = 26000000; + } else if (rate <= 26000000) { + rate = 25000000; + best = 180000000; + } else if (rate <= 52000000) { + rate = 50000000; + best = 360000000; + } else if (rate <= 100000000) { + rate = 100000000; + best = 720000000; + } else { + /* max is 180M */ + rate = 180000000; + best = 1440000000; + } + *best_parent_rate = best; + return rate; +} + +static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) { + if (para % 2) + val |= 1 << (off + i); + else + val &= ~(1 << (off + i)); + para = para >> 1; + } + + return val; +} + +static int mmc_clk_set_timing(struct clk_hw *hw, unsigned long rate) +{ + struct clk_mmc *mclk = to_mmc(hw); + unsigned long flags; + u32 sam, drv, div, val; + static DEFINE_SPINLOCK(mmc_clk_lock); + + switch (rate) { + case 13000000: + sam = 3; + drv = 1; + div = 1; + break; + case 25000000: + sam = 13; + drv = 6; + div = 6; + break; + case 50000000: + sam = 3; + drv = 6; + div = 6; + break; + case 100000000: + sam = 6; + drv = 4; + div = 6; + break; + case 180000000: + sam = 6; + drv = 4; + div = 7; + break; + default: + return -EINVAL; + } + + spin_lock_irqsave(&mmc_clk_lock, flags); + + val = readl_relaxed(mclk->clken_reg); + val &= ~(1 << mclk->clken_bit); + writel_relaxed(val, mclk->clken_reg); + + val = readl_relaxed(mclk->sam_reg); + val = mmc_clk_delay(val, sam, mclk->sam_off, mclk->sam_bits); + writel_relaxed(val, mclk->sam_reg); + + val = readl_relaxed(mclk->drv_reg); + val = mmc_clk_delay(val, drv, mclk->drv_off, mclk->drv_bits); + writel_relaxed(val, mclk->drv_reg); + + val = readl_relaxed(mclk->div_reg); + val = mmc_clk_delay(val, div, mclk->div_off, mclk->div_bits); + writel_relaxed(val, mclk->div_reg); + + val = readl_relaxed(mclk->clken_reg); + val |= 1 << mclk->clken_bit; + writel_relaxed(val, mclk->clken_reg); + + spin_unlock_irqrestore(&mmc_clk_lock, flags); + + return 0; +} + +static int 
mmc_clk_prepare(struct clk_hw *hw) +{ + struct clk_mmc *mclk = to_mmc(hw); + unsigned long rate; + + if (mclk->id == HI3620_MMC_CIUCLK1) + rate = 13000000; + else + rate = 25000000; + + return mmc_clk_set_timing(hw, rate); +} + +static int mmc_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return mmc_clk_set_timing(hw, rate); +} + +static struct clk_ops clk_mmc_ops = { + .prepare = mmc_clk_prepare, + .determine_rate = mmc_clk_determine_rate, + .set_rate = mmc_clk_set_rate, + .recalc_rate = mmc_clk_recalc_rate, +}; + +static struct clk *hisi_register_clk_mmc(struct hisi_mmc_clock *mmc_clk, + void __iomem *base, struct device_node *np) +{ + struct clk_mmc *mclk; + struct clk *clk; + struct clk_init_data init; + + mclk = kzalloc(sizeof(*mclk), GFP_KERNEL); + if (!mclk) { + pr_err("%s: fail to allocate mmc clk\n", __func__); + return ERR_PTR(-ENOMEM); + } + + init.name = mmc_clk->name; + init.ops = &clk_mmc_ops; + init.flags = mmc_clk->flags | CLK_IS_BASIC; + init.parent_names = (mmc_clk->parent_name ? &mmc_clk->parent_name : NULL); + init.num_parents = (mmc_clk->parent_name ? 1 : 0); + mclk->hw.init = &init; + + mclk->id = mmc_clk->id; + mclk->clken_reg = base + mmc_clk->clken_reg; + mclk->clken_bit = mmc_clk->clken_bit; + mclk->div_reg = base + mmc_clk->div_reg; + mclk->div_off = mmc_clk->div_off; + mclk->div_bits = mmc_clk->div_bits; + mclk->drv_reg = base + mmc_clk->drv_reg; + mclk->drv_off = mmc_clk->drv_off; + mclk->drv_bits = mmc_clk->drv_bits; + mclk->sam_reg = base + mmc_clk->sam_reg; + mclk->sam_off = mmc_clk->sam_off; + mclk->sam_bits = mmc_clk->sam_bits; + + clk = clk_register(NULL, &mclk->hw); + if (WARN_ON(IS_ERR(clk))) + kfree(mclk); + return clk; +} + +static void __init hi3620_mmc_clk_init(struct device_node *node) +{ + void __iomem *base; + int i, num = ARRAY_SIZE(hi3620_mmc_clks); + struct clk_onecell_data *clk_data; + + if (!node) { + pr_err("failed to find pctrl node in DTS\n"); + return; + } + + base = of_iomap(node, 0); + if (!base) { + pr_err("failed to map pctrl\n"); + return; + } + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (WARN_ON(!clk_data)) + return; + + clk_data->clks = kzalloc(sizeof(struct clk *) * num, GFP_KERNEL); + if (!clk_data->clks) { + pr_err("%s: fail to allocate mmc clk\n", __func__); + return; + } + + for (i = 0; i < num; i++) { + struct hisi_mmc_clock *mmc_clk = &hi3620_mmc_clks[i]; + clk_data->clks[mmc_clk->id] = + hisi_register_clk_mmc(mmc_clk, base, node); + } + + clk_data->clk_num = num; + of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); +} + +CLK_OF_DECLARE(hi3620_mmc_clk, "hisilicon,hi3620-mmc-clock", hi3620_mmc_clk_init); diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c new file mode 100644 index 000000000000..132b57a0ce09 --- /dev/null +++ b/drivers/clk/hisilicon/clk-hip04.c @@ -0,0 +1,58 @@ +/* + * Hisilicon HiP04 clock driver + * + * Copyright (c) 2013-2014 Hisilicon Limited. + * Copyright (c) 2013-2014 Linaro Limited. + * + * Author: Haojian Zhuang <haojian.zhuang@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/slab.h> +#include <linux/clk.h> + +#include <dt-bindings/clock/hip04-clock.h> + +#include "clk.h" + +/* fixed rate clocks */ +static struct hisi_fixed_rate_clock hip04_fixed_rate_clks[] __initdata = { + { HIP04_OSC50M, "osc50m", NULL, CLK_IS_ROOT, 50000000, }, + { HIP04_CLK_50M, "clk50m", NULL, CLK_IS_ROOT, 50000000, }, + { HIP04_CLK_168M, "clk168m", NULL, CLK_IS_ROOT, 168750000, }, +}; + +static void __init hip04_clk_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + clk_data = hisi_clk_init(np, HIP04_NR_CLKS); + if (!clk_data) + return; + + hisi_clk_register_fixed_rate(hip04_fixed_rate_clks, + ARRAY_SIZE(hip04_fixed_rate_clks), + clk_data); +} +CLK_OF_DECLARE(hip04_clk, "hisilicon,hip04-clock", hip04_clk_init); diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c index a3a7152c92d9..276f672e7b1a 100644 --- a/drivers/clk/hisilicon/clk.c +++ b/drivers/clk/hisilicon/clk.c @@ -37,23 +37,49 @@ #include "clk.h" static DEFINE_SPINLOCK(hisi_clk_lock); -static struct clk **clk_table; -static struct clk_onecell_data clk_data; -void __init hisi_clk_init(struct device_node *np, int nr_clks) +struct hisi_clock_data __init *hisi_clk_init(struct device_node *np, + int nr_clks) { + struct hisi_clock_data *clk_data; + struct clk **clk_table; + void __iomem *base; + + if (np) { + base = of_iomap(np, 0); + if (!base) { + pr_err("failed to map Hisilicon clock registers\n"); + goto err; + } + } else { + pr_err("failed to find Hisilicon clock node in DTS\n"); + goto err; + } + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) { + pr_err("%s: could not allocate clock data\n", __func__); + goto err; + } + clk_data->base = base; + clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); if (!clk_table) { pr_err("%s: could not allocate clock lookup table\n", __func__); - return; + goto err_data; } - clk_data.clks = clk_table; - clk_data.clk_num = nr_clks; - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + clk_data->clk_data.clks = clk_table; + clk_data->clk_data.clk_num = nr_clks; + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data->clk_data); + return clk_data; +err_data: + kfree(clk_data); +err: + return NULL; } void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks, - int nums, void __iomem *base) + int nums, struct hisi_clock_data *data) { struct clk *clk; int i; @@ -68,11 +94,13 @@ void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks, __func__, clks[i].name); continue; } + data->clk_data.clks[clks[i].id] = clk; } } void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks, - int nums, void __iomem *base) + int nums, + struct hisi_clock_data *data) { struct clk *clk; int i; @@ -87,13 +115,15 @@ void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks, __func__, clks[i].name); continue; } + data->clk_data.clks[clks[i].id] = clk; } } void __init hisi_clk_register_mux(struct hisi_mux_clock *clks, - int nums, void __iomem *base) + int nums, struct 
hisi_clock_data *data) { struct clk *clk; + void __iomem *base = data->base; int i; for (i = 0; i < nums; i++) { @@ -111,14 +141,15 @@ void __init hisi_clk_register_mux(struct hisi_mux_clock *clks, if (clks[i].alias) clk_register_clkdev(clk, clks[i].alias, NULL); - clk_table[clks[i].id] = clk; + data->clk_data.clks[clks[i].id] = clk; } } void __init hisi_clk_register_divider(struct hisi_divider_clock *clks, - int nums, void __iomem *base) + int nums, struct hisi_clock_data *data) { struct clk *clk; + void __iomem *base = data->base; int i; for (i = 0; i < nums; i++) { @@ -139,14 +170,15 @@ void __init hisi_clk_register_divider(struct hisi_divider_clock *clks, if (clks[i].alias) clk_register_clkdev(clk, clks[i].alias, NULL); - clk_table[clks[i].id] = clk; + data->clk_data.clks[clks[i].id] = clk; } } void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks, - int nums, void __iomem *base) + int nums, struct hisi_clock_data *data) { struct clk *clk; + void __iomem *base = data->base; int i; for (i = 0; i < nums; i++) { @@ -166,6 +198,6 @@ void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks, if (clks[i].alias) clk_register_clkdev(clk, clks[i].alias, NULL); - clk_table[clks[i].id] = clk; + data->clk_data.clks[clks[i].id] = clk; } } diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h index 4a6beebefb7a..43fa5da88f02 100644 --- a/drivers/clk/hisilicon/clk.h +++ b/drivers/clk/hisilicon/clk.h @@ -30,6 +30,11 @@ #include <linux/io.h> #include <linux/spinlock.h> +struct hisi_clock_data { + struct clk_onecell_data clk_data; + void __iomem *base; +}; + struct hisi_fixed_rate_clock { unsigned int id; char *name; @@ -89,15 +94,15 @@ struct clk *hisi_register_clkgate_sep(struct device *, const char *, void __iomem *, u8, u8, spinlock_t *); -void __init hisi_clk_init(struct device_node *, int); +struct hisi_clock_data __init *hisi_clk_init(struct device_node *, int); void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *, - int, void __iomem *); + int, struct hisi_clock_data *); void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *, - int, void __iomem *); + int, struct hisi_clock_data *); void __init hisi_clk_register_mux(struct hisi_mux_clock *, int, - void __iomem *); + struct hisi_clock_data *); void __init hisi_clk_register_divider(struct hisi_divider_clock *, - int, void __iomem *); + int, struct hisi_clock_data *); void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *, - int, void __iomem *); + int, struct hisi_clock_data *); #endif /* __HISI_CLK_H */ diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c index 17a598398a53..86f1e362eafb 100644 --- a/drivers/clk/keystone/gate.c +++ b/drivers/clk/keystone/gate.c @@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev, init.name = name; init.ops = &clk_psc_ops; + init.flags = 0; init.parent_names = (parent_name ? &parent_name : NULL); init.num_parents = (parent_name ? 
1 : 0); diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c index 80c1dd15d15c..23a56f561812 100644 --- a/drivers/clk/mmp/clk-frac.c +++ b/drivers/clk/mmp/clk-frac.c @@ -40,15 +40,19 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate, for (i = 0; i < factor->ftbl_cnt; i++) { prev_rate = rate; - rate = (((*prate / 10000) * factor->ftbl[i].num) / - (factor->ftbl[i].den * factor->masks->factor)) * 10000; + rate = (((*prate / 10000) * factor->ftbl[i].den) / + (factor->ftbl[i].num * factor->masks->factor)) * 10000; if (rate > drate) break; } - if (i == 0) + if ((i == 0) || (i == factor->ftbl_cnt)) { return rate; - else - return prev_rate; + } else { + if ((drate - prev_rate) > (rate - drate)) + return rate; + else + return prev_rate; + } } static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, @@ -64,7 +68,7 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, num = (val >> masks->num_shift) & masks->num_mask; /* calculate denominator */ - den = (val >> masks->den_shift) & masks->num_mask; + den = (val >> masks->den_shift) & masks->den_mask; if (!den) return 0; @@ -85,8 +89,8 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate, for (i = 0; i < factor->ftbl_cnt; i++) { prev_rate = rate; - rate = (((prate / 10000) * factor->ftbl[i].num) / - (factor->ftbl[i].den * factor->masks->factor)) * 10000; + rate = (((prate / 10000) * factor->ftbl[i].den) / + (factor->ftbl[i].num * factor->masks->factor)) * 10000; if (rate > drate) break; } diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig index c339b829d3e3..693f7be129f1 100644 --- a/drivers/clk/mvebu/Kconfig +++ b/drivers/clk/mvebu/Kconfig @@ -13,6 +13,14 @@ config ARMADA_370_CLK select MVEBU_CLK_CPU select MVEBU_CLK_COREDIV +config ARMADA_375_CLK + bool + select MVEBU_CLK_COMMON + +config ARMADA_38X_CLK + bool + select MVEBU_CLK_COMMON + config ARMADA_XP_CLK bool select MVEBU_CLK_COMMON diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile index 21bbfb4a9f42..4c66162fb0b4 100644 --- a/drivers/clk/mvebu/Makefile +++ b/drivers/clk/mvebu/Makefile @@ -3,6 +3,8 @@ obj-$(CONFIG_MVEBU_CLK_CPU) += clk-cpu.o obj-$(CONFIG_MVEBU_CLK_COREDIV) += clk-corediv.o obj-$(CONFIG_ARMADA_370_CLK) += armada-370.o +obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o +obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o obj-$(CONFIG_DOVE_CLK) += dove.o obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c index 81a202d12a7a..bef198a83863 100644 --- a/drivers/clk/mvebu/armada-370.c +++ b/drivers/clk/mvebu/armada-370.c @@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = { .num_ratios = ARRAY_SIZE(a370_coreclk_ratios), }; -static void __init a370_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &a370_coreclks); -} -CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock", - a370_coreclk_init); - /* * Clock Gating Control */ @@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = { { } }; -static void __init a370_clk_gating_init(struct device_node *np) +static void __init a370_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, a370_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock"); + + mvebu_coreclk_setup(np, &a370_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, a370_gating_desc); } 
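The mmp clk-frac hunk above does two things: it swaps numerator and denominator in the rate computation (rate = ((prate / 10000) * den / (num * factor)) * 10000), and it changes the round_rate path from always returning the next-lower table entry to returning whichever neighbouring entry is closer to the requested rate. A minimal standalone sketch of that nearest-neighbour selection, using a hypothetical sorted rate table rather than the driver's factor table:

static unsigned long pick_nearest_rate(const unsigned long *table,
				       unsigned int count,
				       unsigned long drate)
{
	unsigned long rate = 0, prev_rate = 0;
	unsigned int i;

	/* table is assumed sorted ascending, like the driver's factor table */
	for (i = 0; i < count; i++) {
		prev_rate = rate;
		rate = table[i];
		if (rate > drate)
			break;
	}

	/* below the first entry or past the last one: only one candidate */
	if (i == 0 || i == count)
		return rate;

	/* otherwise return whichever neighbour is closer to the request */
	return (drate - prev_rate) > (rate - drate) ? rate : prev_rate;
}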
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock", - a370_clk_gating_init); +CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); + diff --git a/drivers/clk/mvebu/armada-375.c b/drivers/clk/mvebu/armada-375.c new file mode 100644 index 000000000000..c991a4d95e10 --- /dev/null +++ b/drivers/clk/mvebu/armada-375.c @@ -0,0 +1,184 @@ +/* + * Marvell Armada 375 SoC clocks + * + * Copyright (C) 2014 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * Core Clocks + */ + +/* + * For the Armada 375 SoCs, the CPU, DDR and L2 clocks frequencies are + * all modified at the same time, and not separately as for the Armada + * 370 or the Armada XP SoCs. + * + * SAR0[21:17] : CPU frequency DDR frequency L2 frequency + * 6 = 400 MHz 400 MHz 200 MHz + * 15 = 600 MHz 600 MHz 300 MHz + * 21 = 800 MHz 534 MHz 400 MHz + * 25 = 1000 MHz 500 MHz 500 MHz + * others reserved. + * + * SAR0[22] : TCLK frequency + * 0 = 166 MHz + * 1 = 200 MHz + */ + +#define SAR1_A375_TCLK_FREQ_OPT 22 +#define SAR1_A375_TCLK_FREQ_OPT_MASK 0x1 +#define SAR1_A375_CPU_DDR_L2_FREQ_OPT 17 +#define SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK 0x1F + +static const u32 armada_375_tclk_frequencies[] __initconst = { + 166000000, + 200000000, +}; + +static u32 __init armada_375_get_tclk_freq(void __iomem *sar) +{ + u8 tclk_freq_select; + + tclk_freq_select = ((readl(sar) >> SAR1_A375_TCLK_FREQ_OPT) & + SAR1_A375_TCLK_FREQ_OPT_MASK); + return armada_375_tclk_frequencies[tclk_freq_select]; +} + + +static const u32 armada_375_cpu_frequencies[] __initconst = { + 0, 0, 0, 0, 0, 0, + 400000000, + 0, 0, 0, 0, 0, 0, 0, 0, + 600000000, + 0, 0, 0, 0, 0, + 800000000, + 0, 0, 0, + 1000000000, +}; + +static u32 __init armada_375_get_cpu_freq(void __iomem *sar) +{ + u8 cpu_freq_select; + + cpu_freq_select = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) & + SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK); + if (cpu_freq_select >= ARRAY_SIZE(armada_375_cpu_frequencies)) { + pr_err("Selected CPU frequency (%d) unsupported\n", + cpu_freq_select); + return 0; + } else + return armada_375_cpu_frequencies[cpu_freq_select]; +} + +enum { A375_CPU_TO_DDR, A375_CPU_TO_L2 }; + +static const struct coreclk_ratio armada_375_coreclk_ratios[] __initconst = { + { .id = A375_CPU_TO_L2, .name = "l2clk" }, + { .id = A375_CPU_TO_DDR, .name = "ddrclk" }, +}; + +static const int armada_375_cpu_l2_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {1, 2}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {1, 2}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int armada_375_cpu_ddr_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {1, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {2, 3}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {2, 3}, {0, 1}, {0, 1}, + {0, 1}, {1, 2}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init armada_375_get_clk_ratio( + void __iomem *sar, int id, int *mult, int 
*div) +{ + u32 opt = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) & + SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK); + + switch (id) { + case A375_CPU_TO_L2: + *mult = armada_375_cpu_l2_ratios[opt][0]; + *div = armada_375_cpu_l2_ratios[opt][1]; + break; + case A375_CPU_TO_DDR: + *mult = armada_375_cpu_ddr_ratios[opt][0]; + *div = armada_375_cpu_ddr_ratios[opt][1]; + break; + } +} + +static const struct coreclk_soc_desc armada_375_coreclks = { + .get_tclk_freq = armada_375_get_tclk_freq, + .get_cpu_freq = armada_375_get_cpu_freq, + .get_clk_ratio = armada_375_get_clk_ratio, + .ratios = armada_375_coreclk_ratios, + .num_ratios = ARRAY_SIZE(armada_375_coreclk_ratios), +}; + +static void __init armada_375_coreclk_init(struct device_node *np) +{ + mvebu_coreclk_setup(np, &armada_375_coreclks); +} +CLK_OF_DECLARE(armada_375_core_clk, "marvell,armada-375-core-clock", + armada_375_coreclk_init); + +/* + * Clock Gating Control + */ +static const struct clk_gating_soc_desc armada_375_gating_desc[] __initconst = { + { "mu", NULL, 2 }, + { "pp", NULL, 3 }, + { "ptp", NULL, 4 }, + { "pex0", NULL, 5 }, + { "pex1", NULL, 6 }, + { "audio", NULL, 8 }, + { "nd_clk", "nand", 11 }, + { "sata0_link", "sata0_core", 14 }, + { "sata0_core", NULL, 15 }, + { "usb3", NULL, 16 }, + { "sdio", NULL, 17 }, + { "usb", NULL, 18 }, + { "gop", NULL, 19 }, + { "sata1_link", "sata1_core", 20 }, + { "sata1_core", NULL, 21 }, + { "xor0", NULL, 22 }, + { "xor1", NULL, 23 }, + { "copro", NULL, 24 }, + { "tdm", NULL, 25 }, + { "crypto0_enc", NULL, 28 }, + { "crypto0_core", NULL, 29 }, + { "crypto1_enc", NULL, 30 }, + { "crypto1_core", NULL, 31 }, + { } +}; + +static void __init armada_375_clk_gating_init(struct device_node *np) +{ + mvebu_clk_gating_setup(np, armada_375_gating_desc); +} +CLK_OF_DECLARE(armada_375_clk_gating, "marvell,armada-375-gating-clock", + armada_375_clk_gating_init); diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c new file mode 100644 index 000000000000..8bccf4ecdab6 --- /dev/null +++ b/drivers/clk/mvebu/armada-38x.c @@ -0,0 +1,167 @@ +/* + * Marvell Armada 380/385 SoC clocks + * + * Copyright (C) 2014 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Andrew Lunn <andrew@lunn.ch> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include "common.h" + +/* + * SAR[14:10] : Ratios between PCLK0, NBCLK, HCLK and DRAM clocks + * + * SAR[15] : TCLK frequency + * 0 = 250 MHz + * 1 = 200 MHz + */ + +#define SAR_A380_TCLK_FREQ_OPT 15 +#define SAR_A380_TCLK_FREQ_OPT_MASK 0x1 +#define SAR_A380_CPU_DDR_L2_FREQ_OPT 10 +#define SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK 0x1F + +static const u32 armada_38x_tclk_frequencies[] __initconst = { + 250000000, + 200000000, +}; + +static u32 __init armada_38x_get_tclk_freq(void __iomem *sar) +{ + u8 tclk_freq_select; + + tclk_freq_select = ((readl(sar) >> SAR_A380_TCLK_FREQ_OPT) & + SAR_A380_TCLK_FREQ_OPT_MASK); + return armada_38x_tclk_frequencies[tclk_freq_select]; +} + +static const u32 armada_38x_cpu_frequencies[] __initconst = { + 0, 0, 0, 0, + 1066 * 1000 * 1000, 0, 0, 0, + 1332 * 1000 * 1000, 0, 0, 0, + 1600 * 1000 * 1000, +}; + +static u32 __init armada_38x_get_cpu_freq(void __iomem *sar) +{ + u8 cpu_freq_select; + + cpu_freq_select = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) & + SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK); + if (cpu_freq_select >= ARRAY_SIZE(armada_38x_cpu_frequencies)) { + pr_err("Selected CPU frequency (%d) unsupported\n", + cpu_freq_select); + return 0; + } + + return armada_38x_cpu_frequencies[cpu_freq_select]; +} + +enum { A380_CPU_TO_DDR, A380_CPU_TO_L2 }; + +static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = { + { .id = A380_CPU_TO_L2, .name = "l2clk" }, + { .id = A380_CPU_TO_DDR, .name = "ddrclk" }, +}; + +static const int armada_38x_cpu_l2_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = { + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {1, 2}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, + {0, 1}, {0, 1}, {0, 1}, {0, 1}, +}; + +static void __init armada_38x_get_clk_ratio( + void __iomem *sar, int id, int *mult, int *div) +{ + u32 opt = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) & + SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK); + + switch (id) { + case A380_CPU_TO_L2: + *mult = armada_38x_cpu_l2_ratios[opt][0]; + *div = armada_38x_cpu_l2_ratios[opt][1]; + break; + case A380_CPU_TO_DDR: + *mult = armada_38x_cpu_ddr_ratios[opt][0]; + *div = armada_38x_cpu_ddr_ratios[opt][1]; + break; + } +} + +static const struct coreclk_soc_desc armada_38x_coreclks = { + .get_tclk_freq = armada_38x_get_tclk_freq, + .get_cpu_freq = armada_38x_get_cpu_freq, + .get_clk_ratio = armada_38x_get_clk_ratio, + .ratios = armada_38x_coreclk_ratios, + .num_ratios = ARRAY_SIZE(armada_38x_coreclk_ratios), +}; + +static void __init armada_38x_coreclk_init(struct device_node *np) +{ + mvebu_coreclk_setup(np, &armada_38x_coreclks); +} +CLK_OF_DECLARE(armada_38x_core_clk, "marvell,armada-380-core-clock", + armada_38x_coreclk_init); + +/* + * Clock Gating Control + */ +static const struct clk_gating_soc_desc armada_38x_gating_desc[] __initconst = { + { "audio", NULL, 0 }, + { "ge2", NULL, 2 }, + { "ge1", NULL, 3 }, + { "ge0", NULL, 4 }, + { "pex1", NULL, 5 }, + { "pex2", NULL, 6 }, + { "pex3", NULL, 7 }, + { "pex0", 
NULL, 8 }, + { "usb3h0", NULL, 9 }, + { "usb3h1", NULL, 10 }, + { "usb3d", NULL, 11 }, + { "bm", NULL, 13 }, + { "crypto0z", NULL, 14 }, + { "sata0", NULL, 15 }, + { "crypto1z", NULL, 16 }, + { "sdio", NULL, 17 }, + { "usb2", NULL, 18 }, + { "crypto1", NULL, 21 }, + { "xor0", NULL, 22 }, + { "crypto0", NULL, 23 }, + { "tdm", NULL, 25 }, + { "xor1", NULL, 28 }, + { "sata1", NULL, 30 }, + { } +}; + +static void __init armada_38x_clk_gating_init(struct device_node *np) +{ + mvebu_clk_gating_setup(np, armada_38x_gating_desc); +} +CLK_OF_DECLARE(armada_38x_clk_gating, "marvell,armada-380-gating-clock", + armada_38x_clk_gating_init); diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c index 9922c4475aa8..b3094315a3c0 100644 --- a/drivers/clk/mvebu/armada-xp.c +++ b/drivers/clk/mvebu/armada-xp.c @@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = { .num_ratios = ARRAY_SIZE(axp_coreclk_ratios), }; -static void __init axp_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &axp_coreclks); -} -CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock", - axp_coreclk_init); - /* * Clock Gating Control */ @@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = { { } }; -static void __init axp_clk_gating_init(struct device_node *np) +static void __init axp_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, axp_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock"); + + mvebu_coreclk_setup(np, &axp_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, axp_gating_desc); } -CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock", - axp_clk_gating_init); +CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c index 7162615bcdcd..d1e5863d3375 100644 --- a/drivers/clk/mvebu/clk-corediv.c +++ b/drivers/clk/mvebu/clk-corediv.c @@ -18,26 +18,56 @@ #include "common.h" #define CORE_CLK_DIV_RATIO_MASK 0xff -#define CORE_CLK_DIV_RATIO_RELOAD BIT(8) -#define CORE_CLK_DIV_ENABLE_OFFSET 24 -#define CORE_CLK_DIV_RATIO_OFFSET 0x8 +/* + * This structure describes the hardware details (bit offset and mask) + * to configure one particular core divider clock. Those hardware + * details may differ from one SoC to another. This structure is + * therefore typically instantiated statically to describe the + * hardware details. + */ struct clk_corediv_desc { unsigned int mask; unsigned int offset; unsigned int fieldbit; }; +/* + * This structure describes the hardware details to configure the core + * divider clocks on a given SoC. Amongst others, it points to the + * array of core divider clock descriptors for this SoC, as well as + * the corresponding operations to manipulate them. + */ +struct clk_corediv_soc_desc { + const struct clk_corediv_desc *descs; + unsigned int ndescs; + const struct clk_ops ops; + u32 ratio_reload; + u32 enable_bit_offset; + u32 ratio_offset; +}; + +/* + * This structure represents one core divider clock for the clock + * framework, and is dynamically allocated for each core divider clock + * existing in the current SoC. 
+ */ struct clk_corediv { struct clk_hw hw; void __iomem *reg; - struct clk_corediv_desc desc; + const struct clk_corediv_desc *desc; + const struct clk_corediv_soc_desc *soc_desc; spinlock_t lock; }; static struct clk_onecell_data clk_data; -static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = { +/* + * Description of the core divider clocks available. For now, we + * support only NAND, and it is available at the same register + * locations regardless of the SoC. + */ +static const struct clk_corediv_desc mvebu_corediv_desc[] = { { .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */ }; @@ -46,8 +76,9 @@ static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = { static int clk_corediv_is_enabled(struct clk_hw *hwclk) { struct clk_corediv *corediv = to_corediv_clk(hwclk); - struct clk_corediv_desc *desc = &corediv->desc; - u32 enable_mask = BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET; + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; + u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset; return !!(readl(corediv->reg) & enable_mask); } @@ -55,14 +86,15 @@ static int clk_corediv_is_enabled(struct clk_hw *hwclk) static int clk_corediv_enable(struct clk_hw *hwclk) { struct clk_corediv *corediv = to_corediv_clk(hwclk); - struct clk_corediv_desc *desc = &corediv->desc; + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; unsigned long flags = 0; u32 reg; spin_lock_irqsave(&corediv->lock, flags); reg = readl(corediv->reg); - reg |= (BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET); + reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset); writel(reg, corediv->reg); spin_unlock_irqrestore(&corediv->lock, flags); @@ -73,14 +105,15 @@ static int clk_corediv_enable(struct clk_hw *hwclk) static void clk_corediv_disable(struct clk_hw *hwclk) { struct clk_corediv *corediv = to_corediv_clk(hwclk); - struct clk_corediv_desc *desc = &corediv->desc; + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; unsigned long flags = 0; u32 reg; spin_lock_irqsave(&corediv->lock, flags); reg = readl(corediv->reg); - reg &= ~(BIT(desc->fieldbit) << CORE_CLK_DIV_ENABLE_OFFSET); + reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset); writel(reg, corediv->reg); spin_unlock_irqrestore(&corediv->lock, flags); @@ -90,10 +123,11 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk, unsigned long parent_rate) { struct clk_corediv *corediv = to_corediv_clk(hwclk); - struct clk_corediv_desc *desc = &corediv->desc; + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; u32 reg, div; - reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET); + reg = readl(corediv->reg + soc_desc->ratio_offset); div = (reg >> desc->offset) & desc->mask; return parent_rate / div; } @@ -117,7 +151,8 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate, unsigned long parent_rate) { struct clk_corediv *corediv = to_corediv_clk(hwclk); - struct clk_corediv_desc *desc = &corediv->desc; + const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc; + const struct clk_corediv_desc *desc = corediv->desc; unsigned long flags = 0; u32 reg, div; @@ -126,17 +161,17 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate, 
spin_lock_irqsave(&corediv->lock, flags); /* Write new divider to the divider ratio register */ - reg = readl(corediv->reg + CORE_CLK_DIV_RATIO_OFFSET); + reg = readl(corediv->reg + soc_desc->ratio_offset); reg &= ~(desc->mask << desc->offset); reg |= (div & desc->mask) << desc->offset; - writel(reg, corediv->reg + CORE_CLK_DIV_RATIO_OFFSET); + writel(reg, corediv->reg + soc_desc->ratio_offset); /* Set reload-force for this clock */ reg = readl(corediv->reg) | BIT(desc->fieldbit); writel(reg, corediv->reg); /* Now trigger the clock update */ - reg = readl(corediv->reg) | CORE_CLK_DIV_RATIO_RELOAD; + reg = readl(corediv->reg) | soc_desc->ratio_reload; writel(reg, corediv->reg); /* @@ -144,7 +179,7 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate, * ratios request and the reload request. */ udelay(1000); - reg &= ~(CORE_CLK_DIV_RATIO_MASK | CORE_CLK_DIV_RATIO_RELOAD); + reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload); writel(reg, corediv->reg); udelay(1000); @@ -153,16 +188,53 @@ static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate, return 0; } -static const struct clk_ops corediv_ops = { - .enable = clk_corediv_enable, - .disable = clk_corediv_disable, - .is_enabled = clk_corediv_is_enabled, - .recalc_rate = clk_corediv_recalc_rate, - .round_rate = clk_corediv_round_rate, - .set_rate = clk_corediv_set_rate, +static const struct clk_corediv_soc_desc armada370_corediv_soc = { + .descs = mvebu_corediv_desc, + .ndescs = ARRAY_SIZE(mvebu_corediv_desc), + .ops = { + .enable = clk_corediv_enable, + .disable = clk_corediv_disable, + .is_enabled = clk_corediv_is_enabled, + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(8), + .enable_bit_offset = 24, + .ratio_offset = 0x8, +}; + +static const struct clk_corediv_soc_desc armada380_corediv_soc = { + .descs = mvebu_corediv_desc, + .ndescs = ARRAY_SIZE(mvebu_corediv_desc), + .ops = { + .enable = clk_corediv_enable, + .disable = clk_corediv_disable, + .is_enabled = clk_corediv_is_enabled, + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(8), + .enable_bit_offset = 16, + .ratio_offset = 0x4, }; -static void __init mvebu_corediv_clk_init(struct device_node *node) +static const struct clk_corediv_soc_desc armada375_corediv_soc = { + .descs = mvebu_corediv_desc, + .ndescs = ARRAY_SIZE(mvebu_corediv_desc), + .ops = { + .recalc_rate = clk_corediv_recalc_rate, + .round_rate = clk_corediv_round_rate, + .set_rate = clk_corediv_set_rate, + }, + .ratio_reload = BIT(8), + .ratio_offset = 0x4, +}; + +static void __init +mvebu_corediv_clk_init(struct device_node *node, + const struct clk_corediv_soc_desc *soc_desc) { struct clk_init_data init; struct clk_corediv *corediv; @@ -178,7 +250,7 @@ static void __init mvebu_corediv_clk_init(struct device_node *node) parent_name = of_clk_get_parent_name(node, 0); - clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc); + clk_data.clk_num = soc_desc->ndescs; /* clks holds the clock array */ clks = kcalloc(clk_data.clk_num, sizeof(struct clk *), @@ -199,10 +271,11 @@ static void __init mvebu_corediv_clk_init(struct device_node *node) init.num_parents = 1; init.parent_names = &parent_name; init.name = clk_name; - init.ops = &corediv_ops; + init.ops = &soc_desc->ops; init.flags = 0; - corediv[i].desc = mvebu_corediv_desc[i]; + corediv[i].soc_desc = soc_desc; + corediv[i].desc = 
soc_desc->descs + i; corediv[i].reg = base; corediv[i].hw.init = &init; @@ -219,5 +292,24 @@ err_free_clks: err_unmap: iounmap(base); } -CLK_OF_DECLARE(mvebu_corediv_clk, "marvell,armada-370-corediv-clock", - mvebu_corediv_clk_init); + +static void __init armada370_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &armada370_corediv_soc); +} +CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock", + armada370_corediv_clk_init); + +static void __init armada375_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &armada375_corediv_soc); +} +CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock", + armada375_corediv_clk_init); + +static void __init armada380_corediv_clk_init(struct device_node *node) +{ + return mvebu_corediv_clk_init(node, &armada380_corediv_soc); +} +CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock", + armada380_corediv_clk_init); diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c index 38aee1e3f242..b8c2424ac926 100644 --- a/drivers/clk/mvebu/dove.c +++ b/drivers/clk/mvebu/dove.c @@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = { .num_ratios = ARRAY_SIZE(dove_coreclk_ratios), }; -static void __init dove_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &dove_coreclks); -} -CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init); - /* * Clock Gating Control */ @@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = { { } }; -static void __init dove_clk_gating_init(struct device_node *np) +static void __init dove_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, dove_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock"); + + mvebu_coreclk_setup(np, &dove_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, dove_gating_desc); } -CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock", - dove_clk_gating_init); +CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index 2636a55f29f9..ddb666a86500 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c @@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = { .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), }; -static void __init kirkwood_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &kirkwood_coreclks); -} -CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock", - kirkwood_coreclk_init); - static const struct coreclk_soc_desc mv88f6180_coreclks = { .get_tclk_freq = kirkwood_get_tclk_freq, .get_cpu_freq = mv88f6180_get_cpu_freq, @@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = { .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), }; -static void __init mv88f6180_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &mv88f6180_coreclks); -} -CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock", - mv88f6180_coreclk_init); - /* * Clock Gating Control */ @@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = { { } }; -static void __init kirkwood_clk_gating_init(struct device_node *np) +static void __init kirkwood_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, kirkwood_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, 
"marvell,kirkwood-gating-clock"); + + + if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock")) + mvebu_coreclk_setup(np, &mv88f6180_coreclks); + else + mvebu_coreclk_setup(np, &kirkwood_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); } -CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock", - kirkwood_clk_gating_init); +CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", + kirkwood_clk_init); +CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock", + kirkwood_clk_init); diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index 884187fbfe00..13eae14c2cc2 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -17,7 +17,7 @@ #include <linux/module.h> #include <linux/platform_device.h> -#include <dt-bindings/clk/exynos-audss-clk.h> +#include <dt-bindings/clock/exynos-audss-clk.h> enum exynos_audss_clk_type { TYPE_EXYNOS4210, diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 010f071af883..b4f967210175 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -16,6 +16,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/syscore_ops.h> #include "clk.h" @@ -130,6 +131,17 @@ enum exynos4_plls { nr_plls /* number of PLLs */ }; +static void __iomem *reg_base; +static enum exynos4_soc exynos4_soc; + +/* + * Support for CMU save/restore across system suspends + */ +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *exynos4_save_common; +static struct samsung_clk_reg_dump *exynos4_save_soc; +static struct samsung_clk_reg_dump *exynos4_save_pll; + /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
@@ -154,6 +166,17 @@ static unsigned long exynos4x12_clk_save[] __initdata = { E4X12_MPLL_CON0, }; +static unsigned long exynos4_clk_pll_regs[] __initdata = { + EPLL_LOCK, + VPLL_LOCK, + EPLL_CON0, + EPLL_CON1, + EPLL_CON2, + VPLL_CON0, + VPLL_CON1, + VPLL_CON2, +}; + static unsigned long exynos4_clk_regs[] __initdata = { SRC_LEFTBUS, DIV_LEFTBUS, @@ -161,12 +184,6 @@ static unsigned long exynos4_clk_regs[] __initdata = { SRC_RIGHTBUS, DIV_RIGHTBUS, GATE_IP_RIGHTBUS, - EPLL_CON0, - EPLL_CON1, - EPLL_CON2, - VPLL_CON0, - VPLL_CON1, - VPLL_CON2, SRC_TOP0, SRC_TOP1, SRC_CAM, @@ -227,6 +244,124 @@ static unsigned long exynos4_clk_regs[] __initdata = { GATE_IP_CPU, }; +static const struct samsung_clk_reg_dump src_mask_suspend[] = { + { .offset = SRC_MASK_TOP, .value = 0x00000001, }, + { .offset = SRC_MASK_CAM, .value = 0x11111111, }, + { .offset = SRC_MASK_TV, .value = 0x00000111, }, + { .offset = SRC_MASK_LCD0, .value = 0x00001111, }, + { .offset = SRC_MASK_MAUDIO, .value = 0x00000001, }, + { .offset = SRC_MASK_FSYS, .value = 0x01011111, }, + { .offset = SRC_MASK_PERIL0, .value = 0x01111111, }, + { .offset = SRC_MASK_PERIL1, .value = 0x01110111, }, + { .offset = SRC_MASK_DMC, .value = 0x00010000, }, +}; + +static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = { + { .offset = E4210_SRC_MASK_LCD1, .value = 0x00001111, }, +}; + +#define PLL_ENABLED (1 << 31) +#define PLL_LOCKED (1 << 29) + +static void exynos4_clk_wait_for_pll(u32 reg) +{ + u32 pll_con; + + pll_con = readl(reg_base + reg); + if (!(pll_con & PLL_ENABLED)) + return; + + while (!(pll_con & PLL_LOCKED)) { + cpu_relax(); + pll_con = readl(reg_base + reg); + } +} + +static int exynos4_clk_suspend(void) +{ + samsung_clk_save(reg_base, exynos4_save_common, + ARRAY_SIZE(exynos4_clk_regs)); + samsung_clk_save(reg_base, exynos4_save_pll, + ARRAY_SIZE(exynos4_clk_pll_regs)); + + if (exynos4_soc == EXYNOS4210) { + samsung_clk_save(reg_base, exynos4_save_soc, + ARRAY_SIZE(exynos4210_clk_save)); + samsung_clk_restore(reg_base, src_mask_suspend_e4210, + ARRAY_SIZE(src_mask_suspend_e4210)); + } else { + samsung_clk_save(reg_base, exynos4_save_soc, + ARRAY_SIZE(exynos4x12_clk_save)); + } + + samsung_clk_restore(reg_base, src_mask_suspend, + ARRAY_SIZE(src_mask_suspend)); + + return 0; +} + +static void exynos4_clk_resume(void) +{ + samsung_clk_restore(reg_base, exynos4_save_pll, + ARRAY_SIZE(exynos4_clk_pll_regs)); + + exynos4_clk_wait_for_pll(EPLL_CON0); + exynos4_clk_wait_for_pll(VPLL_CON0); + + samsung_clk_restore(reg_base, exynos4_save_common, + ARRAY_SIZE(exynos4_clk_regs)); + + if (exynos4_soc == EXYNOS4210) + samsung_clk_restore(reg_base, exynos4_save_soc, + ARRAY_SIZE(exynos4210_clk_save)); + else + samsung_clk_restore(reg_base, exynos4_save_soc, + ARRAY_SIZE(exynos4x12_clk_save)); +} + +static struct syscore_ops exynos4_clk_syscore_ops = { + .suspend = exynos4_clk_suspend, + .resume = exynos4_clk_resume, +}; + +static void exynos4_clk_sleep_init(void) +{ + exynos4_save_common = samsung_clk_alloc_reg_dump(exynos4_clk_regs, + ARRAY_SIZE(exynos4_clk_regs)); + if (!exynos4_save_common) + goto err_warn; + + if (exynos4_soc == EXYNOS4210) + exynos4_save_soc = samsung_clk_alloc_reg_dump( + exynos4210_clk_save, + ARRAY_SIZE(exynos4210_clk_save)); + else + exynos4_save_soc = samsung_clk_alloc_reg_dump( + exynos4x12_clk_save, + ARRAY_SIZE(exynos4x12_clk_save)); + if (!exynos4_save_soc) + goto err_common; + + exynos4_save_pll = samsung_clk_alloc_reg_dump(exynos4_clk_pll_regs, + ARRAY_SIZE(exynos4_clk_pll_regs)); + if 
(!exynos4_save_pll) + goto err_soc; + + register_syscore_ops(&exynos4_clk_syscore_ops); + return; + +err_soc: + kfree(exynos4_save_soc); +err_common: + kfree(exynos4_save_common); +err_warn: + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); +} +#else +static void exynos4_clk_sleep_init(void) {} +#endif + /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; @@ -908,12 +1043,13 @@ static unsigned long exynos4_get_xom(void) return xom; } -static void __init exynos4_clk_register_finpll(unsigned long xom) +static void __init exynos4_clk_register_finpll(void) { struct samsung_fixed_rate_clock fclk; struct clk *clk; unsigned long finpll_f = 24000000; char *parent_name; + unsigned int xom = exynos4_get_xom(); parent_name = xom & 1 ? "xusbxti" : "xxti"; clk = clk_get(NULL, parent_name); @@ -1038,27 +1174,21 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = { /* register exynos4 clocks */ static void __init exynos4_clk_init(struct device_node *np, - enum exynos4_soc exynos4_soc, - void __iomem *reg_base, unsigned long xom) + enum exynos4_soc soc) { + exynos4_soc = soc; + reg_base = of_iomap(np, 0); if (!reg_base) panic("%s: failed to map registers\n", __func__); - if (exynos4_soc == EXYNOS4210) - samsung_clk_init(np, reg_base, CLK_NR_CLKS, - exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), - exynos4210_clk_save, ARRAY_SIZE(exynos4210_clk_save)); - else - samsung_clk_init(np, reg_base, CLK_NR_CLKS, - exynos4_clk_regs, ARRAY_SIZE(exynos4_clk_regs), - exynos4x12_clk_save, ARRAY_SIZE(exynos4x12_clk_save)); + samsung_clk_init(np, reg_base, CLK_NR_CLKS); samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks, ARRAY_SIZE(exynos4_fixed_rate_ext_clks), ext_clk_match); - exynos4_clk_register_finpll(xom); + exynos4_clk_register_finpll(); if (exynos4_soc == EXYNOS4210) { samsung_clk_register_mux(exynos4210_mux_early, @@ -1125,6 +1255,8 @@ static void __init exynos4_clk_init(struct device_node *np, samsung_clk_register_alias(exynos4_aliases, ARRAY_SIZE(exynos4_aliases)); + exynos4_clk_sleep_init(); + pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n" "\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n", exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12", @@ -1136,12 +1268,12 @@ static void __init exynos4_clk_init(struct device_node *np, static void __init exynos4210_clk_init(struct device_node *np) { - exynos4_clk_init(np, EXYNOS4210, NULL, exynos4_get_xom()); + exynos4_clk_init(np, EXYNOS4210); } CLK_OF_DECLARE(exynos4210_clk, "samsung,exynos4210-clock", exynos4210_clk_init); static void __init exynos4412_clk_init(struct device_node *np) { - exynos4_clk_init(np, EXYNOS4X12, NULL, exynos4_get_xom()); + exynos4_clk_init(np, EXYNOS4X12); } CLK_OF_DECLARE(exynos4412_clk, "samsung,exynos4412-clock", exynos4412_clk_init); diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index ff4beebe1f0b..e7ee4420da81 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -16,6 +16,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/syscore_ops.h> #include "clk.h" @@ -85,6 +86,11 @@ enum exynos5250_plls { nr_plls /* number of PLLs */ }; +static void __iomem *reg_base; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *exynos5250_save; + /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
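On resume, the Exynos4 code above restores the EPLL/VPLL registers first and spins on the lock bit (exynos4_clk_wait_for_pll()) before writing back the remaining CMU registers, so that anything sourced from those PLLs sees a stable clock by the time the mux and divider settings are restored. The driver's poll is unbounded; a bounded variant of the same wait (the timeout handling and the function name are illustrative additions, not the driver's code) would look like:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define PLL_ENABLED	(1 << 31)
#define PLL_LOCKED	(1 << 29)

/* poll a PLL_CONx register until the lock bit is set, or give up */
static int wait_for_pll_lock(void __iomem *pll_con, unsigned int timeout_us)
{
	if (!(readl(pll_con) & PLL_ENABLED))
		return 0;		/* PLL is disabled, nothing to wait for */

	while (!(readl(pll_con) & PLL_LOCKED)) {
		if (!timeout_us--)
			return -ETIMEDOUT;
		udelay(1);
	}

	return 0;
}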
@@ -137,6 +143,41 @@ static unsigned long exynos5250_clk_regs[] __initdata = { GATE_IP_ACP, }; +static int exynos5250_clk_suspend(void) +{ + samsung_clk_save(reg_base, exynos5250_save, + ARRAY_SIZE(exynos5250_clk_regs)); + + return 0; +} + +static void exynos5250_clk_resume(void) +{ + samsung_clk_restore(reg_base, exynos5250_save, + ARRAY_SIZE(exynos5250_clk_regs)); +} + +static struct syscore_ops exynos5250_clk_syscore_ops = { + .suspend = exynos5250_clk_suspend, + .resume = exynos5250_clk_resume, +}; + +static void exynos5250_clk_sleep_init(void) +{ + exynos5250_save = samsung_clk_alloc_reg_dump(exynos5250_clk_regs, + ARRAY_SIZE(exynos5250_clk_regs)); + if (!exynos5250_save) { + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); + return; + } + + register_syscore_ops(&exynos5250_clk_syscore_ops); +} +#else +static void exynos5250_clk_sleep_init(void) {} +#endif + /* list of all parent clock list */ PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; PNAME(mout_cpu_p) = { "mout_apll", "mout_mpll", }; @@ -645,8 +686,6 @@ static struct of_device_id ext_clk_match[] __initdata = { /* register exynox5250 clocks */ static void __init exynos5250_clk_init(struct device_node *np) { - void __iomem *reg_base; - if (np) { reg_base = of_iomap(np, 0); if (!reg_base) @@ -655,9 +694,7 @@ static void __init exynos5250_clk_init(struct device_node *np) panic("%s: unable to determine soc\n", __func__); } - samsung_clk_init(np, reg_base, CLK_NR_CLKS, - exynos5250_clk_regs, ARRAY_SIZE(exynos5250_clk_regs), - NULL, 0); + samsung_clk_init(np, reg_base, CLK_NR_CLKS); samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks, ARRAY_SIZE(exynos5250_fixed_rate_ext_clks), ext_clk_match); @@ -685,6 +722,8 @@ static void __init exynos5250_clk_init(struct device_node *np) samsung_clk_register_gate(exynos5250_gate_clks, ARRAY_SIZE(exynos5250_gate_clks)); + exynos5250_clk_sleep_init(); + pr_info("Exynos5250: clock setup completed, armclk=%ld\n", _get_rate("div_arm2")); } diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index ab4f2f7d88ef..60b26819bed5 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -16,6 +16,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/syscore_ops.h> #include "clk.h" @@ -108,6 +109,11 @@ enum exynos5420_plls { nr_plls /* number of PLLs */ }; +static void __iomem *reg_base; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *exynos5420_save; + /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
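The samsung_clk_save()/samsung_clk_restore()/samsung_clk_alloc_reg_dump() helpers used by the Exynos4 and Exynos5250 suspend/resume hunks above live in the common drivers/clk/samsung/clk.c, which is not part of this excerpt. They operate on the same struct samsung_clk_reg_dump that the src_mask_suspend tables in the Exynos4 hunk are built from (a register offset paired with a value), and samsung_clk_alloc_reg_dump() builds such an array from a plain list of offsets. Their assumed shape, sketched for readability rather than copied from the source:

#include <linux/io.h>
#include <linux/types.h>

struct samsung_clk_reg_dump {
	u32	offset;
	u32	value;
};

/* snapshot each listed register into its dump slot */
static void samsung_clk_save(void __iomem *base,
			     struct samsung_clk_reg_dump *rd,
			     unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		rd->value = readl(base + rd->offset);
}

/* write the saved (or pre-cooked) values back to the hardware */
static void samsung_clk_restore(void __iomem *base,
				const struct samsung_clk_reg_dump *rd,
				unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		writel(rd->value, base + rd->offset);
}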
@@ -174,6 +180,41 @@ static unsigned long exynos5420_clk_regs[] __initdata = { DIV_KFC0, }; +static int exynos5420_clk_suspend(void) +{ + samsung_clk_save(reg_base, exynos5420_save, + ARRAY_SIZE(exynos5420_clk_regs)); + + return 0; +} + +static void exynos5420_clk_resume(void) +{ + samsung_clk_restore(reg_base, exynos5420_save, + ARRAY_SIZE(exynos5420_clk_regs)); +} + +static struct syscore_ops exynos5420_clk_syscore_ops = { + .suspend = exynos5420_clk_suspend, + .resume = exynos5420_clk_resume, +}; + +static void exynos5420_clk_sleep_init(void) +{ + exynos5420_save = samsung_clk_alloc_reg_dump(exynos5420_clk_regs, + ARRAY_SIZE(exynos5420_clk_regs)); + if (!exynos5420_save) { + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); + return; + } + + register_syscore_ops(&exynos5420_clk_syscore_ops); +} +#else +static void exynos5420_clk_sleep_init(void) {} +#endif + /* list of all parent clocks */ PNAME(mspll_cpu_p) = { "sclk_cpll", "sclk_dpll", "sclk_mpll", "sclk_spll" }; @@ -737,8 +778,6 @@ static struct of_device_id ext_clk_match[] __initdata = { /* register exynos5420 clocks */ static void __init exynos5420_clk_init(struct device_node *np) { - void __iomem *reg_base; - if (np) { reg_base = of_iomap(np, 0); if (!reg_base) @@ -747,9 +786,7 @@ static void __init exynos5420_clk_init(struct device_node *np) panic("%s: unable to determine soc\n", __func__); } - samsung_clk_init(np, reg_base, CLK_NR_CLKS, - exynos5420_clk_regs, ARRAY_SIZE(exynos5420_clk_regs), - NULL, 0); + samsung_clk_init(np, reg_base, CLK_NR_CLKS); samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks, ARRAY_SIZE(exynos5420_fixed_rate_ext_clks), ext_clk_match); @@ -765,5 +802,7 @@ static void __init exynos5420_clk_init(struct device_node *np) ARRAY_SIZE(exynos5420_div_clks)); samsung_clk_register_gate(exynos5420_gate_clks, ARRAY_SIZE(exynos5420_gate_clks)); + + exynos5420_clk_sleep_init(); } CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init); diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c index cbc15b56891d..2bfad5a993d0 100644 --- a/drivers/clk/samsung/clk-exynos5440.c +++ b/drivers/clk/samsung/clk-exynos5440.c @@ -101,7 +101,7 @@ static void __init exynos5440_clk_init(struct device_node *np) return; } - samsung_clk_init(np, reg_base, CLK_NR_CLKS, NULL, 0, NULL, 0); + samsung_clk_init(np, reg_base, CLK_NR_CLKS); samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks, ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match); diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index 8e27aee6887e..8bda658137a8 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -13,6 +13,7 @@ #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/syscore_ops.h> #include <dt-bindings/clock/samsung,s3c64xx-clock.h> @@ -61,6 +62,13 @@ enum s3c64xx_plls { apll, mpll, epll, }; +static void __iomem *reg_base; +static bool is_s3c6400; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *s3c64xx_save_common; +static struct samsung_clk_reg_dump *s3c64xx_save_soc; + /* * List of controller registers to be saved and restored during * a suspend/resume cycle. 
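The S3C64xx hunk below repeats, for the fourth time, the pattern just added to exynos4, exynos5250 and exynos5420 — this time with a second, S3C6410-only register list on top of the common one. A condensed sketch of the shared shape, with hypothetical register offsets and with the samsung_clk_save()/samsung_clk_restore() loops written out inline rather than calling the helpers the series introduces later in clk.c:

/*
 * Illustrative only: a generic clock controller following the pattern
 * added by these hunks. Offsets and names are made up for the example.
 */
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

struct clk_reg_dump {
	unsigned long offset;
	u32 value;
};

static void __iomem *ctrl_base;		/* mapped in the driver's init path */

static const unsigned long ctrl_clk_regs[] = {
	0x0200,		/* e.g. a SRC_* mux register */
	0x0500,		/* e.g. a DIV_* divider register */
	0x0800,		/* e.g. a GATE_* gate register */
};

static struct clk_reg_dump *ctrl_clk_save;

static int ctrl_clk_suspend(void)
{
	unsigned int i;

	/* What samsung_clk_save() does: latch every listed register. */
	for (i = 0; i < ARRAY_SIZE(ctrl_clk_regs); i++)
		ctrl_clk_save[i].value =
			readl(ctrl_base + ctrl_clk_save[i].offset);

	return 0;
}

static void ctrl_clk_resume(void)
{
	unsigned int i;

	/* What samsung_clk_restore() does: replay the snapshot. */
	for (i = 0; i < ARRAY_SIZE(ctrl_clk_regs); i++)
		writel(ctrl_clk_save[i].value,
		       ctrl_base + ctrl_clk_save[i].offset);
}

static struct syscore_ops ctrl_clk_syscore_ops = {
	.suspend = ctrl_clk_suspend,
	.resume = ctrl_clk_resume,
};

static void ctrl_clk_sleep_init(void)
{
	unsigned int i;

	ctrl_clk_save = kcalloc(ARRAY_SIZE(ctrl_clk_regs),
				sizeof(*ctrl_clk_save), GFP_KERNEL);
	if (!ctrl_clk_save) {
		pr_warn("%s: no sleep support\n", __func__);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(ctrl_clk_regs); i++)
		ctrl_clk_save[i].offset = ctrl_clk_regs[i];

	register_syscore_ops(&ctrl_clk_syscore_ops);
}

Registering syscore_ops rather than an ordinary device PM callback matters here: syscore hooks run at the tail end of suspend, on a single CPU with interrupts disabled, so they must not sleep or allocate. That is why each driver allocates its dump buffers up front in its *_clk_sleep_init() and simply reports "no sleep support" if that allocation fails.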
@@ -87,6 +95,60 @@ static unsigned long s3c6410_clk_regs[] __initdata = { MEM0_GATE, }; +static int s3c64xx_clk_suspend(void) +{ + samsung_clk_save(reg_base, s3c64xx_save_common, + ARRAY_SIZE(s3c64xx_clk_regs)); + + if (!is_s3c6400) + samsung_clk_save(reg_base, s3c64xx_save_soc, + ARRAY_SIZE(s3c6410_clk_regs)); + + return 0; +} + +static void s3c64xx_clk_resume(void) +{ + samsung_clk_restore(reg_base, s3c64xx_save_common, + ARRAY_SIZE(s3c64xx_clk_regs)); + + if (!is_s3c6400) + samsung_clk_restore(reg_base, s3c64xx_save_soc, + ARRAY_SIZE(s3c6410_clk_regs)); +} + +static struct syscore_ops s3c64xx_clk_syscore_ops = { + .suspend = s3c64xx_clk_suspend, + .resume = s3c64xx_clk_resume, +}; + +static void s3c64xx_clk_sleep_init(void) +{ + s3c64xx_save_common = samsung_clk_alloc_reg_dump(s3c64xx_clk_regs, + ARRAY_SIZE(s3c64xx_clk_regs)); + if (!s3c64xx_save_common) + goto err_warn; + + if (!is_s3c6400) { + s3c64xx_save_soc = samsung_clk_alloc_reg_dump(s3c6410_clk_regs, + ARRAY_SIZE(s3c6410_clk_regs)); + if (!s3c64xx_save_soc) + goto err_soc; + } + + register_syscore_ops(&s3c64xx_clk_syscore_ops); + return; + +err_soc: + kfree(s3c64xx_save_common); +err_warn: + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); +} +#else +static void s3c64xx_clk_sleep_init(void) {} +#endif + /* List of parent clocks common for all S3C64xx SoCs. */ PNAME(spi_mmc_p) = { "mout_epll", "dout_mpll", "fin_pll", "clk27m" }; PNAME(uart_p) = { "mout_epll", "dout_mpll" }; @@ -391,11 +453,11 @@ static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f, /* Register s3c64xx clocks. */ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, - unsigned long xusbxti_f, bool is_s3c6400, - void __iomem *reg_base) + unsigned long xusbxti_f, bool s3c6400, + void __iomem *base) { - unsigned long *soc_regs = NULL; - unsigned long nr_soc_regs = 0; + reg_base = base; + is_s3c6400 = s3c6400; if (np) { reg_base = of_iomap(np, 0); @@ -403,13 +465,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, panic("%s: failed to map registers\n", __func__); } - if (!is_s3c6400) { - soc_regs = s3c6410_clk_regs; - nr_soc_regs = ARRAY_SIZE(s3c6410_clk_regs); - } - - samsung_clk_init(np, reg_base, NR_CLKS, s3c64xx_clk_regs, - ARRAY_SIZE(s3c64xx_clk_regs), soc_regs, nr_soc_regs); + samsung_clk_init(np, reg_base, NR_CLKS); /* Register external clocks. 
*/ if (!np) @@ -452,6 +508,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, samsung_clk_register_alias(s3c64xx_clock_aliases, ARRAY_SIZE(s3c64xx_clock_aliases)); + s3c64xx_clk_sleep_init(); pr_info("%s clocks: apll = %lu, mpll = %lu\n" "\tepll = %lu, arm_clk = %lu\n", diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index f503f32e2f80..91bec3ebdc8f 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -21,64 +21,45 @@ static void __iomem *reg_base; static struct clk_onecell_data clk_data; #endif -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *reg_dump; -static unsigned long nr_reg_dump; - -static int samsung_clk_suspend(void) +void samsung_clk_save(void __iomem *base, + struct samsung_clk_reg_dump *rd, + unsigned int num_regs) { - struct samsung_clk_reg_dump *rd = reg_dump; - unsigned long i; - - for (i = 0; i < nr_reg_dump; i++, rd++) - rd->value = __raw_readl(reg_base + rd->offset); + for (; num_regs > 0; --num_regs, ++rd) + rd->value = readl(base + rd->offset); +} - return 0; +void samsung_clk_restore(void __iomem *base, + const struct samsung_clk_reg_dump *rd, + unsigned int num_regs) +{ + for (; num_regs > 0; --num_regs, ++rd) + writel(rd->value, base + rd->offset); } -static void samsung_clk_resume(void) +struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump( + const unsigned long *rdump, + unsigned long nr_rdump) { - struct samsung_clk_reg_dump *rd = reg_dump; - unsigned long i; + struct samsung_clk_reg_dump *rd; + unsigned int i; - for (i = 0; i < nr_reg_dump; i++, rd++) - __raw_writel(rd->value, reg_base + rd->offset); -} + rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL); + if (!rd) + return NULL; + + for (i = 0; i < nr_rdump; ++i) + rd[i].offset = rdump[i]; -static struct syscore_ops samsung_clk_syscore_ops = { - .suspend = samsung_clk_suspend, - .resume = samsung_clk_resume, -}; -#endif /* CONFIG_PM_SLEEP */ + return rd; +} /* setup the essentials required to support clock lookup using ccf */ void __init samsung_clk_init(struct device_node *np, void __iomem *base, - unsigned long nr_clks, unsigned long *rdump, - unsigned long nr_rdump, unsigned long *soc_rdump, - unsigned long nr_soc_rdump) + unsigned long nr_clks) { reg_base = base; -#ifdef CONFIG_PM_SLEEP - if (rdump && nr_rdump) { - unsigned int idx; - reg_dump = kzalloc(sizeof(struct samsung_clk_reg_dump) - * (nr_rdump + nr_soc_rdump), GFP_KERNEL); - if (!reg_dump) { - pr_err("%s: memory alloc for register dump failed\n", - __func__); - return; - } - - for (idx = 0; idx < nr_rdump; idx++) - reg_dump[idx].offset = rdump[idx]; - for (idx = 0; idx < nr_soc_rdump; idx++) - reg_dump[nr_rdump + idx].offset = soc_rdump[idx]; - nr_reg_dump = nr_rdump + nr_soc_rdump; - register_syscore_ops(&samsung_clk_syscore_ops); - } -#endif - clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); if (!clk_table) panic("could not allocate clock lookup table\n"); diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index 31b4174e7a5b..c7141ba826e0 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -313,9 +313,7 @@ struct samsung_pll_clock { _lock, _con, _rtable, _alias) extern void __init samsung_clk_init(struct device_node *np, void __iomem *base, - unsigned long nr_clks, unsigned long *rdump, - unsigned long nr_rdump, unsigned long *soc_rdump, - unsigned long nr_soc_rdump); + unsigned long nr_clks); extern void __init samsung_clk_of_register_fixed_ext( struct samsung_fixed_rate_clock *fixed_rate_clk, unsigned int 
nr_fixed_rate_clk, @@ -340,4 +338,14 @@ extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list, extern unsigned long _get_rate(const char *clk_name); +extern void samsung_clk_save(void __iomem *base, + struct samsung_clk_reg_dump *rd, + unsigned int num_regs); +extern void samsung_clk_restore(void __iomem *base, + const struct samsung_clk_reg_dump *rd, + unsigned int num_regs); +extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump( + const unsigned long *rdump, + unsigned long nr_rdump); + #endif /* __SAMSUNG_CLK_H */ diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile index 9ecef140dba7..5404cb931ebf 100644 --- a/drivers/clk/shmobile/Makefile +++ b/drivers/clk/shmobile/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o +obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c index aac4756ec52e..f065f694cb65 100644 --- a/drivers/clk/shmobile/clk-div6.c +++ b/drivers/clk/shmobile/clk-div6.c @@ -23,7 +23,7 @@ #define CPG_DIV6_DIV_MASK 0x3f /** - * struct div6_clock - MSTP gating clock + * struct div6_clock - CPG 6 bit divider clock * @hw: handle between common and hardware-specific interfaces * @reg: IO-remapped register * @div: divisor value (1-64) diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c index 42d5912b1d25..2e5810c88d11 100644 --- a/drivers/clk/shmobile/clk-mstp.c +++ b/drivers/clk/shmobile/clk-mstp.c @@ -137,7 +137,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name, init.name = name; init.ops = &cpg_mstp_clock_ops; - init.flags = CLK_IS_BASIC; + init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; init.parent_names = &parent_name; init.num_parents = 1; diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c index a59ec217a124..dff7f79a19b9 100644 --- a/drivers/clk/shmobile/clk-rcar-gen2.c +++ b/drivers/clk/shmobile/clk-rcar-gen2.c @@ -26,6 +26,8 @@ struct rcar_gen2_cpg { void __iomem *reg; }; +#define CPG_FRQCRB 0x00000004 +#define CPG_FRQCRB_KICK BIT(31) #define CPG_SDCKCR 0x00000074 #define CPG_PLL0CR 0x000000d8 #define CPG_FRQCRC 0x000000e0 @@ -45,6 +47,7 @@ struct rcar_gen2_cpg { struct cpg_z_clk { struct clk_hw hw; void __iomem *reg; + void __iomem *kick_reg; }; #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw) @@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, { struct cpg_z_clk *zclk = to_z_clk(hw); unsigned int mult; - u32 val; + u32 val, kick; + unsigned int i; mult = div_u64((u64)rate * 32, parent_rate); mult = clamp(mult, 1U, 32U); + if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK) + return -EBUSY; + val = clk_readl(zclk->reg); val &= ~CPG_FRQCRC_ZFC_MASK; val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT; clk_writel(val, zclk->reg); - return 0; + /* + * Set KICK bit in FRQCRB to update hardware setting and wait for + * clock change completion. + */ + kick = clk_readl(zclk->kick_reg); + kick |= CPG_FRQCRB_KICK; + clk_writel(kick, zclk->kick_reg); + + /* + * Note: There is no HW information about the worst case latency. + * + * Using experimental measurements, it seems that no more than + * ~10 iterations are needed, independently of the CPU rate. 
+ * Since this value might be dependant of external xtal rate, pll1 + * rate or even the other emulation clocks rate, use 1000 as a + * "super" safe value. + */ + for (i = 1000; i; i--) { + if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)) + return 0; + + cpu_relax(); + } + + return -ETIMEDOUT; } static const struct clk_ops cpg_z_clk_ops = { @@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg) init.num_parents = 1; zclk->reg = cpg->reg + CPG_FRQCRC; + zclk->kick_reg = cpg->reg + CPG_FRQCRB; zclk->hw.init = &init; clk = clk_register(NULL, &zclk->hw); @@ -186,7 +218,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, const char *name) { const struct clk_div_table *table = NULL; - const char *parent_name = "main"; + const char *parent_name; unsigned int shift; unsigned int mult = 1; unsigned int div = 1; @@ -201,23 +233,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, * the multiplier value. */ u32 value = clk_readl(cpg->reg + CPG_PLL0CR); + parent_name = "main"; mult = ((value >> 24) & ((1 << 7) - 1)) + 1; } else if (!strcmp(name, "pll1")) { + parent_name = "main"; mult = config->pll1_mult / 2; } else if (!strcmp(name, "pll3")) { + parent_name = "main"; mult = config->pll3_mult; } else if (!strcmp(name, "lb")) { + parent_name = "pll1"; div = cpg_mode & BIT(18) ? 36 : 24; } else if (!strcmp(name, "qspi")) { + parent_name = "pll1_div2"; div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2) - ? 16 : 20; + ? 8 : 10; } else if (!strcmp(name, "sdh")) { + parent_name = "pll1"; table = cpg_sdh_div_table; shift = 8; } else if (!strcmp(name, "sd0")) { + parent_name = "pll1"; table = cpg_sd01_div_table; shift = 4; } else if (!strcmp(name, "sd1")) { + parent_name = "pll1"; table = cpg_sd01_div_table; shift = 0; } else if (!strcmp(name, "z")) { diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/shmobile/clk-rz.c new file mode 100644 index 000000000000..7e68e8630962 --- /dev/null +++ b/drivers/clk/shmobile/clk-rz.c @@ -0,0 +1,103 @@ +/* + * rz Core CPG Clocks + * + * Copyright (C) 2013 Ideas On Board SPRL + * Copyright (C) 2014 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include <linux/clk-provider.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> + +struct rz_cpg { + struct clk_onecell_data data; + void __iomem *reg; +}; + +#define CPG_FRQCR 0x10 +#define CPG_FRQCR2 0x14 + +/* ----------------------------------------------------------------------------- + * Initialization + */ + +static struct clk * __init +rz_cpg_register_clock(struct device_node *np, struct rz_cpg *cpg, const char *name) +{ + u32 val; + unsigned mult; + static const unsigned frqcr_tab[4] = { 3, 2, 0, 1 }; + + if (strcmp(name, "pll") == 0) { + /* FIXME: cpg_mode should be read from GPIO. But no GPIO support yet */ + unsigned cpg_mode = 0; /* hardcoded to EXTAL for now */ + const char *parent_name = of_clk_get_parent_name(np, cpg_mode); + + mult = cpg_mode ? (32 / 4) : 30; + + return clk_register_fixed_factor(NULL, name, parent_name, 0, mult, 1); + } + + /* If mapping regs failed, skip non-pll clocks. 
System will boot anyhow */ + if (!cpg->reg) + return ERR_PTR(-ENXIO); + + /* FIXME:"i" and "g" are variable clocks with non-integer dividers (e.g. 2/3) + * and the constraint that always g <= i. To get the rz platform started, + * let them run at fixed current speed and implement the details later. + */ + if (strcmp(name, "i") == 0) + val = (clk_readl(cpg->reg + CPG_FRQCR) >> 8) & 3; + else if (strcmp(name, "g") == 0) + val = clk_readl(cpg->reg + CPG_FRQCR2) & 3; + else + return ERR_PTR(-EINVAL); + + mult = frqcr_tab[val]; + return clk_register_fixed_factor(NULL, name, "pll", 0, mult, 3); +} + +static void __init rz_cpg_clocks_init(struct device_node *np) +{ + struct rz_cpg *cpg; + struct clk **clks; + unsigned i; + int num_clks; + + num_clks = of_property_count_strings(np, "clock-output-names"); + if (WARN(num_clks <= 0, "can't count CPG clocks\n")) + return; + + cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); + clks = kzalloc(num_clks * sizeof(*clks), GFP_KERNEL); + BUG_ON(!cpg || !clks); + + cpg->data.clks = clks; + cpg->data.clk_num = num_clks; + + cpg->reg = of_iomap(np, 0); + + for (i = 0; i < num_clks; ++i) { + const char *name; + struct clk *clk; + + of_property_read_string_index(np, "clock-output-names", i, &name); + + clk = rz_cpg_register_clock(np, cpg, name); + if (IS_ERR(clk)) + pr_err("%s: failed to register %s %s clock (%ld)\n", + __func__, np->name, name, PTR_ERR(clk)); + else + cpg->data.clks[i] = clk; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); +} +CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init); diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index f9f4a15a64ab..d63b76ca60c3 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -1,7 +1,8 @@ /* * Clock tree for CSR SiRFatlasVI * - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group + * company. * * Licensed under GPLv2 or later. */ diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c index 7dde6a82f514..37af51c5f213 100644 --- a/drivers/clk/sirf/clk-common.c +++ b/drivers/clk/sirf/clk-common.c @@ -1,7 +1,8 @@ /* * common clks module for all SiRF SoCs * - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group + * company. * * Licensed under GPLv2 or later. */ diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c index 7adc5c70c7ff..6968e2ebcd8a 100644 --- a/drivers/clk/sirf/clk-prima2.c +++ b/drivers/clk/sirf/clk-prima2.c @@ -1,7 +1,8 @@ /* * Clock tree for CSR SiRFprimaII * - * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group + * company. * * Licensed under GPLv2 or later. */ diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile index 0303c0b99cd0..7e2d15a0c7b8 100644 --- a/drivers/clk/socfpga/Makefile +++ b/drivers/clk/socfpga/Makefile @@ -1 +1,4 @@ obj-y += clk.o +obj-y += clk-gate.o +obj-y += clk-pll.o +obj-y += clk-periph.o diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c new file mode 100644 index 000000000000..501d513bf890 --- /dev/null +++ b/drivers/clk/socfpga/clk-gate.c @@ -0,0 +1,263 @@ +/* + * Copyright 2011-2012 Calxeda, Inc. 
+ * Copyright (C) 2012-2013 Altera Corporation <www.altera.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Based from clk-highbank.c + * + */ +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/regmap.h> + +#include "clk.h" + +#define SOCFPGA_L4_MP_CLK "l4_mp_clk" +#define SOCFPGA_L4_SP_CLK "l4_sp_clk" +#define SOCFPGA_NAND_CLK "nand_clk" +#define SOCFPGA_NAND_X_CLK "nand_x_clk" +#define SOCFPGA_MMC_CLK "sdmmc_clk" +#define SOCFPGA_GPIO_DB_CLK_OFFSET 0xA8 + +#define div_mask(width) ((1 << (width)) - 1) +#define streq(a, b) (strcmp((a), (b)) == 0) + +#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw) + +/* SDMMC Group for System Manager defines */ +#define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x108 +#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \ + ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0)) + +static u8 socfpga_clk_get_parent(struct clk_hw *hwclk) +{ + u32 l4_src; + u32 perpll_src; + + if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) { + l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + return l4_src &= 0x1; + } + if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) { + l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + return !!(l4_src & 2); + } + + perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); + if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) + return perpll_src &= 0x3; + if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) || + streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) + return (perpll_src >> 2) & 3; + + /* QSPI clock */ + return (perpll_src >> 4) & 3; + +} + +static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent) +{ + u32 src_reg; + + if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) { + src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + src_reg &= ~0x1; + src_reg |= parent; + writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); + } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) { + src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); + src_reg &= ~0x2; + src_reg |= (parent << 1); + writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); + } else { + src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); + if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) { + src_reg &= ~0x3; + src_reg |= parent; + } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) || + streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) { + src_reg &= ~0xC; + src_reg |= (parent << 2); + } else {/* QSPI clock */ + src_reg &= ~0x30; + src_reg |= (parent << 4); + } + writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC); + } + + return 0; +} + +static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); + u32 div = 1, val; + + if (socfpgaclk->fixed_div) + div = socfpgaclk->fixed_div; + else if (socfpgaclk->div_reg) { + val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; + val &= div_mask(socfpgaclk->width); + /* Check for GPIO_DB_CLK by its offset */ + if ((int) 
socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET) + div = val + 1; + else + div = (1 << val); + } + + return parent_rate / div; +} + +static int socfpga_clk_prepare(struct clk_hw *hwclk) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); + struct regmap *sys_mgr_base_addr; + int i; + u32 hs_timing; + u32 clk_phase[2]; + + if (socfpgaclk->clk_phase[0] || socfpgaclk->clk_phase[1]) { + sys_mgr_base_addr = syscon_regmap_lookup_by_compatible("altr,sys-mgr"); + if (IS_ERR(sys_mgr_base_addr)) { + pr_err("%s: failed to find altr,sys-mgr regmap!\n", __func__); + return -EINVAL; + } + + for (i = 0; i < 2; i++) { + switch (socfpgaclk->clk_phase[i]) { + case 0: + clk_phase[i] = 0; + break; + case 45: + clk_phase[i] = 1; + break; + case 90: + clk_phase[i] = 2; + break; + case 135: + clk_phase[i] = 3; + break; + case 180: + clk_phase[i] = 4; + break; + case 225: + clk_phase[i] = 5; + break; + case 270: + clk_phase[i] = 6; + break; + case 315: + clk_phase[i] = 7; + break; + default: + clk_phase[i] = 0; + break; + } + } + hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]); + regmap_write(sys_mgr_base_addr, SYSMGR_SDMMCGRP_CTRL_OFFSET, + hs_timing); + } + return 0; +} + +static struct clk_ops gateclk_ops = { + .prepare = socfpga_clk_prepare, + .recalc_rate = socfpga_clk_recalc_rate, + .get_parent = socfpga_clk_get_parent, + .set_parent = socfpga_clk_set_parent, +}; + +static void __init __socfpga_gate_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 clk_gate[2]; + u32 div_reg[3]; + u32 clk_phase[2]; + u32 fixed_div; + struct clk *clk; + struct socfpga_gate_clk *socfpga_clk; + const char *clk_name = node->name; + const char *parent_name[SOCFPGA_MAX_PARENTS]; + struct clk_init_data init; + int rc; + int i = 0; + + socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); + if (WARN_ON(!socfpga_clk)) + return; + + rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); + if (rc) + clk_gate[0] = 0; + + if (clk_gate[0]) { + socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; + socfpga_clk->hw.bit_idx = clk_gate[1]; + + gateclk_ops.enable = clk_gate_ops.enable; + gateclk_ops.disable = clk_gate_ops.disable; + } + + rc = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (rc) + socfpga_clk->fixed_div = 0; + else + socfpga_clk->fixed_div = fixed_div; + + rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!rc) { + socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0]; + socfpga_clk->shift = div_reg[1]; + socfpga_clk->width = div_reg[2]; + } else { + socfpga_clk->div_reg = 0; + } + + rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2); + if (!rc) { + socfpga_clk->clk_phase[0] = clk_phase[0]; + socfpga_clk->clk_phase[1] = clk_phase[1]; + } + + of_property_read_string(node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = ops; + init.flags = 0; + while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] = + of_clk_get_parent_name(node, i)) != NULL) + i++; + + init.parent_names = parent_name; + init.num_parents = i; + socfpga_clk->hw.hw.init = &init; + + clk = clk_register(NULL, &socfpga_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(socfpga_clk); + return; + } + rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + if (WARN_ON(rc)) + return; +} + +void __init socfpga_gate_init(struct device_node *node) +{ + __socfpga_gate_init(node, &gateclk_ops); +} diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c new file mode 100644 index 000000000000..81623a3736f9 --- 
/dev/null +++ b/drivers/clk/socfpga/clk-periph.c @@ -0,0 +1,94 @@ +/* + * Copyright 2011-2012 Calxeda, Inc. + * Copyright (C) 2012-2013 Altera Corporation <www.altera.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Based from clk-highbank.c + * + */ +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> + +#include "clk.h" + +#define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw) + +static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk); + u32 div; + + if (socfpgaclk->fixed_div) + div = socfpgaclk->fixed_div; + else + div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1); + + return parent_rate / div; +} + +static const struct clk_ops periclk_ops = { + .recalc_rate = clk_periclk_recalc_rate, +}; + +static __init void __socfpga_periph_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 reg; + struct clk *clk; + struct socfpga_periph_clk *periph_clk; + const char *clk_name = node->name; + const char *parent_name; + struct clk_init_data init; + int rc; + u32 fixed_div; + + of_property_read_u32(node, "reg", ®); + + periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL); + if (WARN_ON(!periph_clk)) + return; + + periph_clk->hw.reg = clk_mgr_base_addr + reg; + + rc = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (rc) + periph_clk->fixed_div = 0; + else + periph_clk->fixed_div = fixed_div; + + of_property_read_string(node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = ops; + init.flags = 0; + parent_name = of_clk_get_parent_name(node, 0); + init.parent_names = &parent_name; + init.num_parents = 1; + + periph_clk->hw.hw.init = &init; + + clk = clk_register(NULL, &periph_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(periph_clk); + return; + } + rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); +} + +void __init socfpga_periph_init(struct device_node *node) +{ + __socfpga_periph_init(node, &periclk_ops); +} diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c new file mode 100644 index 000000000000..88dafb5e9627 --- /dev/null +++ b/drivers/clk/socfpga/clk-pll.c @@ -0,0 +1,131 @@ +/* + * Copyright 2011-2012 Calxeda, Inc. + * Copyright (C) 2012-2013 Altera Corporation <www.altera.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Based from clk-highbank.c + * + */ +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> + +#include "clk.h" + +/* Clock bypass bits */ +#define MAINPLL_BYPASS (1<<0) +#define SDRAMPLL_BYPASS (1<<1) +#define SDRAMPLL_SRC_BYPASS (1<<2) +#define PERPLL_BYPASS (1<<3) +#define PERPLL_SRC_BYPASS (1<<4) + +#define SOCFPGA_PLL_BG_PWRDWN 0 +#define SOCFPGA_PLL_EXT_ENA 1 +#define SOCFPGA_PLL_PWR_DOWN 2 +#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8 +#define SOCFPGA_PLL_DIVF_SHIFT 3 +#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000 +#define SOCFPGA_PLL_DIVQ_SHIFT 16 + +#define CLK_MGR_PLL_CLK_SRC_SHIFT 22 +#define CLK_MGR_PLL_CLK_SRC_MASK 0x3 + +#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) + +static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); + unsigned long divf, divq, reg; + unsigned long long vco_freq; + unsigned long bypass; + + reg = readl(socfpgaclk->hw.reg); + bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS); + if (bypass & MAINPLL_BYPASS) + return parent_rate; + + divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT; + divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT; + vco_freq = (unsigned long long)parent_rate * (divf + 1); + do_div(vco_freq, (1 + divq)); + return (unsigned long)vco_freq; +} + +static u8 clk_pll_get_parent(struct clk_hw *hwclk) +{ + u32 pll_src; + struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); + + pll_src = readl(socfpgaclk->hw.reg); + return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) & + CLK_MGR_PLL_CLK_SRC_MASK; +} + +static struct clk_ops clk_pll_ops = { + .recalc_rate = clk_pll_recalc_rate, + .get_parent = clk_pll_get_parent, +}; + +static __init struct clk *__socfpga_pll_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 reg; + struct clk *clk; + struct socfpga_pll *pll_clk; + const char *clk_name = node->name; + const char *parent_name[SOCFPGA_MAX_PARENTS]; + struct clk_init_data init; + int rc; + int i = 0; + + of_property_read_u32(node, "reg", ®); + + pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); + if (WARN_ON(!pll_clk)) + return NULL; + + pll_clk->hw.reg = clk_mgr_base_addr + reg; + + of_property_read_string(node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = ops; + init.flags = 0; + + while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] = + of_clk_get_parent_name(node, i)) != NULL) + i++; + + init.num_parents = i; + init.parent_names = parent_name; + pll_clk->hw.hw.init = &init; + + pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; + clk_pll_ops.enable = clk_gate_ops.enable; + clk_pll_ops.disable = clk_gate_ops.disable; + + clk = clk_register(NULL, &pll_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(pll_clk); + return NULL; + } + rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + return clk; +} + +void __init socfpga_pll_init(struct device_node *node) +{ + __socfpga_pll_init(node, &clk_pll_ops); +} diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 5983a26a8c5f..35a960a993f9 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c @@ -22,325 +22,23 @@ #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> -/* Clock Manager offsets */ -#define CLKMGR_CTRL 0x0 -#define CLKMGR_BYPASS 0x4 -#define CLKMGR_L4SRC 0x70 -#define CLKMGR_PERPLL_SRC 0xAC +#include "clk.h" -/* Clock bypass bits */ -#define 
MAINPLL_BYPASS (1<<0) -#define SDRAMPLL_BYPASS (1<<1) -#define SDRAMPLL_SRC_BYPASS (1<<2) -#define PERPLL_BYPASS (1<<3) -#define PERPLL_SRC_BYPASS (1<<4) +void __iomem *clk_mgr_base_addr; -#define SOCFPGA_PLL_BG_PWRDWN 0 -#define SOCFPGA_PLL_EXT_ENA 1 -#define SOCFPGA_PLL_PWR_DOWN 2 -#define SOCFPGA_PLL_DIVF_MASK 0x0000FFF8 -#define SOCFPGA_PLL_DIVF_SHIFT 3 -#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000 -#define SOCFPGA_PLL_DIVQ_SHIFT 16 -#define SOCFGPA_MAX_PARENTS 3 - -#define SOCFPGA_L4_MP_CLK "l4_mp_clk" -#define SOCFPGA_L4_SP_CLK "l4_sp_clk" -#define SOCFPGA_NAND_CLK "nand_clk" -#define SOCFPGA_NAND_X_CLK "nand_x_clk" -#define SOCFPGA_MMC_CLK "sdmmc_clk" -#define SOCFPGA_DB_CLK "gpio_db_clk" - -#define div_mask(width) ((1 << (width)) - 1) -#define streq(a, b) (strcmp((a), (b)) == 0) - -extern void __iomem *clk_mgr_base_addr; - -struct socfpga_clk { - struct clk_gate hw; - char *parent_name; - char *clk_name; - u32 fixed_div; - void __iomem *div_reg; - u32 width; /* only valid if div_reg != 0 */ - u32 shift; /* only valid if div_reg != 0 */ -}; -#define to_socfpga_clk(p) container_of(p, struct socfpga_clk, hw.hw) - -static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, - unsigned long parent_rate) -{ - struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk); - unsigned long divf, divq, vco_freq, reg; - unsigned long bypass; - - reg = readl(socfpgaclk->hw.reg); - bypass = readl(clk_mgr_base_addr + CLKMGR_BYPASS); - if (bypass & MAINPLL_BYPASS) - return parent_rate; - - divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT; - divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT; - vco_freq = parent_rate * (divf + 1); - return vco_freq / (1 + divq); -} - - -static struct clk_ops clk_pll_ops = { - .recalc_rate = clk_pll_recalc_rate, -}; - -static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk, - unsigned long parent_rate) -{ - struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk); - u32 div; - - if (socfpgaclk->fixed_div) - div = socfpgaclk->fixed_div; - else - div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1); - - return parent_rate / div; -} - -static const struct clk_ops periclk_ops = { - .recalc_rate = clk_periclk_recalc_rate, -}; - -static __init struct clk *socfpga_clk_init(struct device_node *node, - const struct clk_ops *ops) -{ - u32 reg; - struct clk *clk; - struct socfpga_clk *socfpga_clk; - const char *clk_name = node->name; - const char *parent_name; - struct clk_init_data init; - int rc; - u32 fixed_div; - - of_property_read_u32(node, "reg", ®); - - socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); - if (WARN_ON(!socfpga_clk)) - return NULL; - - socfpga_clk->hw.reg = clk_mgr_base_addr + reg; - - rc = of_property_read_u32(node, "fixed-divider", &fixed_div); - if (rc) - socfpga_clk->fixed_div = 0; - else - socfpga_clk->fixed_div = fixed_div; - - of_property_read_string(node, "clock-output-names", &clk_name); - - init.name = clk_name; - init.ops = ops; - init.flags = 0; - parent_name = of_clk_get_parent_name(node, 0); - init.parent_names = &parent_name; - init.num_parents = 1; - - socfpga_clk->hw.hw.init = &init; - - if (streq(clk_name, "main_pll") || - streq(clk_name, "periph_pll") || - streq(clk_name, "sdram_pll")) { - socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; - clk_pll_ops.enable = clk_gate_ops.enable; - clk_pll_ops.disable = clk_gate_ops.disable; - } - - clk = clk_register(NULL, &socfpga_clk->hw.hw); - if (WARN_ON(IS_ERR(clk))) { - kfree(socfpga_clk); - return NULL; - } - rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); 
- return clk; -} - -static u8 socfpga_clk_get_parent(struct clk_hw *hwclk) -{ - u32 l4_src; - u32 perpll_src; - - if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) { - l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - return l4_src &= 0x1; - } - if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) { - l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - return !!(l4_src & 2); - } - - perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) - return perpll_src &= 0x3; - if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) || - streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) - return (perpll_src >> 2) & 3; - - /* QSPI clock */ - return (perpll_src >> 4) & 3; - -} - -static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent) -{ - u32 src_reg; - - if (streq(hwclk->init->name, SOCFPGA_L4_MP_CLK)) { - src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - src_reg &= ~0x1; - src_reg |= parent; - writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); - } else if (streq(hwclk->init->name, SOCFPGA_L4_SP_CLK)) { - src_reg = readl(clk_mgr_base_addr + CLKMGR_L4SRC); - src_reg &= ~0x2; - src_reg |= (parent << 1); - writel(src_reg, clk_mgr_base_addr + CLKMGR_L4SRC); - } else { - src_reg = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - if (streq(hwclk->init->name, SOCFPGA_MMC_CLK)) { - src_reg &= ~0x3; - src_reg |= parent; - } else if (streq(hwclk->init->name, SOCFPGA_NAND_CLK) || - streq(hwclk->init->name, SOCFPGA_NAND_X_CLK)) { - src_reg &= ~0xC; - src_reg |= (parent << 2); - } else {/* QSPI clock */ - src_reg &= ~0x30; - src_reg |= (parent << 4); - } - writel(src_reg, clk_mgr_base_addr + CLKMGR_PERPLL_SRC); - } - - return 0; -} - -static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk, - unsigned long parent_rate) -{ - struct socfpga_clk *socfpgaclk = to_socfpga_clk(hwclk); - u32 div = 1, val; - - if (socfpgaclk->fixed_div) - div = socfpgaclk->fixed_div; - else if (socfpgaclk->div_reg) { - val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; - val &= div_mask(socfpgaclk->width); - if (streq(hwclk->init->name, SOCFPGA_DB_CLK)) - div = val + 1; - else - div = (1 << val); - } - - return parent_rate / div; -} - -static struct clk_ops gateclk_ops = { - .recalc_rate = socfpga_clk_recalc_rate, - .get_parent = socfpga_clk_get_parent, - .set_parent = socfpga_clk_set_parent, +static const struct of_device_id socfpga_child_clocks[] __initconst = { + { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, }, + { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, }, + { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, }, + {}, }; -static void __init socfpga_gate_clk_init(struct device_node *node, - const struct clk_ops *ops) -{ - u32 clk_gate[2]; - u32 div_reg[3]; - u32 fixed_div; - struct clk *clk; - struct socfpga_clk *socfpga_clk; - const char *clk_name = node->name; - const char *parent_name[SOCFGPA_MAX_PARENTS]; - struct clk_init_data init; - int rc; - int i = 0; - - socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); - if (WARN_ON(!socfpga_clk)) - return; - - rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); - if (rc) - clk_gate[0] = 0; - - if (clk_gate[0]) { - socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; - socfpga_clk->hw.bit_idx = clk_gate[1]; - - gateclk_ops.enable = clk_gate_ops.enable; - gateclk_ops.disable = clk_gate_ops.disable; - } - - rc = of_property_read_u32(node, "fixed-divider", &fixed_div); - if (rc) - socfpga_clk->fixed_div = 0; - else - socfpga_clk->fixed_div = fixed_div; - - rc = 
of_property_read_u32_array(node, "div-reg", div_reg, 3); - if (!rc) { - socfpga_clk->div_reg = clk_mgr_base_addr + div_reg[0]; - socfpga_clk->shift = div_reg[1]; - socfpga_clk->width = div_reg[2]; - } else { - socfpga_clk->div_reg = NULL; - } - - of_property_read_string(node, "clock-output-names", &clk_name); - - init.name = clk_name; - init.ops = ops; - init.flags = 0; - while (i < SOCFGPA_MAX_PARENTS && (parent_name[i] = - of_clk_get_parent_name(node, i)) != NULL) - i++; - - init.parent_names = parent_name; - init.num_parents = i; - socfpga_clk->hw.hw.init = &init; - - clk = clk_register(NULL, &socfpga_clk->hw.hw); - if (WARN_ON(IS_ERR(clk))) { - kfree(socfpga_clk); - return; - } - rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); - if (WARN_ON(rc)) - return; -} - -static void __init socfpga_pll_init(struct device_node *node) +static void __init socfpga_clkmgr_init(struct device_node *node) { - socfpga_clk_init(node, &clk_pll_ops); + clk_mgr_base_addr = of_iomap(node, 0); + of_clk_init(socfpga_child_clocks); } -CLK_OF_DECLARE(socfpga_pll, "altr,socfpga-pll-clock", socfpga_pll_init); +CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init); -static void __init socfpga_periph_init(struct device_node *node) -{ - socfpga_clk_init(node, &periclk_ops); -} -CLK_OF_DECLARE(socfpga_periph, "altr,socfpga-perip-clk", socfpga_periph_init); - -static void __init socfpga_gate_init(struct device_node *node) -{ - socfpga_gate_clk_init(node, &gateclk_ops); -} -CLK_OF_DECLARE(socfpga_gate, "altr,socfpga-gate-clk", socfpga_gate_init); - -void __init socfpga_init_clocks(void) -{ - struct clk *clk; - int ret; - - clk = clk_register_fixed_factor(NULL, "smp_twd", "mpuclk", 0, 1, 4); - ret = clk_register_clkdev(clk, NULL, "smp_twd"); - if (ret) - pr_err("smp_twd alias not registered\n"); -} diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h new file mode 100644 index 000000000000..d2e54019c94f --- /dev/null +++ b/drivers/clk/socfpga/clk.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, Steffen Trumtrar <s.trumtrar@pengutronix.de> + * + * based on drivers/clk/tegra/clk.h + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#ifndef __SOCFPGA_CLK_H +#define __SOCFPGA_CLK_H + +#include <linux/clk-provider.h> +#include <linux/clkdev.h> + +/* Clock Manager offsets */ +#define CLKMGR_CTRL 0x0 +#define CLKMGR_BYPASS 0x4 +#define CLKMGR_L4SRC 0x70 +#define CLKMGR_PERPLL_SRC 0xAC + +#define SOCFPGA_MAX_PARENTS 3 + +extern void __iomem *clk_mgr_base_addr; + +void __init socfpga_pll_init(struct device_node *node); +void __init socfpga_periph_init(struct device_node *node); +void __init socfpga_gate_init(struct device_node *node); + +struct socfpga_pll { + struct clk_gate hw; +}; + +struct socfpga_gate_clk { + struct clk_gate hw; + char *parent_name; + u32 fixed_div; + void __iomem *div_reg; + u32 width; /* only valid if div_reg != 0 */ + u32 shift; /* only valid if div_reg != 0 */ + u32 clk_phase[2]; +}; + +struct socfpga_periph_clk { + struct clk_gate hw; + char *parent_name; + u32 fixed_div; +}; + +#endif /* SOCFPGA_CLK_H */ diff --git a/drivers/clk/st/Makefile b/drivers/clk/st/Makefile new file mode 100644 index 000000000000..c7455ffdbdf7 --- /dev/null +++ b/drivers/clk/st/Makefile @@ -0,0 +1 @@ +obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c new file mode 100644 index 000000000000..4f53ee0778d9 --- /dev/null +++ b/drivers/clk/st/clkgen-fsyn.c @@ -0,0 +1,1039 @@ +/* + * Copyright (C) 2014 STMicroelectronics R&D Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +/* + * Authors: + * Stephen Gallimore <stephen.gallimore@st.com>, + * Pankaj Dev <pankaj.dev@st.com>. + */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +#include "clkgen.h" + +/* + * Maximum input clock to the PLL before we divide it down by 2 + * although in reality in actual systems this has never been seen to + * be used. 
+ */ +#define QUADFS_NDIV_THRESHOLD 30000000 + +#define PLL_BW_GOODREF (0L) +#define PLL_BW_VBADREF (1L) +#define PLL_BW_BADREF (2L) +#define PLL_BW_VGOODREF (3L) + +#define QUADFS_MAX_CHAN 4 + +struct stm_fs { + unsigned long ndiv; + unsigned long mdiv; + unsigned long pe; + unsigned long sdiv; + unsigned long nsdiv; +}; + +static struct stm_fs fs216c65_rtbl[] = { + { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 312.5 Khz */ + { .mdiv = 0x17, .pe = 0x25ed, .sdiv = 0x1, .nsdiv = 0 }, /* 27 MHz */ + { .mdiv = 0x1a, .pe = 0x7b36, .sdiv = 0x2, .nsdiv = 1 }, /* 36.87 MHz */ + { .mdiv = 0x13, .pe = 0x0, .sdiv = 0x2, .nsdiv = 1 }, /* 48 MHz */ + { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x1, .nsdiv = 1 }, /* 108 MHz */ +}; + +static struct stm_fs fs432c65_rtbl[] = { + { .mdiv = 0x1f, .pe = 0x0, .sdiv = 0x7, .nsdiv = 0 }, /* 625 Khz */ + { .mdiv = 0x11, .pe = 0x1c72, .sdiv = 0x2, .nsdiv = 1 }, /* 108 MHz */ + { .mdiv = 0x19, .pe = 0x121a, .sdiv = 0x0, .nsdiv = 1 }, /* 297 MHz */ +}; + +static struct stm_fs fs660c32_rtbl[] = { + { .mdiv = 0x01, .pe = 0x2aaa, .sdiv = 0x8, .nsdiv = 0 }, /* 600 KHz */ + { .mdiv = 0x02, .pe = 0x3d33, .sdiv = 0x0, .nsdiv = 0 }, /* 148.5 Mhz */ + { .mdiv = 0x13, .pe = 0x5bcc, .sdiv = 0x0, .nsdiv = 1 }, /* 297 Mhz */ + { .mdiv = 0x0e, .pe = 0x1025, .sdiv = 0x0, .nsdiv = 1 }, /* 333 Mhz */ + { .mdiv = 0x0b, .pe = 0x715f, .sdiv = 0x0, .nsdiv = 1 }, /* 350 Mhz */ +}; + +struct clkgen_quadfs_data { + bool reset_present; + bool bwfilter_present; + bool lockstatus_present; + bool nsdiv_present; + struct clkgen_field ndiv; + struct clkgen_field ref_bw; + struct clkgen_field nreset; + struct clkgen_field npda; + struct clkgen_field lock_status; + + struct clkgen_field nsb[QUADFS_MAX_CHAN]; + struct clkgen_field en[QUADFS_MAX_CHAN]; + struct clkgen_field mdiv[QUADFS_MAX_CHAN]; + struct clkgen_field pe[QUADFS_MAX_CHAN]; + struct clkgen_field sdiv[QUADFS_MAX_CHAN]; + struct clkgen_field nsdiv[QUADFS_MAX_CHAN]; + + const struct clk_ops *pll_ops; + struct stm_fs *rtbl; + u8 rtbl_cnt; + int (*get_rate)(unsigned long , struct stm_fs *, + unsigned long *); +}; + +static const struct clk_ops st_quadfs_pll_c65_ops; +static const struct clk_ops st_quadfs_pll_c32_ops; +static const struct clk_ops st_quadfs_fs216c65_ops; +static const struct clk_ops st_quadfs_fs432c65_ops; +static const struct clk_ops st_quadfs_fs660c32_ops; + +static int clk_fs216c65_get_rate(unsigned long, struct stm_fs *, + unsigned long *); +static int clk_fs432c65_get_rate(unsigned long, struct stm_fs *, + unsigned long *); +static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *, + unsigned long *); +/* + * Values for all of the standalone instances of this clock + * generator found in STiH415 and STiH416 SYSCFG register banks. Note + * that the individual channel standby control bits (nsb) are in the + * first register along with the PLL control bits. 
+ */ +static struct clkgen_quadfs_data st_fs216c65_416 = { + /* 416 specific */ + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x1, 15), + .bwfilter_present = true, + .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16), + .pe = { CLKGEN_FIELD(0x8, 0xffff, 0), + CLKGEN_FIELD(0x18, 0xffff, 0), + CLKGEN_FIELD(0x28, 0xffff, 0), + CLKGEN_FIELD(0x38, 0xffff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0), + CLKGEN_FIELD(0x1C, 0x7, 0), + CLKGEN_FIELD(0x2C, 0x7, 0), + CLKGEN_FIELD(0x3C, 0x7, 0) }, + .pll_ops = &st_quadfs_pll_c65_ops, + .rtbl = fs216c65_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs216c65_rtbl), + .get_rate = clk_fs216c65_get_rate, +}; + +static struct clkgen_quadfs_data st_fs432c65_416 = { + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x1, 15), + .bwfilter_present = true, + .ref_bw = CLKGEN_FIELD(0x0, 0x3, 16), + .pe = { CLKGEN_FIELD(0x8, 0xffff, 0), + CLKGEN_FIELD(0x18, 0xffff, 0), + CLKGEN_FIELD(0x28, 0xffff, 0), + CLKGEN_FIELD(0x38, 0xffff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0x7, 0), + CLKGEN_FIELD(0x1C, 0x7, 0), + CLKGEN_FIELD(0x2C, 0x7, 0), + CLKGEN_FIELD(0x3C, 0x7, 0) }, + .pll_ops = &st_quadfs_pll_c65_ops, + .rtbl = fs432c65_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs432c65_rtbl), + .get_rate = clk_fs432c65_get_rate, +}; + +static struct clkgen_quadfs_data st_fs660c32_E_416 = { + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x7, 15), + .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0), + CLKGEN_FIELD(0x18, 0x7fff, 0), + CLKGEN_FIELD(0x28, 0x7fff, 0), + CLKGEN_FIELD(0x38, 0x7fff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0), + CLKGEN_FIELD(0x1C, 0xf, 0), + CLKGEN_FIELD(0x2C, 0xf, 0), + CLKGEN_FIELD(0x3C, 0xf, 0) }, + .lockstatus_present = true, + .lock_status = CLKGEN_FIELD(0xAC, 0x1, 0), + .pll_ops = &st_quadfs_pll_c32_ops, + .rtbl = fs660c32_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl), + .get_rate = clk_fs660c32_dig_get_rate, +}; + 
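Every programmable control in these synthesizer descriptions is a CLKGEN_FIELD(offset, mask, shift) triple, and the clock ops further down touch the hardware only through the CLKGEN_READ()/CLKGEN_WRITE() helpers. Those helpers come from clkgen.h, which is not part of this hunk, so the following is only a sketch of the plausible shape of such a field accessor; the names and the exact macro semantics are assumptions, not copied from the ST header:

/*
 * Hypothetical field accessor in the spirit of CLKGEN_FIELD /
 * CLKGEN_READ / CLKGEN_WRITE; not the actual clkgen.h implementation.
 */
#include <linux/io.h>
#include <linux/types.h>

struct clkgen_field_example {
	unsigned int offset;	/* register offset from the block base */
	unsigned int mask;	/* right-aligned field mask, e.g. 0x1f */
	unsigned int shift;	/* bit position of the field's LSB */
};

static inline unsigned int example_field_read(void __iomem *base,
				const struct clkgen_field_example *f)
{
	return (readl(base + f->offset) >> f->shift) & f->mask;
}

static inline void example_field_write(void __iomem *base,
				const struct clkgen_field_example *f,
				unsigned int val)
{
	u32 reg = readl(base + f->offset);

	/* Read-modify-write: clear the field, then insert the new value. */
	reg &= ~(f->mask << f->shift);
	reg |= (val & f->mask) << f->shift;
	writel(reg, base + f->offset);
}

Read with that (assumed) model, .ndiv = CLKGEN_FIELD(0x0, 0x7, 15) in the C32 tables describes a 3-bit NDIV field at bits 17:15 of the first setup register, while the C65 tables above expose only a single NDIV bit at the same position.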
+static struct clkgen_quadfs_data st_fs660c32_F_416 = { + .npda = CLKGEN_FIELD(0x0, 0x1, 14), + .nsb = { CLKGEN_FIELD(0x0, 0x1, 10), + CLKGEN_FIELD(0x0, 0x1, 11), + CLKGEN_FIELD(0x0, 0x1, 12), + CLKGEN_FIELD(0x0, 0x1, 13) }, + .nsdiv_present = true, + .nsdiv = { CLKGEN_FIELD(0x0, 0x1, 18), + CLKGEN_FIELD(0x0, 0x1, 19), + CLKGEN_FIELD(0x0, 0x1, 20), + CLKGEN_FIELD(0x0, 0x1, 21) }, + .mdiv = { CLKGEN_FIELD(0x4, 0x1f, 0), + CLKGEN_FIELD(0x14, 0x1f, 0), + CLKGEN_FIELD(0x24, 0x1f, 0), + CLKGEN_FIELD(0x34, 0x1f, 0) }, + .en = { CLKGEN_FIELD(0x10, 0x1, 0), + CLKGEN_FIELD(0x20, 0x1, 0), + CLKGEN_FIELD(0x30, 0x1, 0), + CLKGEN_FIELD(0x40, 0x1, 0) }, + .ndiv = CLKGEN_FIELD(0x0, 0x7, 15), + .pe = { CLKGEN_FIELD(0x8, 0x7fff, 0), + CLKGEN_FIELD(0x18, 0x7fff, 0), + CLKGEN_FIELD(0x28, 0x7fff, 0), + CLKGEN_FIELD(0x38, 0x7fff, 0) }, + .sdiv = { CLKGEN_FIELD(0xC, 0xf, 0), + CLKGEN_FIELD(0x1C, 0xf, 0), + CLKGEN_FIELD(0x2C, 0xf, 0), + CLKGEN_FIELD(0x3C, 0xf, 0) }, + .lockstatus_present = true, + .lock_status = CLKGEN_FIELD(0xEC, 0x1, 0), + .pll_ops = &st_quadfs_pll_c32_ops, + .rtbl = fs660c32_rtbl, + .rtbl_cnt = ARRAY_SIZE(fs660c32_rtbl), + .get_rate = clk_fs660c32_dig_get_rate, +}; + +/** + * DOC: A Frequency Synthesizer that multiples its input clock by a fixed factor + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control the Fsyn + * rate - inherits rate from parent. set_rate/round_rate/recalc_rate + * parent - fixed parent. No clk_set_parent support + */ + +/** + * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of + * its parent clock, found inside a type of + * ST quad channel frequency synthesizer block + * + * @hw: handle between common and hardware-specific interfaces. + * @ndiv: regmap field for the ndiv control. + * @regs_base: base address of the configuration registers. + * @lock: spinlock. + * + */ +struct st_clk_quadfs_pll { + struct clk_hw hw; + void __iomem *regs_base; + spinlock_t *lock; + struct clkgen_quadfs_data *data; + u32 ndiv; +}; + +#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw) + +static int quadfs_pll_enable(struct clk_hw *hw) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10); + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + /* + * Bring block out of reset if we have reset control. + */ + if (pll->data->reset_present) + CLKGEN_WRITE(pll, nreset, 1); + + /* + * Use a fixed input clock noise bandwidth filter for the moment + */ + if (pll->data->bwfilter_present) + CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF); + + + CLKGEN_WRITE(pll, ndiv, pll->ndiv); + + /* + * Power up the PLL + */ + CLKGEN_WRITE(pll, npda, 1); + + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + if (pll->data->lockstatus_present) + while (!CLKGEN_READ(pll, lock_status)) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + cpu_relax(); + } + + return 0; +} + +static void quadfs_pll_disable(struct clk_hw *hw) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + unsigned long flags = 0; + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + /* + * Powerdown the PLL and then put block into soft reset if we have + * reset control. 
+ */ + CLKGEN_WRITE(pll, npda, 0); + + if (pll->data->reset_present) + CLKGEN_WRITE(pll, nreset, 0); + + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); +} + +static int quadfs_pll_is_enabled(struct clk_hw *hw) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + u32 npda = CLKGEN_READ(pll, npda); + + return !!npda; +} + +int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, + unsigned long *rate) +{ + unsigned long nd = fs->ndiv + 16; /* ndiv value */ + + *rate = input * nd; + + return 0; +} + +static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + unsigned long rate = 0; + struct stm_fs params; + + params.ndiv = CLKGEN_READ(pll, ndiv); + if (clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &rate)) + pr_err("%s:%s error calculating rate\n", + __clk_get_name(hw->clk), __func__); + + pll->ndiv = params.ndiv; + + return rate; +} + +int clk_fs660c32_vco_get_params(unsigned long input, + unsigned long output, struct stm_fs *fs) +{ +/* Formula + VCO frequency = (fin x ndiv) / pdiv + ndiv = VCOfreq * pdiv / fin + */ + unsigned long pdiv = 1, n; + + /* Output clock range: 384Mhz to 660Mhz */ + if (output < 384000000 || output > 660000000) + return -EINVAL; + + if (input > 40000000) + /* This means that PDIV would be 2 instead of 1. + Not supported today. */ + return -EINVAL; + + input /= 1000; + output /= 1000; + + n = output * pdiv / input; + if (n < 16) + n = 16; + fs->ndiv = n - 16; /* Converting formula value to reg value */ + + return 0; +} + +static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw, unsigned long rate + , unsigned long *prate) +{ + struct stm_fs params; + + if (!clk_fs660c32_vco_get_params(*prate, rate, ¶ms)) + clk_fs660c32_vco_get_rate(*prate, ¶ms, &rate); + + pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", + __func__, __clk_get_name(hw->clk), + rate, (unsigned int)params.sdiv, + (unsigned int)params.mdiv, + (unsigned int)params.pe, (unsigned int)params.nsdiv); + + return rate; +} + +static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); + struct stm_fs params; + long hwrate = 0; + unsigned long flags = 0; + + if (!rate || !parent_rate) + return -EINVAL; + + if (!clk_fs660c32_vco_get_params(parent_rate, rate, ¶ms)) + clk_fs660c32_vco_get_rate(parent_rate, ¶ms, &hwrate); + + pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n", + __func__, __clk_get_name(hw->clk), + hwrate, (unsigned int)params.ndiv); + + if (!hwrate) + return -EINVAL; + + pll->ndiv = params.ndiv; + + if (pll->lock) + spin_lock_irqsave(pll->lock, flags); + + CLKGEN_WRITE(pll, ndiv, pll->ndiv); + + if (pll->lock) + spin_unlock_irqrestore(pll->lock, flags); + + return 0; +} + +static const struct clk_ops st_quadfs_pll_c65_ops = { + .enable = quadfs_pll_enable, + .disable = quadfs_pll_disable, + .is_enabled = quadfs_pll_is_enabled, +}; + +static const struct clk_ops st_quadfs_pll_c32_ops = { + .enable = quadfs_pll_enable, + .disable = quadfs_pll_disable, + .is_enabled = quadfs_pll_is_enabled, + .recalc_rate = quadfs_pll_fs660c32_recalc_rate, + .round_rate = quadfs_pll_fs660c32_round_rate, + .set_rate = quadfs_pll_fs660c32_set_rate, +}; + +static struct clk * __init st_clk_register_quadfs_pll( + const char *name, const char *parent_name, + struct clkgen_quadfs_data *quadfs, void __iomem *reg, + spinlock_t *lock) +{ + struct st_clk_quadfs_pll *pll; + struct 
clk *clk; + struct clk_init_data init; + + /* + * Sanity check required pointers. + */ + if (WARN_ON(!name || !parent_name)) + return ERR_PTR(-EINVAL); + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = quadfs->pll_ops; + init.flags = CLK_IS_BASIC; + init.parent_names = &parent_name; + init.num_parents = 1; + + pll->data = quadfs; + pll->regs_base = reg; + pll->lock = lock; + pll->hw.init = &init; + + clk = clk_register(NULL, &pll->hw); + + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} + +/** + * DOC: A digital frequency synthesizer + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional + * rate - set rate is functional + * parent - fixed parent. No clk_set_parent support + */ + +/** + * struct st_clk_quadfs_fsynth - One clock output from a four channel digital + * frequency synthesizer (fsynth) block. + * + * @hw: handle between common and hardware-specific interfaces + * + * @nsb: regmap field in the output control register for the digital + * standby of this fsynth channel. This control is active low so + * the channel is in standby when the control bit is cleared. + * + * @nsdiv: regmap field in the output control register for + * for the optional divide by 3 of this fsynth channel. This control + * is active low so the divide by 3 is active when the control bit is + * cleared and the divide is bypassed when the bit is set. + */ +struct st_clk_quadfs_fsynth { + struct clk_hw hw; + void __iomem *regs_base; + spinlock_t *lock; + struct clkgen_quadfs_data *data; + + u32 chan; + /* + * Cached hardware values from set_rate so we can program the + * hardware in enable. There are two reasons for this: + * + * 1. The registers may not be writable until the parent has been + * enabled. + * + * 2. It restores the clock rate when a driver does an enable + * on PM restore, after a suspend to RAM has lost the hardware + * setup. + */ + u32 md; + u32 pe; + u32 sdiv; + u32 nsdiv; +}; + +#define to_quadfs_fsynth(_hw) \ + container_of(_hw, struct st_clk_quadfs_fsynth, hw) + +static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs) +{ + /* + * Pulse the program enable register lsb to make the hardware take + * notice of the new md/pe values with a glitchless transition. + */ + CLKGEN_WRITE(fs, en[fs->chan], 1); + CLKGEN_WRITE(fs, en[fs->chan], 0); +} + +static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs) +{ + unsigned long flags = 0; + + /* + * Ensure the md/pe parameters are ignored while we are + * reprogramming them so we can get a glitchless change + * when fine tuning the speed of a running clock. 
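+	 *
+	 * quadfs_fsynth_program_enable() is then used to pulse EN so that the
+	 * complete set of new values is taken into account in one step.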
+ */ + CLKGEN_WRITE(fs, en[fs->chan], 0); + + CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md); + CLKGEN_WRITE(fs, pe[fs->chan], fs->pe); + CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv); + + if (fs->lock) + spin_lock_irqsave(fs->lock, flags); + + if (fs->data->nsdiv_present) + CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv); + + if (fs->lock) + spin_unlock_irqrestore(fs->lock, flags); +} + +static int quadfs_fsynth_enable(struct clk_hw *hw) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + unsigned long flags = 0; + + pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + + quadfs_fsynth_program_rate(fs); + + if (fs->lock) + spin_lock_irqsave(fs->lock, flags); + + CLKGEN_WRITE(fs, nsb[fs->chan], 1); + + if (fs->lock) + spin_unlock_irqrestore(fs->lock, flags); + + quadfs_fsynth_program_enable(fs); + + return 0; +} + +static void quadfs_fsynth_disable(struct clk_hw *hw) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + unsigned long flags = 0; + + pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk)); + + if (fs->lock) + spin_lock_irqsave(fs->lock, flags); + + CLKGEN_WRITE(fs, nsb[fs->chan], 0); + + if (fs->lock) + spin_unlock_irqrestore(fs->lock, flags); +} + +static int quadfs_fsynth_is_enabled(struct clk_hw *hw) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]); + + pr_debug("%s: %s enable bit = 0x%x\n", + __func__, __clk_get_name(hw->clk), nsb); + + return !!nsb; +} + +#define P15 (uint64_t)(1 << 15) + +static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs, + unsigned long *rate) +{ + uint64_t res; + unsigned long ns; + unsigned long nd = 8; /* ndiv stuck at 0 => val = 8 */ + unsigned long s; + long m; + + m = fs->mdiv - 32; + s = 1 << (fs->sdiv + 1); + ns = (fs->nsdiv ? 1 : 3); + + res = (uint64_t)(s * ns * P15 * (uint64_t)(m + 33)); + res = res - (s * ns * fs->pe); + *rate = div64_u64(P15 * nd * input * 32, res); + + return 0; +} + +static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs, + unsigned long *rate) +{ + uint64_t res; + unsigned long nd = 16; /* ndiv value; stuck at 0 (30Mhz input) */ + long m; + unsigned long sd; + unsigned long ns; + + m = fs->mdiv - 32; + sd = 1 << (fs->sdiv + 1); + ns = (fs->nsdiv ? 1 : 3); + + res = (uint64_t)(sd * ns * P15 * (uint64_t)(m + 33)); + res = res - (sd * ns * fs->pe); + *rate = div64_u64(P15 * nd * input * 32, res); + + return 0; +} + +#define P20 (uint64_t)(1 << 20) + +static int clk_fs660c32_dig_get_rate(unsigned long input, + struct stm_fs *fs, unsigned long *rate) +{ + unsigned long s = (1 << fs->sdiv); + unsigned long ns; + uint64_t res; + + /* + * 'nsdiv' is a register value ('BIN') which is translated + * to a decimal value according to following rules. + * + * nsdiv ns.dec + * 0 3 + * 1 1 + */ + ns = (fs->nsdiv == 1) ? 1 : 3; + + res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns; + *rate = (unsigned long)div64_u64(input * P20 * 32, res); + + return 0; +} + +static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs, + struct stm_fs *params) +{ + /* + * Get the initial hardware values for recalc_rate + */ + params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]); + params->pe = CLKGEN_READ(fs, pe[fs->chan]); + params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]); + + if (fs->data->nsdiv_present) + params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]); + else + params->nsdiv = 1; + + /* + * If All are NULL then assume no clock rate is programmed. 
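+	 * Here "all NULL" means mdiv, pe and sdiv all read back as zero; in
+	 * that case recalc_rate reports a rate of 0.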
+ */ + if (!params->mdiv && !params->pe && !params->sdiv) + return 1; + + fs->md = params->mdiv; + fs->pe = params->pe; + fs->sdiv = params->sdiv; + fs->nsdiv = params->nsdiv; + + return 0; +} + +static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate, struct stm_fs *params) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + int (*clk_fs_get_rate)(unsigned long , + struct stm_fs *, unsigned long *); + struct stm_fs prev_params; + unsigned long prev_rate, rate = 0; + unsigned long diff_rate, prev_diff_rate = ~0; + int index; + + clk_fs_get_rate = fs->data->get_rate; + + for (index = 0; index < fs->data->rtbl_cnt; index++) { + prev_rate = rate; + + *params = fs->data->rtbl[index]; + prev_params = *params; + + clk_fs_get_rate(prate, &fs->data->rtbl[index], &rate); + + diff_rate = abs(drate - rate); + + if (diff_rate > prev_diff_rate) { + rate = prev_rate; + *params = prev_params; + break; + } + + prev_diff_rate = diff_rate; + + if (drate == rate) + return rate; + } + + + if (index == fs->data->rtbl_cnt) + *params = prev_params; + + return rate; +} + +static unsigned long quadfs_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + unsigned long rate = 0; + struct stm_fs params; + int (*clk_fs_get_rate)(unsigned long , + struct stm_fs *, unsigned long *); + + clk_fs_get_rate = fs->data->get_rate; + + if (quadfs_fsynt_get_hw_value_for_recalc(fs, ¶ms)) + return 0; + + if (clk_fs_get_rate(parent_rate, ¶ms, &rate)) { + pr_err("%s:%s error calculating rate\n", + __clk_get_name(hw->clk), __func__); + } + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct stm_fs params; + + rate = quadfs_find_best_rate(hw, rate, *prate, ¶ms); + + pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n", + __func__, __clk_get_name(hw->clk), + rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv, + (unsigned int)params.pe, (unsigned int)params.nsdiv); + + return rate; +} + + +static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs, + struct stm_fs *params) +{ + fs->md = params->mdiv; + fs->pe = params->pe; + fs->sdiv = params->sdiv; + fs->nsdiv = params->nsdiv; + + /* + * In some integrations you can only change the fsynth programming when + * the parent entity containing it is enabled. 
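+	 * For that reason the values are also cached in st_clk_quadfs_fsynth,
+	 * so quadfs_fsynth_enable() can write them again just before taking
+	 * the channel out of standby.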
+ */ + quadfs_fsynth_program_rate(fs); + quadfs_fsynth_program_enable(fs); +} + +static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw); + struct stm_fs params; + long hwrate; + int uninitialized_var(i); + + if (!rate || !parent_rate) + return -EINVAL; + + memset(¶ms, 0, sizeof(struct stm_fs)); + + hwrate = quadfs_find_best_rate(hw, rate, parent_rate, ¶ms); + if (!hwrate) + return -EINVAL; + + quadfs_program_and_enable(fs, ¶ms); + + return 0; +} + + + +static const struct clk_ops st_quadfs_ops = { + .enable = quadfs_fsynth_enable, + .disable = quadfs_fsynth_disable, + .is_enabled = quadfs_fsynth_is_enabled, + .round_rate = quadfs_round_rate, + .set_rate = quadfs_set_rate, + .recalc_rate = quadfs_recalc_rate, +}; + +static struct clk * __init st_clk_register_quadfs_fsynth( + const char *name, const char *parent_name, + struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan, + spinlock_t *lock) +{ + struct st_clk_quadfs_fsynth *fs; + struct clk *clk; + struct clk_init_data init; + + /* + * Sanity check required pointers, note that nsdiv3 is optional. + */ + if (WARN_ON(!name || !parent_name)) + return ERR_PTR(-EINVAL); + + fs = kzalloc(sizeof(*fs), GFP_KERNEL); + if (!fs) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &st_quadfs_ops; + init.flags = CLK_GET_RATE_NOCACHE | CLK_IS_BASIC; + init.parent_names = &parent_name; + init.num_parents = 1; + + fs->data = quadfs; + fs->regs_base = reg; + fs->chan = chan; + fs->lock = lock; + fs->hw.init = &init; + + clk = clk_register(NULL, &fs->hw); + + if (IS_ERR(clk)) + kfree(fs); + + return clk; +} + +static struct of_device_id quadfs_of_match[] = { + { + .compatible = "st,stih416-quadfs216", + .data = (void *)&st_fs216c65_416 + }, + { + .compatible = "st,stih416-quadfs432", + .data = (void *)&st_fs432c65_416 + }, + { + .compatible = "st,stih416-quadfs660-E", + .data = (void *)&st_fs660c32_E_416 + }, + { + .compatible = "st,stih416-quadfs660-F", + .data = (void *)&st_fs660c32_F_416 + }, + {} +}; + +static void __init st_of_create_quadfs_fsynths( + struct device_node *np, const char *pll_name, + struct clkgen_quadfs_data *quadfs, void __iomem *reg, + spinlock_t *lock) +{ + struct clk_onecell_data *clk_data; + int fschan; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->clk_num = QUADFS_MAX_CHAN; + clk_data->clks = kzalloc(QUADFS_MAX_CHAN * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) { + kfree(clk_data); + return; + } + + for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) { + struct clk *clk; + const char *clk_name; + + if (of_property_read_string_index(np, "clock-output-names", + fschan, &clk_name)) { + break; + } + + /* + * If we read an empty clock name then the channel is unused + */ + if (*clk_name == '\0') + continue; + + clk = st_clk_register_quadfs_fsynth(clk_name, pll_name, + quadfs, reg, fschan, lock); + + /* + * If there was an error registering this clock output, clean + * up and move on to the next one. 
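+	 * The corresponding entry in clk_data->clks is simply left NULL.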
+ */ + if (!IS_ERR(clk)) { + clk_data->clks[fschan] = clk; + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + } + } + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); +} + +static void __init st_of_quadfs_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + const char *pll_name, *clk_parent_name; + void __iomem *reg; + spinlock_t *lock; + + match = of_match_node(quadfs_of_match, np); + if (WARN_ON(!match)) + return; + + reg = of_iomap(np, 0); + if (!reg) + return; + + clk_parent_name = of_clk_get_parent_name(np, 0); + if (!clk_parent_name) + return; + + pll_name = kasprintf(GFP_KERNEL, "%s.pll", np->name); + if (!pll_name) + return; + + lock = kzalloc(sizeof(*lock), GFP_KERNEL); + if (!lock) + goto err_exit; + + spin_lock_init(lock); + + clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, + (struct clkgen_quadfs_data *) match->data, reg, lock); + if (IS_ERR(clk)) + goto err_exit; + else + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + st_of_create_quadfs_fsynths(np, pll_name, + (struct clkgen_quadfs_data *)match->data, + reg, lock); + +err_exit: + kfree(pll_name); /* No longer need local copy of the PLL name */ +} +CLK_OF_DECLARE(quadfs, "st,quadfs", st_of_quadfs_setup); diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c new file mode 100644 index 000000000000..a329906d1e81 --- /dev/null +++ b/drivers/clk/st/clkgen-mux.c @@ -0,0 +1,820 @@ +/* + * clkgen-mux.c: ST GEN-MUX Clock driver + * + * Copyright (C) 2014 STMicroelectronics (R&D) Limited + * + * Authors: Stephen Gallimore <stephen.gallimore@st.com> + * Pankaj Dev <pankaj.dev@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +static DEFINE_SPINLOCK(clkgena_divmux_lock); +static DEFINE_SPINLOCK(clkgenf_lock); + +static const char ** __init clkgen_mux_get_parents(struct device_node *np, + int *num_parents) +{ + const char **parents; + int nparents, i; + + nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (WARN_ON(nparents <= 0)) + return ERR_PTR(-EINVAL); + + parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL); + if (!parents) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nparents; i++) + parents[i] = of_clk_get_parent_name(np, i); + + *num_parents = nparents; + return parents; +} + +/** + * DOC: Clock mux with a programmable divider on each of its three inputs. + * The mux has an input setting which effectively gates its output. 
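+	 * Selecting the fourth mux input (CKGAX_CLKOPSRC_SWITCH_OFF) stops the
+	 * output, which is what the disable operation below relies on.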
+ * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable and clk_disable are functional & control gating + * rate - set rate is supported + * parent - set/get parent + */ + +#define NUM_INPUTS 3 + +struct clkgena_divmux { + struct clk_hw hw; + /* Subclassed mux and divider structures */ + struct clk_mux mux; + struct clk_divider div[NUM_INPUTS]; + /* Enable/running feedback register bits for each input */ + void __iomem *feedback_reg[NUM_INPUTS]; + int feedback_bit_idx; + + u8 muxsel; +}; + +#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw) + +struct clkgena_divmux_data { + int num_outputs; + int mux_offset; + int mux_offset2; + int mux_start_bit; + int div_offsets[NUM_INPUTS]; + int fb_offsets[NUM_INPUTS]; + int fb_start_bit_idx; +}; + +#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3 + +static int clkgena_divmux_is_running(struct clkgena_divmux *mux) +{ + u32 regval = readl(mux->feedback_reg[mux->muxsel]); + u32 running = regval & BIT(mux->feedback_bit_idx); + return !!running; +} + +static int clkgena_divmux_enable(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + unsigned long timeout; + int ret = 0; + + mux_hw->clk = hw->clk; + + ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel); + if (ret) + return ret; + + timeout = jiffies + msecs_to_jiffies(10); + + while (!clkgena_divmux_is_running(genamux)) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + cpu_relax(); + } + + return 0; +} + +static void clkgena_divmux_disable(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + + mux_hw->clk = hw->clk; + + clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF); +} + +static int clkgena_divmux_is_enabled(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + + mux_hw->clk = hw->clk; + + return (s8)clk_mux_ops.get_parent(mux_hw) > 0; +} + +u8 clkgena_divmux_get_parent(struct clk_hw *hw) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *mux_hw = &genamux->mux.hw; + + mux_hw->clk = hw->clk; + + genamux->muxsel = clk_mux_ops.get_parent(mux_hw); + if ((s8)genamux->muxsel < 0) { + pr_debug("%s: %s: Invalid parent, setting to default.\n", + __func__, __clk_get_name(hw->clk)); + genamux->muxsel = 0; + } + + return genamux->muxsel; +} + +static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + + if (index >= CKGAX_CLKOPSRC_SWITCH_OFF) + return -EINVAL; + + genamux->muxsel = index; + + /* + * If the mux is already enabled, call enable directly to set the + * new mux position and wait for it to start running again. Otherwise + * do nothing. 
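+	 * If the clock is currently disabled, only the cached muxsel is
+	 * updated and the hardware switch happens on the next enable.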
+ */ + if (clkgena_divmux_is_enabled(hw)) + clkgena_divmux_enable(hw); + + return 0; +} + +unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + + div_hw->clk = hw->clk; + + return clk_divider_ops.recalc_rate(div_hw, parent_rate); +} + +static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + + div_hw->clk = hw->clk; + + return clk_divider_ops.set_rate(div_hw, rate, parent_rate); +} + +static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clkgena_divmux *genamux = to_clkgena_divmux(hw); + struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; + + div_hw->clk = hw->clk; + + return clk_divider_ops.round_rate(div_hw, rate, prate); +} + +static const struct clk_ops clkgena_divmux_ops = { + .enable = clkgena_divmux_enable, + .disable = clkgena_divmux_disable, + .is_enabled = clkgena_divmux_is_enabled, + .get_parent = clkgena_divmux_get_parent, + .set_parent = clkgena_divmux_set_parent, + .round_rate = clkgena_divmux_round_rate, + .recalc_rate = clkgena_divmux_recalc_rate, + .set_rate = clkgena_divmux_set_rate, +}; + +/** + * clk_register_genamux - register a genamux clock with the clock framework + */ +struct clk *clk_register_genamux(const char *name, + const char **parent_names, u8 num_parents, + void __iomem *reg, + const struct clkgena_divmux_data *muxdata, + u32 idx) +{ + /* + * Fixed constants across all ClockgenA variants + */ + const int mux_width = 2; + const int divider_width = 5; + struct clkgena_divmux *genamux; + struct clk *clk; + struct clk_init_data init; + int i; + + genamux = kzalloc(sizeof(*genamux), GFP_KERNEL); + if (!genamux) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &clkgena_divmux_ops; + init.flags = CLK_IS_BASIC; + init.parent_names = parent_names; + init.num_parents = num_parents; + + genamux->mux.lock = &clkgena_divmux_lock; + genamux->mux.mask = BIT(mux_width) - 1; + genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width); + if (genamux->mux.shift > 31) { + /* + * We have spilled into the second mux register so + * adjust the register address and the bit shift accordingly + */ + genamux->mux.reg = reg + muxdata->mux_offset2; + genamux->mux.shift -= 32; + } else { + genamux->mux.reg = reg + muxdata->mux_offset; + } + + for (i = 0; i < NUM_INPUTS; i++) { + /* + * Divider config for each input + */ + void __iomem *divbase = reg + muxdata->div_offsets[i]; + genamux->div[i].width = divider_width; + genamux->div[i].reg = divbase + (idx * sizeof(u32)); + + /* + * Mux enabled/running feedback register for each input. 
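+	 * The running bit for this output is fb_start_bit_idx + idx within
+	 * that register; for example, with the c65 low-speed table below
+	 * (fb_start_bit_idx = 4), output 2 reports running status in bit 6.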
+ */ + genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i]; + } + + genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx; + genamux->hw.init = &init; + + clk = clk_register(NULL, &genamux->hw); + if (IS_ERR(clk)) { + kfree(genamux); + goto err; + } + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); +err: + return clk; +} + +static struct clkgena_divmux_data st_divmux_c65hs = { + .num_outputs = 4, + .mux_offset = 0x14, + .mux_start_bit = 0, + .div_offsets = { 0x800, 0x900, 0xb00 }, + .fb_offsets = { 0x18, 0x1c, 0x20 }, + .fb_start_bit_idx = 0, +}; + +static struct clkgena_divmux_data st_divmux_c65ls = { + .num_outputs = 14, + .mux_offset = 0x14, + .mux_offset2 = 0x24, + .mux_start_bit = 8, + .div_offsets = { 0x810, 0xa10, 0xb10 }, + .fb_offsets = { 0x18, 0x1c, 0x20 }, + .fb_start_bit_idx = 4, +}; + +static struct clkgena_divmux_data st_divmux_c32odf0 = { + .num_outputs = 8, + .mux_offset = 0x1c, + .mux_start_bit = 0, + .div_offsets = { 0x800, 0x900, 0xa60 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 0, +}; + +static struct clkgena_divmux_data st_divmux_c32odf1 = { + .num_outputs = 8, + .mux_offset = 0x1c, + .mux_start_bit = 16, + .div_offsets = { 0x820, 0x980, 0xa80 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 8, +}; + +static struct clkgena_divmux_data st_divmux_c32odf2 = { + .num_outputs = 8, + .mux_offset = 0x20, + .mux_start_bit = 0, + .div_offsets = { 0x840, 0xa20, 0xb10 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 16, +}; + +static struct clkgena_divmux_data st_divmux_c32odf3 = { + .num_outputs = 8, + .mux_offset = 0x20, + .mux_start_bit = 16, + .div_offsets = { 0x860, 0xa40, 0xb30 }, + .fb_offsets = { 0x2c, 0x24, 0x28 }, + .fb_start_bit_idx = 24, +}; + +static struct of_device_id clkgena_divmux_of_match[] = { + { + .compatible = "st,clkgena-divmux-c65-hs", + .data = &st_divmux_c65hs, + }, + { + .compatible = "st,clkgena-divmux-c65-ls", + .data = &st_divmux_c65ls, + }, + { + .compatible = "st,clkgena-divmux-c32-odf0", + .data = &st_divmux_c32odf0, + }, + { + .compatible = "st,clkgena-divmux-c32-odf1", + .data = &st_divmux_c32odf1, + }, + { + .compatible = "st,clkgena-divmux-c32-odf2", + .data = &st_divmux_c32odf2, + }, + { + .compatible = "st,clkgena-divmux-c32-odf3", + .data = &st_divmux_c32odf3, + }, + {} +}; + +static void __iomem * __init clkgen_get_register_base( + struct device_node *np) +{ + struct device_node *pnode; + void __iomem *reg = NULL; + + pnode = of_get_parent(np); + if (!pnode) + return NULL; + + reg = of_iomap(pnode, 0); + + of_node_put(pnode); + return reg; +} + +void __init st_of_clkgena_divmux_setup(struct device_node *np) +{ + const struct of_device_id *match; + const struct clkgena_divmux_data *data; + struct clk_onecell_data *clk_data; + void __iomem *reg; + const char **parents; + int num_parents = 0, i; + + match = of_match_node(clkgena_divmux_of_match, np); + if (WARN_ON(!match)) + return; + + data = (struct clkgena_divmux_data *)match->data; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + parents = clkgen_mux_get_parents(np, &num_parents); + if (IS_ERR(parents)) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + goto err; + + clk_data->clk_num = data->num_outputs; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + for (i = 0; i < clk_data->clk_num; i++) { + struct clk *clk; + const char 
*clk_name; + + if (of_property_read_string_index(np, "clock-output-names", + i, &clk_name)) + break; + + /* + * If we read an empty clock name then the output is unused + */ + if (*clk_name == '\0') + continue; + + clk = clk_register_genamux(clk_name, parents, num_parents, + reg, data, i); + + if (IS_ERR(clk)) + goto err; + + clk_data->clks[i] = clk; + } + + kfree(parents); + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; +err: + if (clk_data) + kfree(clk_data->clks); + + kfree(clk_data); + kfree(parents); +} +CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup); + +struct clkgena_prediv_data { + u32 offset; + u8 shift; + struct clk_div_table *table; +}; + +static struct clk_div_table prediv_table16[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 16 }, + { .div = 0 }, +}; + +static struct clkgena_prediv_data prediv_c65_data = { + .offset = 0x4c, + .shift = 31, + .table = prediv_table16, +}; + +static struct clkgena_prediv_data prediv_c32_data = { + .offset = 0x50, + .shift = 1, + .table = prediv_table16, +}; + +static struct of_device_id clkgena_prediv_of_match[] = { + { .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data }, + { .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data }, + {} +}; + +void __init st_of_clkgena_prediv_setup(struct device_node *np) +{ + const struct of_device_id *match; + void __iomem *reg; + const char *parent_name, *clk_name; + struct clk *clk; + struct clkgena_prediv_data *data; + + match = of_match_node(clkgena_prediv_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgena_prediv_data *)match->data; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + if (of_property_read_string_index(np, "clock-output-names", + 0, &clk_name)) + return; + + clk = clk_register_divider_table(NULL, clk_name, parent_name, 0, + reg + data->offset, data->shift, 1, + 0, data->table, NULL); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + return; +} +CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup); + +struct clkgen_mux_data { + u32 offset; + u8 shift; + u8 width; + spinlock_t *lock; + unsigned long clk_flags; + u8 mux_flags; +}; + +static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = { + .offset = 0, + .shift = 0, + .width = 1, +}; + +static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = { + .offset = 0, + .shift = 0, + .width = 1, +}; + +static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = { + .offset = 0, + .shift = 0, + .width = 1, +}; + +static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = { + .offset = 0, + .shift = 16, + .width = 1, + .lock = &clkgenf_lock, +}; + +static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = { + .offset = 0, + .shift = 17, + .width = 1, + .lock = &clkgenf_lock, +}; + +static struct clkgen_mux_data stih415_a9_mux_data = { + .offset = 0, + .shift = 1, + .width = 2, +}; +static struct clkgen_mux_data stih416_a9_mux_data = { + .offset = 0, + .shift = 0, + .width = 2, +}; + +static struct of_device_id mux_of_match[] = { + { + .compatible = "st,stih416-clkgenc-vcc-hd", + .data = &clkgen_mux_c_vcc_hd_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-fvdp", + .data = &clkgen_mux_f_vcc_fvdp_416, + }, + { + 
.compatible = "st,stih416-clkgenf-vcc-hva", + .data = &clkgen_mux_f_vcc_hva_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-hd", + .data = &clkgen_mux_f_vcc_hd_416, + }, + { + .compatible = "st,stih416-clkgenf-vcc-sd", + .data = &clkgen_mux_c_vcc_sd_416, + }, + { + .compatible = "st,stih415-clkgen-a9-mux", + .data = &stih415_a9_mux_data, + }, + { + .compatible = "st,stih416-clkgen-a9-mux", + .data = &stih416_a9_mux_data, + }, + {} +}; + +void __init st_of_clkgen_mux_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + void __iomem *reg; + const char **parents; + int num_parents; + struct clkgen_mux_data *data; + + match = of_match_node(mux_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgen_mux_data *)match->data; + + reg = of_iomap(np, 0); + if (!reg) { + pr_err("%s: Failed to get base address\n", __func__); + return; + } + + parents = clkgen_mux_get_parents(np, &num_parents); + if (IS_ERR(parents)) { + pr_err("%s: Failed to get parents (%ld)\n", + __func__, PTR_ERR(parents)); + return; + } + + clk = clk_register_mux(NULL, np->name, parents, num_parents, + data->clk_flags | CLK_SET_RATE_PARENT, + reg + data->offset, + data->shift, data->width, data->mux_flags, + data->lock); + if (IS_ERR(clk)) + goto err; + + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + +err: + kfree(parents); + + return; +} +CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup); + +#define VCC_MAX_CHANNELS 16 + +#define VCC_GATE_OFFSET 0x0 +#define VCC_MUX_OFFSET 0x4 +#define VCC_DIV_OFFSET 0x8 + +struct clkgen_vcc_data { + spinlock_t *lock; + unsigned long clk_flags; +}; + +static struct clkgen_vcc_data st_clkgenc_vcc_416 = { + .clk_flags = CLK_SET_RATE_PARENT, +}; + +static struct clkgen_vcc_data st_clkgenf_vcc_416 = { + .lock = &clkgenf_lock, +}; + +static struct of_device_id vcc_of_match[] = { + { .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 }, + { .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 }, + {} +}; + +void __init st_of_clkgen_vcc_setup(struct device_node *np) +{ + const struct of_device_id *match; + void __iomem *reg; + const char **parents; + int num_parents, i; + struct clk_onecell_data *clk_data; + struct clkgen_vcc_data *data; + + match = of_match_node(vcc_of_match, np); + if (WARN_ON(!match)) + return; + data = (struct clkgen_vcc_data *)match->data; + + reg = of_iomap(np, 0); + if (!reg) + return; + + parents = clkgen_mux_get_parents(np, &num_parents); + if (IS_ERR(parents)) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + goto err; + + clk_data->clk_num = VCC_MAX_CHANNELS; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + for (i = 0; i < clk_data->clk_num; i++) { + struct clk *clk; + const char *clk_name; + struct clk_gate *gate; + struct clk_divider *div; + struct clk_mux *mux; + + if (of_property_read_string_index(np, "clock-output-names", + i, &clk_name)) + break; + + /* + * If we read an empty clock name then the output is unused + */ + if (*clk_name == '\0') + continue; + + gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); + if (!gate) + break; + + div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); + if (!div) { + kfree(gate); + break; + } + + mux = kzalloc(sizeof(struct clk_mux), 
GFP_KERNEL); + if (!mux) { + kfree(gate); + kfree(div); + break; + } + + gate->reg = reg + VCC_GATE_OFFSET; + gate->bit_idx = i; + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->lock = data->lock; + + div->reg = reg + VCC_DIV_OFFSET; + div->shift = 2 * i; + div->width = 2; + div->flags = CLK_DIVIDER_POWER_OF_TWO; + + mux->reg = reg + VCC_MUX_OFFSET; + mux->shift = 2 * i; + mux->mask = 0x3; + + clk = clk_register_composite(NULL, clk_name, parents, + num_parents, + &mux->hw, &clk_mux_ops, + &div->hw, &clk_divider_ops, + &gate->hw, &clk_gate_ops, + data->clk_flags); + if (IS_ERR(clk)) { + kfree(gate); + kfree(div); + kfree(mux); + goto err; + } + + pr_debug("%s: parent %s rate %u\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + (unsigned int)clk_get_rate(clk)); + + clk_data->clks[i] = clk; + } + + kfree(parents); + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; + +err: + for (i = 0; i < clk_data->clk_num; i++) { + struct clk_composite *composite; + + if (!clk_data->clks[i]) + continue; + + composite = container_of(__clk_get_hw(clk_data->clks[i]), + struct clk_composite, hw); + kfree(container_of(composite->gate_hw, struct clk_gate, hw)); + kfree(container_of(composite->rate_hw, struct clk_divider, hw)); + kfree(container_of(composite->mux_hw, struct clk_mux, hw)); + } + + if (clk_data) + kfree(clk_data->clks); + + kfree(clk_data); + kfree(parents); +} +CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c new file mode 100644 index 000000000000..bca0a0badbfa --- /dev/null +++ b/drivers/clk/st/clkgen-pll.c @@ -0,0 +1,698 @@ +/* + * Copyright (C) 2014 STMicroelectronics (R&D) Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +/* + * Authors: + * Stephen Gallimore <stephen.gallimore@st.com>, + * Pankaj Dev <pankaj.dev@st.com>. 
+ */ + +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/clk-provider.h> + +#include "clkgen.h" + +static DEFINE_SPINLOCK(clkgena_c32_odf_lock); + +/* + * Common PLL configuration register bits for PLL800 and PLL1600 C65 + */ +#define C65_MDIV_PLL800_MASK (0xff) +#define C65_MDIV_PLL1600_MASK (0x7) +#define C65_NDIV_MASK (0xff) +#define C65_PDIV_MASK (0x7) + +/* + * PLL configuration register bits for PLL3200 C32 + */ +#define C32_NDIV_MASK (0xff) +#define C32_IDF_MASK (0x7) +#define C32_ODF_MASK (0x3f) +#define C32_LDF_MASK (0x7f) + +#define C32_MAX_ODFS (4) + +struct clkgen_pll_data { + struct clkgen_field pdn_status; + struct clkgen_field locked_status; + struct clkgen_field mdiv; + struct clkgen_field ndiv; + struct clkgen_field pdiv; + struct clkgen_field idf; + struct clkgen_field ldf; + unsigned int num_odfs; + struct clkgen_field odf[C32_MAX_ODFS]; + struct clkgen_field odf_gate[C32_MAX_ODFS]; + const struct clk_ops *ops; +}; + +static const struct clk_ops st_pll1600c65_ops; +static const struct clk_ops st_pll800c65_ops; +static const struct clk_ops stm_pll3200c32_ops; +static const struct clk_ops st_pll1200c32_ops; + +static struct clkgen_pll_data st_pll1600c65_ax = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19), + .locked_status = CLKGEN_FIELD(0x0, 0x1, 31), + .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK, 0), + .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8), + .ops = &st_pll1600c65_ops +}; + +static struct clkgen_pll_data st_pll800c65_ax = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 19), + .locked_status = CLKGEN_FIELD(0x0, 0x1, 31), + .mdiv = CLKGEN_FIELD(0x0, C65_MDIV_PLL800_MASK, 0), + .ndiv = CLKGEN_FIELD(0x0, C65_NDIV_MASK, 8), + .pdiv = CLKGEN_FIELD(0x0, C65_PDIV_MASK, 16), + .ops = &st_pll800c65_ops +}; + +static struct clkgen_pll_data st_pll3200c32_a1x_0 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 31), + .locked_status = CLKGEN_FIELD(0x4, 0x1, 31), + .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 0x0), + .idf = CLKGEN_FIELD(0x4, C32_IDF_MASK, 0x0), + .num_odfs = 4, + .odf = { CLKGEN_FIELD(0x54, C32_ODF_MASK, 4), + CLKGEN_FIELD(0x54, C32_ODF_MASK, 10), + CLKGEN_FIELD(0x54, C32_ODF_MASK, 16), + CLKGEN_FIELD(0x54, C32_ODF_MASK, 22) }, + .odf_gate = { CLKGEN_FIELD(0x54, 0x1, 0), + CLKGEN_FIELD(0x54, 0x1, 1), + CLKGEN_FIELD(0x54, 0x1, 2), + CLKGEN_FIELD(0x54, 0x1, 3) }, + .ops = &stm_pll3200c32_ops, +}; + +static struct clkgen_pll_data st_pll3200c32_a1x_1 = { + .pdn_status = CLKGEN_FIELD(0xC, 0x1, 31), + .locked_status = CLKGEN_FIELD(0x10, 0x1, 31), + .ndiv = CLKGEN_FIELD(0xC, C32_NDIV_MASK, 0x0), + .idf = CLKGEN_FIELD(0x10, C32_IDF_MASK, 0x0), + .num_odfs = 4, + .odf = { CLKGEN_FIELD(0x58, C32_ODF_MASK, 4), + CLKGEN_FIELD(0x58, C32_ODF_MASK, 10), + CLKGEN_FIELD(0x58, C32_ODF_MASK, 16), + CLKGEN_FIELD(0x58, C32_ODF_MASK, 22) }, + .odf_gate = { CLKGEN_FIELD(0x58, 0x1, 0), + CLKGEN_FIELD(0x58, 0x1, 1), + CLKGEN_FIELD(0x58, 0x1, 2), + CLKGEN_FIELD(0x58, 0x1, 3) }, + .ops = &stm_pll3200c32_ops, +}; + +/* 415 specific */ +static struct clkgen_pll_data st_pll3200c32_a9_415 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x0, C32_NDIV_MASK, 9), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 22), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 3) }, + .odf_gate = { CLKGEN_FIELD(0x0, 0x1, 28) }, + .ops = &stm_pll3200c32_ops, +}; + +static struct clkgen_pll_data st_pll3200c32_ddr_415 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x100, 0x1, 0), + .ndiv = 
CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25), + .num_odfs = 2, + .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8), + CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) }, + .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28), + CLKGEN_FIELD(0x4, 0x1, 29) }, + .ops = &stm_pll3200c32_ops, +}; + +static struct clkgen_pll_data st_pll1200c32_gpu_415 = { + .pdn_status = CLKGEN_FIELD(0x144, 0x1, 3), + .locked_status = CLKGEN_FIELD(0x168, 0x1, 0), + .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0), + .num_odfs = 0, + .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) }, + .ops = &st_pll1200c32_ops, +}; + +/* 416 specific */ +static struct clkgen_pll_data st_pll3200c32_a9_416 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x6C, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25), + .num_odfs = 1, + .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8) }, + .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28) }, + .ops = &stm_pll3200c32_ops, +}; + +static struct clkgen_pll_data st_pll3200c32_ddr_416 = { + .pdn_status = CLKGEN_FIELD(0x0, 0x1, 0), + .locked_status = CLKGEN_FIELD(0x10C, 0x1, 0), + .ndiv = CLKGEN_FIELD(0x8, C32_NDIV_MASK, 0), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 25), + .num_odfs = 2, + .odf = { CLKGEN_FIELD(0x8, C32_ODF_MASK, 8), + CLKGEN_FIELD(0x8, C32_ODF_MASK, 14) }, + .odf_gate = { CLKGEN_FIELD(0x4, 0x1, 28), + CLKGEN_FIELD(0x4, 0x1, 29) }, + .ops = &stm_pll3200c32_ops, +}; + +static struct clkgen_pll_data st_pll1200c32_gpu_416 = { + .pdn_status = CLKGEN_FIELD(0x8E4, 0x1, 3), + .locked_status = CLKGEN_FIELD(0x90C, 0x1, 0), + .ldf = CLKGEN_FIELD(0x0, C32_LDF_MASK, 3), + .idf = CLKGEN_FIELD(0x0, C32_IDF_MASK, 0), + .num_odfs = 0, + .odf = { CLKGEN_FIELD(0x0, C32_ODF_MASK, 10) }, + .ops = &st_pll1200c32_ops, +}; + +/** + * DOC: Clock Generated by PLL, rate set and enabled by bootloader + * + * Traits of this clock: + * prepare - clk_(un)prepare only ensures parent is (un)prepared + * enable - clk_enable/disable only ensures parent is enabled + * rate - rate is fixed. No clk_set_rate support + * parent - fixed parent. No clk_set_parent support + */ + +/** + * PLL clock that is integrated in the ClockGenA instances on the STiH415 + * and STiH416. + * + * @hw: handle between common and hardware-specific interfaces. + * @type: PLL instance type. + * @regs_base: base of the PLL configuration register(s). 
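+ * @data: register fields and clk_ops describing this PLL variant.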
+ * + */ +struct clkgen_pll { + struct clk_hw hw; + struct clkgen_pll_data *data; + void __iomem *regs_base; +}; + +#define to_clkgen_pll(_hw) container_of(_hw, struct clkgen_pll, hw) + +static int clkgen_pll_is_locked(struct clk_hw *hw) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + u32 locked = CLKGEN_READ(pll, locked_status); + + return !!locked; +} + +static int clkgen_pll_is_enabled(struct clk_hw *hw) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + u32 poweroff = CLKGEN_READ(pll, pdn_status); + return !poweroff; +} + +unsigned long recalc_stm_pll800c65(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long mdiv, ndiv, pdiv; + unsigned long rate; + uint64_t res; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + pdiv = CLKGEN_READ(pll, pdiv); + mdiv = CLKGEN_READ(pll, mdiv); + ndiv = CLKGEN_READ(pll, ndiv); + + if (!mdiv) + mdiv++; /* mdiv=0 or 1 => MDIV=1 */ + + res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv; + rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv)); + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; + +} + +unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long mdiv, ndiv; + unsigned long rate; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + mdiv = CLKGEN_READ(pll, mdiv); + ndiv = CLKGEN_READ(pll, ndiv); + + if (!mdiv) + mdiv = 1; + + /* Note: input is divided by 1000 to avoid overflow */ + rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000; + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long ndiv, idf; + unsigned long rate = 0; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + ndiv = CLKGEN_READ(pll, ndiv); + idf = CLKGEN_READ(pll, idf); + + if (idf) + /* Note: input is divided to avoid overflow */ + rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000; + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +unsigned long recalc_stm_pll1200c32(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clkgen_pll *pll = to_clkgen_pll(hw); + unsigned long odf, ldf, idf; + unsigned long rate; + + if (!clkgen_pll_is_enabled(hw) || !clkgen_pll_is_locked(hw)) + return 0; + + odf = CLKGEN_READ(pll, odf[0]); + ldf = CLKGEN_READ(pll, ldf); + idf = CLKGEN_READ(pll, idf); + + if (!idf) /* idf==0 means 1 */ + idf = 1; + if (!odf) /* odf==0 means 1 */ + odf = 1; + + /* Note: input is divided by 1000 to avoid overflow */ + rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000; + + pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate); + + return rate; +} + +static const struct clk_ops st_pll1600c65_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll1600c65, +}; + +static const struct clk_ops st_pll800c65_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll800c65, +}; + +static const struct clk_ops stm_pll3200c32_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll3200c32, +}; + +static const struct clk_ops st_pll1200c32_ops = { + .is_enabled = clkgen_pll_is_enabled, + .recalc_rate = recalc_stm_pll1200c32, +}; + +static struct clk * __init 
clkgen_pll_register(const char *parent_name, + struct clkgen_pll_data *pll_data, + void __iomem *reg, + const char *clk_name) +{ + struct clkgen_pll *pll; + struct clk *clk; + struct clk_init_data init; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = clk_name; + init.ops = pll_data->ops; + + init.flags = CLK_IS_BASIC; + init.parent_names = &parent_name; + init.num_parents = 1; + + pll->data = pll_data; + pll->regs_base = reg; + pll->hw.init = &init; + + clk = clk_register(NULL, &pll->hw); + if (IS_ERR(clk)) { + kfree(pll); + return clk; + } + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); + + return clk; +} + +static struct clk * __init clkgen_c65_lsdiv_register(const char *parent_name, + const char *clk_name) +{ + struct clk *clk; + + clk = clk_register_fixed_factor(NULL, clk_name, parent_name, 0, 1, 2); + if (IS_ERR(clk)) + return clk; + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); + return clk; +} + +static void __iomem * __init clkgen_get_register_base( + struct device_node *np) +{ + struct device_node *pnode; + void __iomem *reg = NULL; + + pnode = of_get_parent(np); + if (!pnode) + return NULL; + + reg = of_iomap(pnode, 0); + + of_node_put(pnode); + return reg; +} + +#define CLKGENAx_PLL0_OFFSET 0x0 +#define CLKGENAx_PLL1_OFFSET 0x4 + +static void __init clkgena_c65_pll_setup(struct device_node *np) +{ + const int num_pll_outputs = 3; + struct clk_onecell_data *clk_data; + const char *parent_name; + void __iomem *reg; + const char *clk_name; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->clk_num = num_pll_outputs; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + if (of_property_read_string_index(np, "clock-output-names", + 0, &clk_name)) + goto err; + + /* + * PLL0 HS (high speed) output + */ + clk_data->clks[0] = clkgen_pll_register(parent_name, + &st_pll1600c65_ax, + reg + CLKGENAx_PLL0_OFFSET, + clk_name); + + if (IS_ERR(clk_data->clks[0])) + goto err; + + if (of_property_read_string_index(np, "clock-output-names", + 1, &clk_name)) + goto err; + + /* + * PLL0 LS (low speed) output, which is a fixed divide by 2 of the + * high speed output. 
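+	 * This is registered as a fixed-factor clock with mult = 1 and
+	 * div = 2, so a 1600 MHz HS output gives an 800 MHz LS clock.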
+ */ + clk_data->clks[1] = clkgen_c65_lsdiv_register(__clk_get_name + (clk_data->clks[0]), + clk_name); + + if (IS_ERR(clk_data->clks[1])) + goto err; + + if (of_property_read_string_index(np, "clock-output-names", + 2, &clk_name)) + goto err; + + /* + * PLL1 output + */ + clk_data->clks[2] = clkgen_pll_register(parent_name, + &st_pll800c65_ax, + reg + CLKGENAx_PLL1_OFFSET, + clk_name); + + if (IS_ERR(clk_data->clks[2])) + goto err; + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; + +err: + kfree(clk_data->clks); + kfree(clk_data); +} +CLK_OF_DECLARE(clkgena_c65_plls, + "st,clkgena-plls-c65", clkgena_c65_pll_setup); + +static struct clk * __init clkgen_odf_register(const char *parent_name, + void * __iomem reg, + struct clkgen_pll_data *pll_data, + int odf, + spinlock_t *odf_lock, + const char *odf_name) +{ + struct clk *clk; + unsigned long flags; + struct clk_gate *gate; + struct clk_divider *div; + + flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_GATE; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->reg = reg + pll_data->odf_gate[odf].offset; + gate->bit_idx = pll_data->odf_gate[odf].shift; + gate->lock = odf_lock; + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; + div->reg = reg + pll_data->odf[odf].offset; + div->shift = pll_data->odf[odf].shift; + div->width = fls(pll_data->odf[odf].mask); + div->lock = odf_lock; + + clk = clk_register_composite(NULL, odf_name, &parent_name, 1, + NULL, NULL, + &div->hw, &clk_divider_ops, + &gate->hw, &clk_gate_ops, + flags); + if (IS_ERR(clk)) + return clk; + + pr_debug("%s: parent %s rate %lu\n", + __clk_get_name(clk), + __clk_get_name(clk_get_parent(clk)), + clk_get_rate(clk)); + return clk; +} + +static struct of_device_id c32_pll_of_match[] = { + { + .compatible = "st,plls-c32-a1x-0", + .data = &st_pll3200c32_a1x_0, + }, + { + .compatible = "st,plls-c32-a1x-1", + .data = &st_pll3200c32_a1x_1, + }, + { + .compatible = "st,stih415-plls-c32-a9", + .data = &st_pll3200c32_a9_415, + }, + { + .compatible = "st,stih415-plls-c32-ddr", + .data = &st_pll3200c32_ddr_415, + }, + { + .compatible = "st,stih416-plls-c32-a9", + .data = &st_pll3200c32_a9_416, + }, + { + .compatible = "st,stih416-plls-c32-ddr", + .data = &st_pll3200c32_ddr_416, + }, + {} +}; + +static void __init clkgen_c32_pll_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + const char *parent_name, *pll_name; + void __iomem *pll_base; + int num_odfs, odf; + struct clk_onecell_data *clk_data; + struct clkgen_pll_data *data; + + match = of_match_node(c32_pll_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgen_pll_data *) match->data; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + pll_base = clkgen_get_register_base(np); + if (!pll_base) + return; + + clk = clkgen_pll_register(parent_name, data, pll_base, np->name); + if (IS_ERR(clk)) + return; + + pll_name = __clk_get_name(clk); + + num_odfs = data->num_odfs; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->clk_num = num_odfs; + clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *), + GFP_KERNEL); + + if (!clk_data->clks) + goto err; + + for (odf = 0; odf < num_odfs; odf++) { + struct clk *clk; + const char *clk_name; + + if 
(of_property_read_string_index(np, "clock-output-names", + odf, &clk_name)) + return; + + clk = clkgen_odf_register(pll_name, pll_base, data, + odf, &clkgena_c32_odf_lock, clk_name); + if (IS_ERR(clk)) + goto err; + + clk_data->clks[odf] = clk; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + return; + +err: + kfree(pll_name); + kfree(clk_data->clks); + kfree(clk_data); +} +CLK_OF_DECLARE(clkgen_c32_pll, "st,clkgen-plls-c32", clkgen_c32_pll_setup); + +static struct of_device_id c32_gpu_pll_of_match[] = { + { + .compatible = "st,stih415-gpu-pll-c32", + .data = &st_pll1200c32_gpu_415, + }, + { + .compatible = "st,stih416-gpu-pll-c32", + .data = &st_pll1200c32_gpu_416, + }, +}; + +static void __init clkgengpu_c32_pll_setup(struct device_node *np) +{ + const struct of_device_id *match; + struct clk *clk; + const char *parent_name; + void __iomem *reg; + const char *clk_name; + struct clkgen_pll_data *data; + + match = of_match_node(c32_gpu_pll_of_match, np); + if (!match) { + pr_err("%s: No matching data\n", __func__); + return; + } + + data = (struct clkgen_pll_data *)match->data; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) + return; + + reg = clkgen_get_register_base(np); + if (!reg) + return; + + if (of_property_read_string_index(np, "clock-output-names", + 0, &clk_name)) + return; + + /* + * PLL 1200MHz output + */ + clk = clkgen_pll_register(parent_name, data, reg, clk_name); + + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); + + return; +} +CLK_OF_DECLARE(clkgengpu_c32_pll, + "st,clkgengpu-pll-c32", clkgengpu_c32_pll_setup); diff --git a/drivers/clk/st/clkgen.h b/drivers/clk/st/clkgen.h new file mode 100644 index 000000000000..35c863295268 --- /dev/null +++ b/drivers/clk/st/clkgen.h @@ -0,0 +1,48 @@ +/************************************************************************ +File : Clock H/w specific Information + +Author: Pankaj Dev <pankaj.dev@st.com> + +Copyright (C) 2014 STMicroelectronics +************************************************************************/ + +#ifndef __CLKGEN_INFO_H +#define __CLKGEN_INFO_H + +struct clkgen_field { + unsigned int offset; + unsigned int mask; + unsigned int shift; +}; + +static inline unsigned long clkgen_read(void __iomem *base, + struct clkgen_field *field) +{ + return (readl(base + field->offset) >> field->shift) & field->mask; +} + + +static inline void clkgen_write(void __iomem *base, struct clkgen_field *field, + unsigned long val) +{ + writel((readl(base + field->offset) & + ~(field->mask << field->shift)) | (val << field->shift), + base + field->offset); + + return; +} + +#define CLKGEN_FIELD(_offset, _mask, _shift) { \ + .offset = _offset, \ + .mask = _mask, \ + .shift = _shift, \ + } + +#define CLKGEN_READ(pll, field) clkgen_read(pll->regs_base, \ + &pll->data->field) + +#define CLKGEN_WRITE(pll, field, val) clkgen_write(pll->regs_base, \ + &pll->data->field, val) + +#endif /*__CLKGEN_INFO_H*/ + diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index abb6c5ac8a10..bd7dc733c1ca 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -18,6 +18,7 @@ #include <linux/clkdev.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/reset-controller.h> #include "clk-factors.h" @@ -51,6 +52,8 @@ static void __init sun4i_osc_clk_setup(struct device_node *node) if (!gate) goto err_free_fixed; + of_property_read_string(node, "clock-output-names", &clk_name); + /* set up gate and fixed rate properties */ gate->reg 
= of_iomap(node, 0); gate->bit_idx = SUNXI_OSC24M_GATE; @@ -77,7 +80,7 @@ err_free_gate: err_free_fixed: kfree(fixed); } -CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-osc-clk", sun4i_osc_clk_setup); +CLK_OF_DECLARE(sun4i_osc, "allwinner,sun4i-a10-osc-clk", sun4i_osc_clk_setup); @@ -249,7 +252,38 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate, *n = DIV_ROUND_UP(div, (*k+1)); } +/** + * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6 + * PLL6 rate is calculated as follows + * rate = parent_rate * n * (k + 1) / 2 + * parent_rate is always 24Mhz + */ + +static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate, + u8 *n, u8 *k, u8 *m, u8 *p) +{ + u8 div; + + /* + * We always have 24MHz / 2, so we can just say that our + * parent clock is 12MHz. + */ + parent_rate = parent_rate / 2; + /* Normalize value to a parent_rate multiple (24M / 2) */ + div = *freq / parent_rate; + *freq = parent_rate * div; + + /* we were called to round the frequency, we can now return */ + if (n == NULL) + return; + + *k = div / 32; + if (*k > 3) + *k = 3; + + *n = DIV_ROUND_UP(div, (*k+1)); +} /** * sun4i_get_apb1_factors() - calculates m, p factors for APB1 @@ -265,7 +299,7 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate, if (parent_rate < *freq) *freq = parent_rate; - parent_rate = (parent_rate + (*freq - 1)) / *freq; + parent_rate = DIV_ROUND_UP(parent_rate, *freq); /* Invalid rate! */ if (parent_rate > 32) @@ -296,7 +330,7 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate, /** * sun4i_get_mod0_factors() - calculates m, n factors for MOD0-style clocks - * MMC rate is calculated as follows + * MOD0 rate is calculated as follows * rate = (parent_rate >> p) / (m + 1); */ @@ -310,7 +344,7 @@ static void sun4i_get_mod0_factors(u32 *freq, u32 parent_rate, if (*freq > parent_rate) *freq = parent_rate; - div = parent_rate / *freq; + div = DIV_ROUND_UP(parent_rate, *freq); if (div < 16) calcp = 0; @@ -351,7 +385,7 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate, if (*freq > parent_rate) *freq = parent_rate; - div = parent_rate / *freq; + div = DIV_ROUND_UP(parent_rate, *freq); if (div < 32) calcp = 0; @@ -377,6 +411,102 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate, /** + * sun7i_a20_gmac_clk_setup - Setup function for A20/A31 GMAC clock module + * + * This clock looks something like this + * ________________________ + * MII TX clock from PHY >-----|___________ _________|----> to GMAC core + * GMAC Int. RGMII TX clk >----|___________\__/__gate---|----> to PHY + * Ext. 125MHz RGMII TX clk >--|__divider__/ | + * |________________________| + * + * The external 125 MHz reference is optional, i.e. GMAC can use its + * internal TX clock just fine. The A31 GMAC clock module does not have + * the divider controls for the external reference. + * + * To keep it simple, let the GMAC use either the MII TX clock for MII mode, + * and its internal TX clock for GMII and RGMII modes. The GMAC driver should + * select the appropriate source and gate/ungate the output to the PHY. + * + * Only the GMAC should use this clock. Altering the clock so that it doesn't + * match the GMAC's operation parameters will result in the GMAC not being + * able to send traffic out. The GMAC driver should set the clock rate and + * enable/disable this clock to configure the required state. The clock + * driver then responds by auto-reparenting the clock. 
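+ *
+ * In the register layout assumed by the setup code below, bit 2
+ * (SUN7I_A20_GMAC_GPIT) gates the output towards the PHY and the low two
+ * bits are used as individual index bits (CLK_MUX_INDEX_BIT) to select
+ * the TX clock source.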
+ */ + +#define SUN7I_A20_GMAC_GPIT 2 +#define SUN7I_A20_GMAC_MASK 0x3 +#define SUN7I_A20_GMAC_PARENTS 2 + +static void __init sun7i_a20_gmac_clk_setup(struct device_node *node) +{ + struct clk *clk; + struct clk_mux *mux; + struct clk_gate *gate; + const char *clk_name = node->name; + const char *parents[SUN7I_A20_GMAC_PARENTS]; + void *reg; + + if (of_property_read_string(node, "clock-output-names", &clk_name)) + return; + + /* allocate mux and gate clock structs */ + mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL); + if (!mux) + return; + + gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); + if (!gate) + goto free_mux; + + /* gmac clock requires exactly 2 parents */ + parents[0] = of_clk_get_parent_name(node, 0); + parents[1] = of_clk_get_parent_name(node, 1); + if (!parents[0] || !parents[1]) + goto free_gate; + + reg = of_iomap(node, 0); + if (!reg) + goto free_gate; + + /* set up gate and fixed rate properties */ + gate->reg = reg; + gate->bit_idx = SUN7I_A20_GMAC_GPIT; + gate->lock = &clk_lock; + mux->reg = reg; + mux->mask = SUN7I_A20_GMAC_MASK; + mux->flags = CLK_MUX_INDEX_BIT; + mux->lock = &clk_lock; + + clk = clk_register_composite(NULL, clk_name, + parents, SUN7I_A20_GMAC_PARENTS, + &mux->hw, &clk_mux_ops, + NULL, NULL, + &gate->hw, &clk_gate_ops, + 0); + + if (IS_ERR(clk)) + goto iounmap_reg; + + of_clk_add_provider(node, of_clk_src_simple_get, clk); + clk_register_clkdev(clk, clk_name, NULL); + + return; + +iounmap_reg: + iounmap(reg); +free_gate: + kfree(gate); +free_mux: + kfree(mux); +} +CLK_OF_DECLARE(sun7i_a20_gmac, "allwinner,sun7i-a20-gmac-clk", + sun7i_a20_gmac_clk_setup); + + + +/** * sunxi_factors_clk_setup() - Setup function for factor clocks */ @@ -387,6 +517,7 @@ struct factors_data { int mux; struct clk_factors_config *table; void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p); + const char *name; }; static struct clk_factors_config sun4i_pll1_config = { @@ -416,6 +547,13 @@ static struct clk_factors_config sun4i_pll5_config = { .kwidth = 2, }; +static struct clk_factors_config sun6i_a31_pll6_config = { + .nshift = 8, + .nwidth = 5, + .kshift = 4, + .kwidth = 2, +}; + static struct clk_factors_config sun4i_apb1_config = { .mshift = 0, .mwidth = 5, @@ -451,10 +589,30 @@ static const struct factors_data sun6i_a31_pll1_data __initconst = { .getter = sun6i_a31_get_pll1_factors, }; +static const struct factors_data sun7i_a20_pll4_data __initconst = { + .enable = 31, + .table = &sun4i_pll5_config, + .getter = sun4i_get_pll5_factors, +}; + static const struct factors_data sun4i_pll5_data __initconst = { .enable = 31, .table = &sun4i_pll5_config, .getter = sun4i_get_pll5_factors, + .name = "pll5", +}; + +static const struct factors_data sun4i_pll6_data __initconst = { + .enable = 31, + .table = &sun4i_pll5_config, + .getter = sun4i_get_pll5_factors, + .name = "pll6", +}; + +static const struct factors_data sun6i_a31_pll6_data __initconst = { + .enable = 31, + .table = &sun6i_a31_pll6_config, + .getter = sun6i_a31_get_pll6_factors, }; static const struct factors_data sun4i_apb1_data __initconst = { @@ -497,14 +655,14 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node, (parents[i] = of_clk_get_parent_name(node, i)) != NULL) i++; - /* Nodes should be providing the name via clock-output-names - * but originally our dts didn't, and so we used node->name. 
- * The new, better nodes look like clk@deadbeef, so we pull the - * name just in this case */ - if (!strcmp("clk", clk_name)) { - of_property_read_string_index(node, "clock-output-names", - 0, &clk_name); - } + /* + * some factor clocks, such as pll5 and pll6, may have multiple + * outputs, and have their name designated in factors_data + */ + if (data->name) + clk_name = data->name; + else + of_property_read_string(node, "clock-output-names", &clk_name); factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL); if (!factors) @@ -601,6 +759,8 @@ static void __init sunxi_mux_clk_setup(struct device_node *node, (parents[i] = of_clk_get_parent_name(node, i)) != NULL) i++; + of_property_read_string(node, "clock-output-names", &clk_name); + clk = clk_register_mux(NULL, clk_name, parents, i, CLK_SET_RATE_NO_REPARENT, reg, data->shift, SUNXI_MUX_GATE_WIDTH, @@ -660,6 +820,8 @@ static void __init sunxi_divider_clk_setup(struct device_node *node, clk_parent = of_clk_get_parent_name(node, 0); + of_property_read_string(node, "clock-output-names", &clk_name); + clk = clk_register_divider(NULL, clk_name, clk_parent, 0, reg, data->shift, data->width, data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0, @@ -673,6 +835,59 @@ static void __init sunxi_divider_clk_setup(struct device_node *node, /** + * sunxi_gates_reset... - reset bits in leaf gate clk registers handling + */ + +struct gates_reset_data { + void __iomem *reg; + spinlock_t *lock; + struct reset_controller_dev rcdev; +}; + +static int sunxi_gates_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct gates_reset_data *data = container_of(rcdev, + struct gates_reset_data, + rcdev); + unsigned long flags; + u32 reg; + + spin_lock_irqsave(data->lock, flags); + + reg = readl(data->reg); + writel(reg & ~BIT(id), data->reg); + + spin_unlock_irqrestore(data->lock, flags); + + return 0; +} + +static int sunxi_gates_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct gates_reset_data *data = container_of(rcdev, + struct gates_reset_data, + rcdev); + unsigned long flags; + u32 reg; + + spin_lock_irqsave(data->lock, flags); + + reg = readl(data->reg); + writel(reg | BIT(id), data->reg); + + spin_unlock_irqrestore(data->lock, flags); + + return 0; +} + +static struct reset_control_ops sunxi_gates_reset_ops = { + .assert = sunxi_gates_reset_assert, + .deassert = sunxi_gates_reset_deassert, +}; + +/** * sunxi_gates_clk_setup() - Setup function for leaf gates on clocks */ @@ -680,6 +895,7 @@ static void __init sunxi_divider_clk_setup(struct device_node *node, struct gates_data { DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE); + u32 reset_mask; }; static const struct gates_data sun4i_axi_gates_data __initconst = { @@ -746,10 +962,21 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = { .mask = { 0xff80ff }, }; +static const struct gates_data sun4i_a10_usb_gates_data __initconst = { + .mask = {0x1C0}, + .reset_mask = 0x07, +}; + +static const struct gates_data sun5i_a13_usb_gates_data __initconst = { + .mask = {0x140}, + .reset_mask = 0x03, +}; + static void __init sunxi_gates_clk_setup(struct device_node *node, struct gates_data *data) { struct clk_onecell_data *clk_data; + struct gates_reset_data *reset_data; const char *clk_parent; const char *clk_name; void *reg; @@ -793,6 +1020,21 @@ static void __init sunxi_gates_clk_setup(struct device_node *node, clk_data->clk_num = i; of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + + /* Register a reset controler for gates with reset bits 
*/ + if (data->reset_mask == 0) + return; + + reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL); + if (!reset_data) + return; + + reset_data->reg = reg; + reset_data->lock = &clk_lock; + reset_data->rcdev.nr_resets = __fls(data->reset_mask) + 1; + reset_data->rcdev.ops = &sunxi_gates_reset_ops; + reset_data->rcdev.of_node = node; + reset_controller_register(&reset_data->rcdev); } @@ -832,7 +1074,7 @@ static const struct divs_data pll5_divs_data __initconst = { }; static const struct divs_data pll6_divs_data __initconst = { - .factors = &sun4i_pll5_data, + .factors = &sun4i_pll6_data, .div = { { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */ { .fixed = 2 }, /* P, other */ @@ -854,7 +1096,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node, struct divs_data *data) { struct clk_onecell_data *clk_data; - const char *parent = node->name; + const char *parent; const char *clk_name; struct clk **clks, *pclk; struct clk_hw *gate_hw, *rate_hw; @@ -868,6 +1110,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node, /* Set up factor clock that we will be dividing */ pclk = sunxi_factors_clk_setup(node, data->factors); + parent = __clk_get_name(pclk); reg = of_iomap(node, 0); @@ -970,56 +1213,60 @@ free_clkdata: /* Matches for factors clocks */ static const struct of_device_id clk_factors_match[] __initconst = { - {.compatible = "allwinner,sun4i-pll1-clk", .data = &sun4i_pll1_data,}, + {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,}, {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,}, - {.compatible = "allwinner,sun4i-apb1-clk", .data = &sun4i_apb1_data,}, - {.compatible = "allwinner,sun4i-mod0-clk", .data = &sun4i_mod0_data,}, + {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,}, + {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,}, + {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,}, + {.compatible = "allwinner,sun4i-a10-mod0-clk", .data = &sun4i_mod0_data,}, {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,}, {} }; /* Matches for divider clocks */ static const struct of_device_id clk_div_match[] __initconst = { - {.compatible = "allwinner,sun4i-axi-clk", .data = &sun4i_axi_data,}, - {.compatible = "allwinner,sun4i-ahb-clk", .data = &sun4i_ahb_data,}, - {.compatible = "allwinner,sun4i-apb0-clk", .data = &sun4i_apb0_data,}, + {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,}, + {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,}, + {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,}, {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,}, {} }; /* Matches for divided outputs */ static const struct of_device_id clk_divs_match[] __initconst = { - {.compatible = "allwinner,sun4i-pll5-clk", .data = &pll5_divs_data,}, - {.compatible = "allwinner,sun4i-pll6-clk", .data = &pll6_divs_data,}, + {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,}, + {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,}, {} }; /* Matches for mux clocks */ static const struct of_device_id clk_mux_match[] __initconst = { - {.compatible = "allwinner,sun4i-cpu-clk", .data = &sun4i_cpu_mux_data,}, - {.compatible = "allwinner,sun4i-apb1-mux-clk", .data = &sun4i_apb1_mux_data,}, + {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,}, + {.compatible = 
"allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,}, {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,}, {} }; /* Matches for gate clocks */ static const struct of_device_id clk_gates_match[] __initconst = { - {.compatible = "allwinner,sun4i-axi-gates-clk", .data = &sun4i_axi_gates_data,}, - {.compatible = "allwinner,sun4i-ahb-gates-clk", .data = &sun4i_ahb_gates_data,}, + {.compatible = "allwinner,sun4i-a10-axi-gates-clk", .data = &sun4i_axi_gates_data,}, + {.compatible = "allwinner,sun4i-a10-ahb-gates-clk", .data = &sun4i_ahb_gates_data,}, {.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,}, {.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,}, {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,}, {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,}, - {.compatible = "allwinner,sun4i-apb0-gates-clk", .data = &sun4i_apb0_gates_data,}, + {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,}, {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,}, {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,}, {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,}, - {.compatible = "allwinner,sun4i-apb1-gates-clk", .data = &sun4i_apb1_gates_data,}, + {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,}, {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,}, {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,}, {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,}, {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,}, {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,}, + {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,}, + {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,}, {} }; diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c index 4d75b1f37e3a..290f9c1a3749 100644 --- a/drivers/clk/tegra/clk-divider.c +++ b/drivers/clk/tegra/clk-divider.c @@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate, return 0; if (divider_ux1 > get_max_div(divider)) - return -EINVAL; + return get_max_div(divider); return divider_ux1; } diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index cf0c323f2c36..c39613c519af 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -180,9 +180,13 @@ enum clk_id { tegra_clk_sbc6_8, tegra_clk_sclk, tegra_clk_sdmmc1, + tegra_clk_sdmmc1_8, tegra_clk_sdmmc2, + tegra_clk_sdmmc2_8, tegra_clk_sdmmc3, + tegra_clk_sdmmc3_8, tegra_clk_sdmmc4, + tegra_clk_sdmmc4_8, tegra_clk_se, tegra_clk_soc_therm, tegra_clk_sor0, diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c index 356e9b804421..9e899c18af86 100644 --- a/drivers/clk/tegra/clk-periph.c +++ b/drivers/clk/tegra/clk-periph.c @@ -130,7 +130,7 @@ static const struct clk_ops tegra_clk_periph_nodiv_ops = { .disable = clk_periph_disable, }; -const struct clk_ops tegra_clk_periph_no_gate_ops = { +static const struct clk_ops tegra_clk_periph_no_gate_ops = { .get_parent = clk_periph_get_parent, .set_parent = 
clk_periph_set_parent, .recalc_rate = clk_periph_recalc_rate, diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 5c35885f4a7c..1fa5c3f33b20 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = { static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = { "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m" }; -static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = { - [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6, -}; +#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = { "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4", @@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = { MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1), MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1), MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2), + MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8), + MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8), + MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8), + MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8), MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8), MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8), MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8), @@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = { UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb), UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc), UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd), - UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte), + UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte), XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src), XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src), XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src), diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c index 05dce4aa2c11..feb3201c85ce 100644 --- a/drivers/clk/tegra/clk-tegra-super-gen4.c +++ b/drivers/clk/tegra/clk-tegra-super-gen4.c @@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base, ARRAY_SIZE(cclk_lp_parents), CLK_SET_RATE_PARENT, clk_base + CCLKLP_BURST_POLICY, - 0, 4, 8, 9, NULL); + TEGRA_DIVIDER_2, 4, 8, 9, NULL); *dt_clk = clk; } diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 90d9d25f2228..80431f0fb268 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, 
.present = true }, [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true }, [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true }, - [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true }, + [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true }, [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true }, [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true }, [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true }, - [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true }, - [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true }, + [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true }, + [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true }, [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true }, [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true }, [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true }, @@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true }, [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true }, [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true }, - [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true }, + [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true }, [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true }, [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true }, [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true }, diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index aff86b5bc745..166e02f16c8a 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = { }; static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { - {12000000, 216000000, 432, 12, 1, 8}, - {13000000, 216000000, 432, 13, 1, 8}, - {16800000, 216000000, 360, 14, 1, 8}, - {19200000, 216000000, 360, 16, 1, 8}, - {26000000, 216000000, 432, 26, 1, 8}, + {12000000, 408000000, 408, 12, 0, 8}, + {13000000, 408000000, 408, 13, 0, 8}, + {16800000, 408000000, 340, 14, 0, 8}, + {19200000, 408000000, 340, 16, 0, 8}, + {26000000, 408000000, 408, 26, 0, 8}, {0, 0, 0, 0, 0, 0}, }; @@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = { .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK, }; +static struct div_nmp plld_nmp = { + .divm_shift = 0, + .divm_width = 5, + .divn_shift = 8, + .divn_width = 11, + .divp_shift = 20, + .divp_width = 3, +}; + static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { {12000000, 216000000, 864, 12, 4, 12}, {13000000, 216000000, 864, 13, 4, 12}, @@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = { .lock_mask = PLL_BASE_LOCK, .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE, .lock_delay = 1000, - .div_nmp = &pllp_nmp, + .div_nmp = &plld_nmp, .freq_table = pll_d_freq_table, .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK, }; static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = { - { 12000000, 148500000, 99, 1, 8}, - { 12000000, 594000000, 99, 1, 1}, - { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */ - { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */ - { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */ - { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */ + { 
12000000, 594000000, 99, 1, 2}, + { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */ + { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */ + { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */ + { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */ { 0, 0, 0, 0, 0, 0 }, }; @@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true }, [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true }, [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true }, - [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, + [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true }, [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true }, [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true }, - [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, - [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, + [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, + [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true }, [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true }, - [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true }, [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true }, [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true }, - [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true }, [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true }, [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true }, - [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, + [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true }, [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true }, [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true }, @@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true }, [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true }, [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, - [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, + [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, @@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base, clk_register_clkdev(clk, "pll_d2", NULL); clks[TEGRA124_CLK_PLL_D2] = clk; - /* PLLD2_OUT0 ?? 
*/ + /* PLLD2_OUT0 */ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", - CLK_SET_RATE_PARENT, 1, 2); + CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "pll_d2_out0", NULL); clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index dbace152b2fa..dace2b1b5ae6 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = { [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, + [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true }, + [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true }, }; static unsigned long tegra20_clk_measure_input_freq(void) diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index 776ee4594bd4..028b33783d38 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c @@ -34,7 +34,6 @@ static struct ti_dt_clk am33xx_clks[] = { DT_CLK(NULL, "dpll_core_m5_ck", "dpll_core_m5_ck"), DT_CLK(NULL, "dpll_core_m6_ck", "dpll_core_m6_ck"), DT_CLK(NULL, "dpll_mpu_ck", "dpll_mpu_ck"), - DT_CLK("cpu0", NULL, "dpll_mpu_ck"), DT_CLK(NULL, "dpll_mpu_m2_ck", "dpll_mpu_m2_ck"), DT_CLK(NULL, "dpll_ddr_ck", "dpll_ddr_ck"), DT_CLK(NULL, "dpll_ddr_m2_ck", "dpll_ddr_m2_ck"), diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index ae00218b5da3..02517a8206bd 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -222,7 +222,6 @@ static struct ti_dt_clk omap44xx_clks[] = { DT_CLK(NULL, "auxclk5_src_ck", "auxclk5_src_ck"), DT_CLK(NULL, "auxclk5_ck", "auxclk5_ck"), DT_CLK(NULL, "auxclkreq5_ck", "auxclkreq5_ck"), - DT_CLK("50000000.gpmc", "fck", "dummy_ck"), DT_CLK("omap_i2c.1", "ick", "dummy_ck"), DT_CLK("omap_i2c.2", "ick", "dummy_ck"), DT_CLK("omap_i2c.3", "ick", "dummy_ck"), diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index 0ef9f581286b..08f3d1b915b3 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -182,7 +182,6 @@ static struct ti_dt_clk omap54xx_clks[] = { DT_CLK(NULL, "auxclk3_src_ck", "auxclk3_src_ck"), DT_CLK(NULL, "auxclk3_ck", "auxclk3_ck"), DT_CLK(NULL, "auxclkreq3_ck", "auxclkreq3_ck"), - DT_CLK(NULL, "gpmc_ck", "dummy_ck"), DT_CLK("omap_i2c.1", "ick", "dummy_ck"), DT_CLK("omap_i2c.2", "ick", "dummy_ck"), DT_CLK("omap_i2c.3", "ick", "dummy_ck"), diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9977653f2d63..f7e40734c819 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -262,7 +262,6 @@ static struct ti_dt_clk dra7xx_clks[] = { DT_CLK(NULL, "vip1_gclk_mux", "vip1_gclk_mux"), DT_CLK(NULL, "vip2_gclk_mux", "vip2_gclk_mux"), DT_CLK(NULL, "vip3_gclk_mux", "vip3_gclk_mux"), - DT_CLK(NULL, "gpmc_ck", "dummy_ck"), DT_CLK("omap_i2c.1", "ick", "dummy_ck"), DT_CLK("omap_i2c.2", "ick", "dummy_ck"), DT_CLK("omap_i2c.3", "ick", "dummy_ck"), diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index a15e445570b2..e6aa10db7bba 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -112,7 +112,7 @@ static unsigned long ti_clk_divider_recalc_rate(struct clk_hw *hw, return parent_rate; } - return parent_rate / div; + return DIV_ROUND_UP(parent_rate, div); } /* @@ -182,7 +182,7 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, } 
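The DIV_ROUND_UP conversions in this TI divider code (recalc_rate here, and the round_rate/set_rate paths just below) make the divisor round up instead of down, so a caller asking for a given rate is never handed a faster clock than it requested. A minimal standalone sketch of the difference, with made-up numbers rather than anything taken from this driver:

/*
 * Userspace illustration only, not kernel code.  Shows why the divider
 * helpers switch from plain integer division to DIV_ROUND_UP.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long parent = 10000000;	/* 10 MHz parent clock */
	unsigned long request = 3000000;	/* consumer asks for 3 MHz */

	unsigned long div_floor = parent / request;		/* 3 */
	unsigned long div_ceil  = DIV_ROUND_UP(parent, request);	/* 4 */

	printf("floor: div=%lu -> %lu Hz (above the request)\n",
	       div_floor, parent / div_floor);		/* 3333333 Hz */
	printf("ceil:  div=%lu -> %lu Hz (never above the request)\n",
	       div_ceil, parent / div_ceil);		/* 2500000 Hz */
	return 0;
}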
parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), MULT_ROUND_UP(rate, i)); - now = parent_rate / i; + now = DIV_ROUND_UP(parent_rate, i); if (now <= rate && now > best) { bestdiv = i; best = now; @@ -205,7 +205,7 @@ static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, int div; div = ti_clk_divider_bestdiv(hw, rate, prate); - return *prate / div; + return DIV_ROUND_UP(*prate, div); } static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, @@ -216,7 +216,7 @@ static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long flags = 0; u32 val; - div = parent_rate / rate; + div = DIV_ROUND_UP(parent_rate, rate); value = _get_val(divider, div); if (value > div_mask(divider)) diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c index cdeff299de26..7b55ef89baa5 100644 --- a/drivers/clk/ux500/u8500_of_clk.c +++ b/drivers/clk/ux500/u8500_of_clk.c @@ -29,7 +29,8 @@ static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_C #define PRCC_KCLK_STORE(clk, base, bit) \ prcc_kclk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] = clk -struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, void *data) +static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, + void *data) { struct clk **clk_data = data; unsigned int base, bit; diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index 8cbfcf88fae3..a820b0cfcf57 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c @@ -33,7 +33,7 @@ struct clk_icst { struct clk_hw hw; void __iomem *vcoreg; void __iomem *lockreg; - const struct icst_params *params; + struct icst_params *params; unsigned long rate; }; @@ -84,6 +84,8 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw, struct clk_icst *icst = to_icst(hw); struct icst_vco vco; + if (parent_rate) + icst->params->ref = parent_rate; vco = vco_get(icst->vcoreg); icst->rate = icst_hz(icst->params, vco); return icst->rate; @@ -105,6 +107,8 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_icst *icst = to_icst(hw); struct icst_vco vco; + if (parent_rate) + icst->params->ref = parent_rate; vco = icst_hz_to_vco(icst->params, rate); icst->rate = icst_hz(icst->params, vco); vco_set(icst->lockreg, icst->vcoreg, vco); @@ -120,24 +124,33 @@ static const struct clk_ops icst_ops = { struct clk *icst_clk_register(struct device *dev, const struct clk_icst_desc *desc, const char *name, + const char *parent_name, void __iomem *base) { struct clk *clk; struct clk_icst *icst; struct clk_init_data init; + struct icst_params *pclone; icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL); if (!icst) { pr_err("could not allocate ICST clock!\n"); return ERR_PTR(-ENOMEM); } + + pclone = kmemdup(desc->params, sizeof(*pclone), GFP_KERNEL); + if (!pclone) { + pr_err("could not clone ICST params\n"); + return ERR_PTR(-ENOMEM); + } + init.name = name; init.ops = &icst_ops; init.flags = CLK_IS_ROOT; - init.parent_names = NULL; - init.num_parents = 0; + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 
1 : 0); icst->hw.init = &init; - icst->params = desc->params; + icst->params = pclone; icst->vcoreg = base + desc->vco_offset; icst->lockreg = base + desc->lock_offset; diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h index be99dd0da785..04e6f0aef588 100644 --- a/drivers/clk/versatile/clk-icst.h +++ b/drivers/clk/versatile/clk-icst.h @@ -16,4 +16,5 @@ struct clk_icst_desc { struct clk *icst_clk_register(struct device *dev, const struct clk_icst_desc *desc, const char *name, + const char *parent_name, void __iomem *base); diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c index 844f8d711a12..31b44f025f9e 100644 --- a/drivers/clk/versatile/clk-impd1.c +++ b/drivers/clk/versatile/clk-impd1.c @@ -13,10 +13,12 @@ #include <linux/io.h> #include <linux/platform_data/clk-integrator.h> -#include <mach/impd1.h> - #include "clk-icst.h" +#define IMPD1_OSC1 0x00 +#define IMPD1_OSC2 0x04 +#define IMPD1_LOCK 0x08 + struct impd1_clk { char *vco1name; struct clk *vco1clk; @@ -93,13 +95,15 @@ void integrator_impd1_clk_init(void __iomem *base, unsigned int id) imc = &impd1_clks[id]; imc->vco1name = kasprintf(GFP_KERNEL, "lm%x-vco1", id); - clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, base); + clk = icst_clk_register(NULL, &impd1_icst1_desc, imc->vco1name, NULL, + base); imc->vco1clk = clk; imc->clks[0] = clkdev_alloc(clk, NULL, "lm%x:01000", id); /* VCO2 is also called "CLK2" */ imc->vco2name = kasprintf(GFP_KERNEL, "lm%x-vco2", id); - clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, base); + clk = icst_clk_register(NULL, &impd1_icst2_desc, imc->vco2name, NULL, + base); imc->vco2clk = clk; /* MMCI uses CLK2 right off */ diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c index bda8967e09c2..734c4b8fe6ab 100644 --- a/drivers/clk/versatile/clk-integrator.c +++ b/drivers/clk/versatile/clk-integrator.c @@ -10,21 +10,17 @@ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> -#include <linux/platform_data/clk-integrator.h> - -#include <mach/hardware.h> -#include <mach/platform.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "clk-icst.h" -/* - * Implementation of the ARM Integrator/AP and Integrator/CP clock tree. 
- * Inspired by portions of: - * plat-versatile/clock.c and plat-versatile/include/plat/clock.h - */ +#define INTEGRATOR_HDR_LOCK_OFFSET 0x14 -static const struct icst_params cp_auxvco_params = { - .ref = 24000000, +/* Base offset for the core module */ +static void __iomem *cm_base; + +static const struct icst_params cp_auxosc_params = { .vco_max = ICST525_VCO_MAX_5V, .vco_min = ICST525_VCO_MIN, .vd_min = 8, @@ -35,50 +31,39 @@ static const struct icst_params cp_auxvco_params = { .idx2s = icst525_idx2s, }; -static const struct clk_icst_desc __initdata cp_icst_desc = { - .params = &cp_auxvco_params, +static const struct clk_icst_desc __initdata cm_auxosc_desc = { + .params = &cp_auxosc_params, .vco_offset = 0x1c, .lock_offset = INTEGRATOR_HDR_LOCK_OFFSET, }; -/* - * integrator_clk_init() - set up the integrator clock tree - * @is_cp: pass true if it's the Integrator/CP else AP is assumed - */ -void __init integrator_clk_init(bool is_cp) +static void __init of_integrator_cm_osc_setup(struct device_node *np) { - struct clk *clk; - - /* APB clock dummy */ - clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0); - clk_register_clkdev(clk, "apb_pclk", NULL); - - /* UART reference clock */ - clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT, - 14745600); - clk_register_clkdev(clk, NULL, "uart0"); - clk_register_clkdev(clk, NULL, "uart1"); - if (is_cp) - clk_register_clkdev(clk, NULL, "mmci"); - - /* 24 MHz clock */ - clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT, - 24000000); - clk_register_clkdev(clk, NULL, "kmi0"); - clk_register_clkdev(clk, NULL, "kmi1"); - if (!is_cp) - clk_register_clkdev(clk, NULL, "ap_timer"); + struct clk *clk = ERR_PTR(-EINVAL); + const char *clk_name = np->name; + const struct clk_icst_desc *desc = &cm_auxosc_desc; + const char *parent_name; - if (!is_cp) - return; + if (!cm_base) { + /* Remap the core module base if not done yet */ + struct device_node *parent; - /* 1 MHz clock */ - clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT, - 1000000); - clk_register_clkdev(clk, NULL, "sp804"); + parent = of_get_parent(np); + if (!np) { + pr_err("no parent on core module clock\n"); + return; + } + cm_base = of_iomap(parent, 0); + if (!cm_base) { + pr_err("could not remap core module base\n"); + return; + } + } - /* ICST VCO clock used on the Integrator/CP CLCD */ - clk = icst_clk_register(NULL, &cp_icst_desc, "icst", - __io_address(INTEGRATOR_HDR_BASE)); - clk_register_clkdev(clk, NULL, "clcd"); + parent_name = of_clk_get_parent_name(np, 0); + clk = icst_clk_register(NULL, desc, clk_name, parent_name, cm_base); + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); } +CLK_OF_DECLARE(integrator_cm_auxosc_clk, + "arm,integrator-cm-auxosc", of_integrator_cm_osc_setup); diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c index 747e7b31117c..c8b523117fb7 100644 --- a/drivers/clk/versatile/clk-realview.c +++ b/drivers/clk/versatile/clk-realview.c @@ -85,10 +85,10 @@ void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176) /* ICST VCO clock */ if (is_pb1176) clk = icst_clk_register(NULL, &realview_osc0_desc, - "osc0", sysbase); + "osc0", NULL, sysbase); else clk = icst_clk_register(NULL, &realview_osc4_desc, - "osc4", sysbase); + "osc4", NULL, sysbase); clk_register_clkdev(clk, NULL, "dev:clcd"); clk_register_clkdev(clk, NULL, "issp:clcd"); diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 09dd0173ea0a..52c09afdcfb7 100644 --- 
a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -21,34 +21,35 @@ #include <linux/clk/zynq.h> #include <linux/clk-provider.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/io.h> -static void __iomem *zynq_slcr_base_priv; - -#define SLCR_ARMPLL_CTRL (zynq_slcr_base_priv + 0x100) -#define SLCR_DDRPLL_CTRL (zynq_slcr_base_priv + 0x104) -#define SLCR_IOPLL_CTRL (zynq_slcr_base_priv + 0x108) -#define SLCR_PLL_STATUS (zynq_slcr_base_priv + 0x10c) -#define SLCR_ARM_CLK_CTRL (zynq_slcr_base_priv + 0x120) -#define SLCR_DDR_CLK_CTRL (zynq_slcr_base_priv + 0x124) -#define SLCR_DCI_CLK_CTRL (zynq_slcr_base_priv + 0x128) -#define SLCR_APER_CLK_CTRL (zynq_slcr_base_priv + 0x12c) -#define SLCR_GEM0_CLK_CTRL (zynq_slcr_base_priv + 0x140) -#define SLCR_GEM1_CLK_CTRL (zynq_slcr_base_priv + 0x144) -#define SLCR_SMC_CLK_CTRL (zynq_slcr_base_priv + 0x148) -#define SLCR_LQSPI_CLK_CTRL (zynq_slcr_base_priv + 0x14c) -#define SLCR_SDIO_CLK_CTRL (zynq_slcr_base_priv + 0x150) -#define SLCR_UART_CLK_CTRL (zynq_slcr_base_priv + 0x154) -#define SLCR_SPI_CLK_CTRL (zynq_slcr_base_priv + 0x158) -#define SLCR_CAN_CLK_CTRL (zynq_slcr_base_priv + 0x15c) -#define SLCR_CAN_MIOCLK_CTRL (zynq_slcr_base_priv + 0x160) -#define SLCR_DBG_CLK_CTRL (zynq_slcr_base_priv + 0x164) -#define SLCR_PCAP_CLK_CTRL (zynq_slcr_base_priv + 0x168) -#define SLCR_FPGA0_CLK_CTRL (zynq_slcr_base_priv + 0x170) -#define SLCR_621_TRUE (zynq_slcr_base_priv + 0x1c4) -#define SLCR_SWDT_CLK_SEL (zynq_slcr_base_priv + 0x304) +static void __iomem *zynq_clkc_base; + +#define SLCR_ARMPLL_CTRL (zynq_clkc_base + 0x00) +#define SLCR_DDRPLL_CTRL (zynq_clkc_base + 0x04) +#define SLCR_IOPLL_CTRL (zynq_clkc_base + 0x08) +#define SLCR_PLL_STATUS (zynq_clkc_base + 0x0c) +#define SLCR_ARM_CLK_CTRL (zynq_clkc_base + 0x20) +#define SLCR_DDR_CLK_CTRL (zynq_clkc_base + 0x24) +#define SLCR_DCI_CLK_CTRL (zynq_clkc_base + 0x28) +#define SLCR_APER_CLK_CTRL (zynq_clkc_base + 0x2c) +#define SLCR_GEM0_CLK_CTRL (zynq_clkc_base + 0x40) +#define SLCR_GEM1_CLK_CTRL (zynq_clkc_base + 0x44) +#define SLCR_SMC_CLK_CTRL (zynq_clkc_base + 0x48) +#define SLCR_LQSPI_CLK_CTRL (zynq_clkc_base + 0x4c) +#define SLCR_SDIO_CLK_CTRL (zynq_clkc_base + 0x50) +#define SLCR_UART_CLK_CTRL (zynq_clkc_base + 0x54) +#define SLCR_SPI_CLK_CTRL (zynq_clkc_base + 0x58) +#define SLCR_CAN_CLK_CTRL (zynq_clkc_base + 0x5c) +#define SLCR_CAN_MIOCLK_CTRL (zynq_clkc_base + 0x60) +#define SLCR_DBG_CLK_CTRL (zynq_clkc_base + 0x64) +#define SLCR_PCAP_CLK_CTRL (zynq_clkc_base + 0x68) +#define SLCR_FPGA0_CLK_CTRL (zynq_clkc_base + 0x70) +#define SLCR_621_TRUE (zynq_clkc_base + 0xc4) +#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204) #define NUM_MIO_PINS 54 @@ -148,7 +149,7 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk, clks[fclk] = clk_register_gate(NULL, clk_name, div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg, 0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock); - enable_reg = readl(fclk_gate_reg) & 1; + enable_reg = clk_readl(fclk_gate_reg) & 1; if (enable && !enable_reg) { if (clk_prepare_enable(clks[fclk])) pr_warn("%s: FCLK%u enable failed\n", __func__, @@ -277,7 +278,7 @@ static void __init zynq_clk_setup(struct device_node *np) SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock); /* CPU clocks */ - tmp = readl(SLCR_621_TRUE) & 1; + tmp = clk_readl(SLCR_621_TRUE) & 1; clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4, CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0, &armclk_lock); @@ -569,8 +570,42 @@ static void __init 
zynq_clk_setup(struct device_node *np) CLK_OF_DECLARE(zynq_clkc, "xlnx,ps7-clkc", zynq_clk_setup); -void __init zynq_clock_init(void __iomem *slcr_base) +void __init zynq_clock_init(void) { - zynq_slcr_base_priv = slcr_base; - of_clk_init(NULL); + struct device_node *np; + struct device_node *slcr; + struct resource res; + + np = of_find_compatible_node(NULL, NULL, "xlnx,ps7-clkc"); + if (!np) { + pr_err("%s: clkc node not found\n", __func__); + goto np_err; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_err("%s: failed to get resource\n", np->name); + goto np_err; + } + + slcr = of_get_parent(np); + + if (slcr->data) { + zynq_clkc_base = (__force void __iomem *)slcr->data + res.start; + } else { + pr_err("%s: Unable to get I/O memory\n", np->name); + of_node_put(slcr); + goto np_err; + } + + pr_info("%s: clkc starts at %p\n", __func__, zynq_clkc_base); + + of_node_put(slcr); + of_node_put(np); + + return; + +np_err: + of_node_put(np); + BUG(); + return; } diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c index 3226f54fa595..cec97596fe65 100644 --- a/drivers/clk/zynq/pll.c +++ b/drivers/clk/zynq/pll.c @@ -90,7 +90,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw, * makes probably sense to redundantly save fbdiv in the struct * zynq_pll to save the IO access. */ - fbdiv = (readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >> + fbdiv = (clk_readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >> PLLCTRL_FBDIV_SHIFT; return parent_rate * fbdiv; @@ -112,7 +112,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw) spin_lock_irqsave(clk->lock, flags); - reg = readl(clk->pll_ctrl); + reg = clk_readl(clk->pll_ctrl); spin_unlock_irqrestore(clk->lock, flags); @@ -138,10 +138,10 @@ static int zynq_pll_enable(struct clk_hw *hw) /* Power up PLL and wait for lock */ spin_lock_irqsave(clk->lock, flags); - reg = readl(clk->pll_ctrl); + reg = clk_readl(clk->pll_ctrl); reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK); - writel(reg, clk->pll_ctrl); - while (!(readl(clk->pll_status) & (1 << clk->lockbit))) + clk_writel(reg, clk->pll_ctrl); + while (!(clk_readl(clk->pll_status) & (1 << clk->lockbit))) ; spin_unlock_irqrestore(clk->lock, flags); @@ -168,9 +168,9 @@ static void zynq_pll_disable(struct clk_hw *hw) /* shut down PLL */ spin_lock_irqsave(clk->lock, flags); - reg = readl(clk->pll_ctrl); + reg = clk_readl(clk->pll_ctrl); reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK; - writel(reg, clk->pll_ctrl); + clk_writel(reg, clk->pll_ctrl); spin_unlock_irqrestore(clk->lock, flags); } @@ -225,9 +225,9 @@ struct clk *clk_register_zynq_pll(const char *name, const char *parent, spin_lock_irqsave(pll->lock, flags); - reg = readl(pll->pll_ctrl); + reg = clk_readl(pll->pll_ctrl); reg &= ~PLLCTRL_BPQUAL_MASK; - writel(reg, pll->pll_ctrl); + clk_writel(reg, pll->pll_ctrl); spin_unlock_irqrestore(pll->lock, flags); |
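For reference, the sun6i_a31_get_pll6_factors() helper added earlier in this patch picks n and k for rate = parent_rate * n * (k + 1) / 2 with a fixed 24 MHz parent. A userspace sketch of the same arithmetic, using a 600 MHz request purely as an example input:

/*
 * Standalone illustration, not kernel code; mirrors the factor selection
 * in sun6i_a31_get_pll6_factors() above.
 */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static uint32_t a31_pll6_factors(uint32_t freq, uint32_t parent,
				 uint8_t *n, uint8_t *k)
{
	uint8_t div;

	parent /= 2;			/* fixed /2 on the PLL6 output */
	div = freq / parent;		/* normalize to a 12 MHz step */
	freq = parent * div;

	*k = div / 32;
	if (*k > 3)
		*k = 3;
	*n = DIV_ROUND_UP(div, *k + 1);

	return freq;			/* the rate the clock will report */
}

int main(void)
{
	uint8_t n, k;
	uint32_t rate = a31_pll6_factors(600000000, 24000000, &n, &k);

	/* prints: n=25 k=1 -> 600000000 Hz (24 MHz * 25 * (1 + 1) / 2) */
	printf("n=%u k=%u -> %u Hz\n", n, k, rate);
	return 0;
}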
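The sun7i-a20-gmac-clk registered earlier in this patch is meant to be controlled only by the GMAC driver, which picks between the MII TX clock and the internal RGMII TX clock simply by setting the rate and then ungating the output. A rough consumer-side sketch using the generic clk API; the "allwinner_gmac_tx" lookup name and the surrounding function are assumptions for illustration, not taken from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Illustrative only: configure the GMAC TX clock for MII or (R)GMII mode */
static int gmac_tx_clk_setup(struct device *dev, bool rgmii)
{
	struct clk *tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
	int ret;

	if (IS_ERR(tx_clk))
		return PTR_ERR(tx_clk);

	/*
	 * 125 MHz selects the internal RGMII TX clock, 25 MHz the MII TX
	 * clock from the PHY; the clock driver reparents the mux to match.
	 */
	ret = clk_set_rate(tx_clk, rgmii ? 125000000 : 25000000);
	if (ret)
		return ret;

	/* Ungate the TX clock output towards the PHY */
	return clk_prepare_enable(tx_clk);
}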
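The USB gate clocks added in this patch also register a small reset controller, one reset line per bit in reset_mask. A hypothetical consumer (for example a USB PHY or host glue driver) would pick up its line through the standard reset API, along these lines; the function and its device handling are only an assumption for the example:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Illustrative only: take a block out of reset before touching it */
static int example_deassert_reset(struct device *dev)
{
	/* NULL id: use the first reset specifier in the consumer's DT node */
	struct reset_control *rst = devm_reset_control_get(dev, NULL);

	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* Deassert sets the matching bit in the USB clock-gate register */
	return reset_control_deassert(rst);
}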