Diffstat (limited to 'drivers')
141 files changed, 8366 insertions, 1654 deletions
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index edb2622fd35f..95cc2a9f3e05 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -545,7 +545,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) != REGION_INTERSECTS) && (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) - != REGION_INTERSECTS))) + != REGION_INTERSECTS) && + !arch_is_platform_page(base_addr))) return -EINVAL; inject: diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0c8330ed1ffd..0c5c9acc6254 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -449,7 +449,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; pfn = PHYS_PFN(physical_addr); - if (!pfn_valid(pfn)) { + if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) { pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid address in generic error data: %#llx\n", physical_addr); diff --git a/drivers/base/node.c b/drivers/base/node.c index b5a4ba18f9f9..87acc47e8951 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -581,6 +581,9 @@ static const struct attribute_group node_dev_group = { static const struct attribute_group *node_dev_groups[] = { &node_dev_group, +#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP + &arch_node_dev_group, +#endif NULL }; diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index 28bb65a5613f..bccb275b65ba 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -21,6 +21,7 @@ struct imx_weim_devtype { unsigned int cs_stride; unsigned int wcr_offset; unsigned int wcr_bcm; + unsigned int wcr_cont_bclk; }; static const struct imx_weim_devtype imx1_weim_devtype = { @@ -41,6 +42,7 @@ static const struct imx_weim_devtype imx50_weim_devtype = { .cs_stride = 0x18, .wcr_offset = 0x90, .wcr_bcm = BIT(0), + .wcr_cont_bclk = BIT(3), }; static const struct imx_weim_devtype imx51_weim_devtype = { @@ -206,8 +208,20 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base) if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) { if (devtype->wcr_bcm) { reg = readl(base + devtype->wcr_offset); - writel(reg | devtype->wcr_bcm, - base + devtype->wcr_offset); + reg |= devtype->wcr_bcm; + + if (of_property_read_bool(pdev->dev.of_node, + "fsl,continuous-burst-clk")) { + if (devtype->wcr_cont_bclk) { + reg |= devtype->wcr_cont_bclk; + } else { + dev_err(&pdev->dev, + "continuous burst clk not supported.\n"); + return -EINVAL; + } + } + + writel(reg, base + devtype->wcr_offset); } else { dev_err(&pdev->dev, "burst clk mode not supported.\n"); return -EINVAL; diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c index a6570789f7af..35b59f92fa66 100644 --- a/drivers/bus/tegra-gmi.c +++ b/drivers/bus/tegra-gmi.c @@ -13,8 +13,11 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> +#include <soc/tegra/common.h> + #define TEGRA_GMI_CONFIG 0x00 #define TEGRA_GMI_CONFIG_GO BIT(31) #define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30) @@ -54,9 +57,10 @@ static int tegra_gmi_enable(struct tegra_gmi *gmi) { int err; - err = clk_prepare_enable(gmi->clk); - if (err < 0) { - dev_err(gmi->dev, "failed to enable clock: %d\n", err); + pm_runtime_enable(gmi->dev); + err = pm_runtime_resume_and_get(gmi->dev); + if (err) { + pm_runtime_disable(gmi->dev); return err; } @@ -83,7 +87,9 @@ static void 
tegra_gmi_disable(struct tegra_gmi *gmi) writel(config, gmi->base + TEGRA_GMI_CONFIG); reset_control_assert(gmi->rst); - clk_disable_unprepare(gmi->clk); + + pm_runtime_put_sync_suspend(gmi->dev); + pm_runtime_force_suspend(gmi->dev); } static int tegra_gmi_parse_dt(struct tegra_gmi *gmi) @@ -213,6 +219,7 @@ static int tegra_gmi_probe(struct platform_device *pdev) if (!gmi) return -ENOMEM; + platform_set_drvdata(pdev, gmi); gmi->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -232,6 +239,10 @@ static int tegra_gmi_probe(struct platform_device *pdev) return PTR_ERR(gmi->rst); } + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + return err; + err = tegra_gmi_parse_dt(gmi); if (err) return err; @@ -247,8 +258,6 @@ static int tegra_gmi_probe(struct platform_device *pdev) return err; } - platform_set_drvdata(pdev, gmi); - return 0; } @@ -262,6 +271,34 @@ static int tegra_gmi_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev) +{ + struct tegra_gmi *gmi = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(gmi->clk); + if (err < 0) { + dev_err(gmi->dev, "failed to enable clock: %d\n", err); + return err; + } + + return 0; +} + +static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev) +{ + struct tegra_gmi *gmi = dev_get_drvdata(dev); + + clk_disable_unprepare(gmi->clk); + + return 0; +} + +static const struct dev_pm_ops tegra_gmi_pm = { + SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume, + NULL) +}; + static const struct of_device_id tegra_gmi_id_table[] = { { .compatible = "nvidia,tegra20-gmi", }, { .compatible = "nvidia,tegra30-gmi", }, @@ -275,6 +312,7 @@ static struct platform_driver tegra_gmi_driver = { .driver = { .name = "tegra-gmi", .of_match_table = tegra_gmi_id_table, + .pm = &tegra_gmi_pm, }, }; module_platform_driver(tegra_gmi_driver); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index c5b3dc97396a..c91931c94888 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -412,6 +412,7 @@ source "drivers/clk/samsung/Kconfig" source "drivers/clk/sifive/Kconfig" source "drivers/clk/socfpga/Kconfig" source "drivers/clk/sprd/Kconfig" +source "drivers/clk/starfive/Kconfig" source "drivers/clk/sunxi/Kconfig" source "drivers/clk/sunxi-ng/Kconfig" source "drivers/clk/tegra/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index e42312121e51..a9bb2478fbdd 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -109,6 +109,7 @@ obj-y += socfpga/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_ARCH_STI) += st/ +obj-$(CONFIG_SOC_STARFIVE) += starfive/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ diff --git a/drivers/clk/starfive/Kconfig b/drivers/clk/starfive/Kconfig new file mode 100644 index 000000000000..c0fa9d5e641f --- /dev/null +++ b/drivers/clk/starfive/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +config CLK_STARFIVE_JH7100 + bool "StarFive JH7100 clock support" + depends on SOC_STARFIVE || COMPILE_TEST + default SOC_STARFIVE + help + Say yes here to support the clock controller on the StarFive JH7100 + SoC. 
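The tegra-gmi conversion above moves clock handling out of the enable/disable paths and into runtime-PM callbacks, so the PM core (together with the OPP table registered in probe) controls the clock. A minimal sketch of that pattern follows; it is an editor's illustration, not part of the patch, and the "foo" names are hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct foo {
	struct device *dev;
	struct clk *clk;
};

/* Clock handling moves out of the enable/disable paths and into here. */
static int foo_runtime_resume(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	return clk_prepare_enable(foo->clk);
}

static int foo_runtime_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	clk_disable_unprepare(foo->clk);
	return 0;
}

static int foo_enable(struct foo *foo)
{
	int err;

	pm_runtime_enable(foo->dev);
	/* Resuming the device runs foo_runtime_resume(), enabling the clock. */
	err = pm_runtime_resume_and_get(foo->dev);
	if (err) {
		/* Keep the runtime-PM enable count balanced on failure. */
		pm_runtime_disable(foo->dev);
		return err;
	}
	return 0;
}

The enable path never touches the clock directly any more; pm_runtime_resume_and_get() triggers the resume callback, and the error path disables runtime PM again so the enable count stays balanced.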
diff --git a/drivers/clk/starfive/Makefile b/drivers/clk/starfive/Makefile new file mode 100644 index 000000000000..09759cc73530 --- /dev/null +++ b/drivers/clk/starfive/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +# StarFive Clock +obj-$(CONFIG_CLK_STARFIVE_JH7100) += clk-starfive-jh7100.o diff --git a/drivers/clk/starfive/clk-starfive-jh7100.c b/drivers/clk/starfive/clk-starfive-jh7100.c new file mode 100644 index 000000000000..25d31afa0f87 --- /dev/null +++ b/drivers/clk/starfive/clk-starfive-jh7100.c @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * StarFive JH7100 Clock Generator Driver + * + * Copyright 2021 Ahmad Fatoum, Pengutronix + * Copyright (C) 2021 Glider bv + * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk> + */ + +#include <linux/bits.h> +#include <linux/clk-provider.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <dt-bindings/clock/starfive-jh7100.h> + +/* external clocks */ +#define JH7100_CLK_OSC_SYS (JH7100_CLK_END + 0) +#define JH7100_CLK_OSC_AUD (JH7100_CLK_END + 1) +#define JH7100_CLK_GMAC_RMII_REF (JH7100_CLK_END + 2) +#define JH7100_CLK_GMAC_GR_MII_RX (JH7100_CLK_END + 3) + +/* register fields */ +#define JH7100_CLK_ENABLE BIT(31) +#define JH7100_CLK_INVERT BIT(30) +#define JH7100_CLK_MUX_MASK GENMASK(27, 24) +#define JH7100_CLK_MUX_SHIFT 24 +#define JH7100_CLK_DIV_MASK GENMASK(23, 0) + +/* clock data */ +#define JH7100_GATE(_idx, _name, _flags, _parent) [_idx] = { \ + .name = _name, \ + .flags = CLK_SET_RATE_PARENT | (_flags), \ + .max = JH7100_CLK_ENABLE, \ + .parents = { [0] = _parent }, \ +} + +#define JH7100__DIV(_idx, _name, _max, _parent) [_idx] = { \ + .name = _name, \ + .flags = 0, \ + .max = _max, \ + .parents = { [0] = _parent }, \ +} + +#define JH7100_GDIV(_idx, _name, _flags, _max, _parent) [_idx] = { \ + .name = _name, \ + .flags = _flags, \ + .max = JH7100_CLK_ENABLE | (_max), \ + .parents = { [0] = _parent }, \ +} + +#define JH7100__MUX(_idx, _name, _nparents, ...) [_idx] = { \ + .name = _name, \ + .flags = 0, \ + .max = ((_nparents) - 1) << JH7100_CLK_MUX_SHIFT, \ + .parents = { __VA_ARGS__ }, \ +} + +#define JH7100_GMUX(_idx, _name, _flags, _nparents, ...) 
[_idx] = { \ + .name = _name, \ + .flags = _flags, \ + .max = JH7100_CLK_ENABLE | \ + (((_nparents) - 1) << JH7100_CLK_MUX_SHIFT), \ + .parents = { __VA_ARGS__ }, \ +} + +#define JH7100__INV(_idx, _name, _parent) [_idx] = { \ + .name = _name, \ + .flags = CLK_SET_RATE_PARENT, \ + .max = JH7100_CLK_INVERT, \ + .parents = { [0] = _parent }, \ +} + +static const struct { + const char *name; + unsigned long flags; + u32 max; + u8 parents[4]; +} jh7100_clk_data[] __initconst = { + JH7100__MUX(JH7100_CLK_CPUNDBUS_ROOT, "cpundbus_root", 4, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL0_OUT, + JH7100_CLK_PLL1_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_DLA_ROOT, "dla_root", 3, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL1_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_DSP_ROOT, "dsp_root", 4, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL0_OUT, + JH7100_CLK_PLL1_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_GMACUSB_ROOT, "gmacusb_root", 3, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL0_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_PERH0_ROOT, "perh0_root", 2, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL0_OUT), + JH7100__MUX(JH7100_CLK_PERH1_ROOT, "perh1_root", 2, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_VIN_ROOT, "vin_root", 3, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL1_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_VOUT_ROOT, "vout_root", 3, + JH7100_CLK_OSC_AUD, + JH7100_CLK_PLL0_OUT, + JH7100_CLK_PLL2_OUT), + JH7100_GDIV(JH7100_CLK_AUDIO_ROOT, "audio_root", 0, 8, JH7100_CLK_PLL0_OUT), + JH7100__MUX(JH7100_CLK_CDECHIFI4_ROOT, "cdechifi4_root", 3, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL1_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__MUX(JH7100_CLK_CDEC_ROOT, "cdec_root", 3, + JH7100_CLK_OSC_SYS, + JH7100_CLK_PLL0_OUT, + JH7100_CLK_PLL1_OUT), + JH7100__MUX(JH7100_CLK_VOUTBUS_ROOT, "voutbus_root", 3, + JH7100_CLK_OSC_AUD, + JH7100_CLK_PLL0_OUT, + JH7100_CLK_PLL2_OUT), + JH7100__DIV(JH7100_CLK_CPUNBUS_ROOT_DIV, "cpunbus_root_div", 2, JH7100_CLK_CPUNDBUS_ROOT), + JH7100__DIV(JH7100_CLK_DSP_ROOT_DIV, "dsp_root_div", 4, JH7100_CLK_DSP_ROOT), + JH7100__DIV(JH7100_CLK_PERH0_SRC, "perh0_src", 4, JH7100_CLK_PERH0_ROOT), + JH7100__DIV(JH7100_CLK_PERH1_SRC, "perh1_src", 4, JH7100_CLK_PERH1_ROOT), + JH7100_GDIV(JH7100_CLK_PLL0_TESTOUT, "pll0_testout", 0, 31, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_PLL1_TESTOUT, "pll1_testout", 0, 31, JH7100_CLK_DLA_ROOT), + JH7100_GDIV(JH7100_CLK_PLL2_TESTOUT, "pll2_testout", 0, 31, JH7100_CLK_PERH1_SRC), + JH7100__MUX(JH7100_CLK_PLL2_REF, "pll2_refclk", 2, + JH7100_CLK_OSC_SYS, + JH7100_CLK_OSC_AUD), + JH7100__DIV(JH7100_CLK_CPU_CORE, "cpu_core", 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100__DIV(JH7100_CLK_CPU_AXI, "cpu_axi", 8, JH7100_CLK_CPU_CORE), + JH7100__DIV(JH7100_CLK_AHB_BUS, "ahb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100__DIV(JH7100_CLK_APB1_BUS, "apb1_bus", 8, JH7100_CLK_AHB_BUS), + JH7100__DIV(JH7100_CLK_APB2_BUS, "apb2_bus", 8, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_DOM3AHB_BUS, "dom3ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_DOM7AHB_BUS, "dom7ahb_bus", CLK_IS_CRITICAL, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_U74_CORE0, "u74_core0", CLK_IS_CRITICAL, JH7100_CLK_CPU_CORE), + JH7100_GDIV(JH7100_CLK_U74_CORE1, "u74_core1", CLK_IS_CRITICAL, 8, JH7100_CLK_CPU_CORE), + JH7100_GATE(JH7100_CLK_U74_AXI, "u74_axi", CLK_IS_CRITICAL, JH7100_CLK_CPU_AXI), + JH7100_GATE(JH7100_CLK_U74RTC_TOGGLE, "u74rtc_toggle", CLK_IS_CRITICAL, JH7100_CLK_OSC_SYS), + JH7100_GATE(JH7100_CLK_SGDMA2P_AXI, 
"sgdma2p_axi", 0, JH7100_CLK_CPU_AXI), + JH7100_GATE(JH7100_CLK_DMA2PNOC_AXI, "dma2pnoc_axi", 0, JH7100_CLK_CPU_AXI), + JH7100_GATE(JH7100_CLK_SGDMA2P_AHB, "sgdma2p_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100__DIV(JH7100_CLK_DLA_BUS, "dla_bus", 4, JH7100_CLK_DLA_ROOT), + JH7100_GATE(JH7100_CLK_DLA_AXI, "dla_axi", 0, JH7100_CLK_DLA_BUS), + JH7100_GATE(JH7100_CLK_DLANOC_AXI, "dlanoc_axi", 0, JH7100_CLK_DLA_BUS), + JH7100_GATE(JH7100_CLK_DLA_APB, "dla_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_VP6_CORE, "vp6_core", 0, 4, JH7100_CLK_DSP_ROOT_DIV), + JH7100__DIV(JH7100_CLK_VP6BUS_SRC, "vp6bus_src", 4, JH7100_CLK_DSP_ROOT), + JH7100_GDIV(JH7100_CLK_VP6_AXI, "vp6_axi", 0, 4, JH7100_CLK_VP6BUS_SRC), + JH7100__DIV(JH7100_CLK_VCDECBUS_SRC, "vcdecbus_src", 4, JH7100_CLK_CDECHIFI4_ROOT), + JH7100__DIV(JH7100_CLK_VDEC_BUS, "vdec_bus", 8, JH7100_CLK_VCDECBUS_SRC), + JH7100_GATE(JH7100_CLK_VDEC_AXI, "vdec_axi", 0, JH7100_CLK_VDEC_BUS), + JH7100_GATE(JH7100_CLK_VDECBRG_MAIN, "vdecbrg_mainclk", 0, JH7100_CLK_VDEC_BUS), + JH7100_GDIV(JH7100_CLK_VDEC_BCLK, "vdec_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC), + JH7100_GDIV(JH7100_CLK_VDEC_CCLK, "vdec_cclk", 0, 8, JH7100_CLK_CDEC_ROOT), + JH7100_GATE(JH7100_CLK_VDEC_APB, "vdec_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_JPEG_AXI, "jpeg_axi", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100_GDIV(JH7100_CLK_JPEG_CCLK, "jpeg_cclk", 0, 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100_GATE(JH7100_CLK_JPEG_APB, "jpeg_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_GC300_2X, "gc300_2x", 0, 8, JH7100_CLK_CDECHIFI4_ROOT), + JH7100_GATE(JH7100_CLK_GC300_AHB, "gc300_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100__DIV(JH7100_CLK_JPCGC300_AXIBUS, "jpcgc300_axibus", 8, JH7100_CLK_VCDECBUS_SRC), + JH7100_GATE(JH7100_CLK_GC300_AXI, "gc300_axi", 0, JH7100_CLK_JPCGC300_AXIBUS), + JH7100_GATE(JH7100_CLK_JPCGC300_MAIN, "jpcgc300_mainclk", 0, JH7100_CLK_JPCGC300_AXIBUS), + JH7100__DIV(JH7100_CLK_VENC_BUS, "venc_bus", 8, JH7100_CLK_VCDECBUS_SRC), + JH7100_GATE(JH7100_CLK_VENC_AXI, "venc_axi", 0, JH7100_CLK_VENC_BUS), + JH7100_GATE(JH7100_CLK_VENCBRG_MAIN, "vencbrg_mainclk", 0, JH7100_CLK_VENC_BUS), + JH7100_GDIV(JH7100_CLK_VENC_BCLK, "venc_bclk", 0, 8, JH7100_CLK_VCDECBUS_SRC), + JH7100_GDIV(JH7100_CLK_VENC_CCLK, "venc_cclk", 0, 8, JH7100_CLK_CDEC_ROOT), + JH7100_GATE(JH7100_CLK_VENC_APB, "venc_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_DDRPLL_DIV2, "ddrpll_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_PLL1_OUT), + JH7100_GDIV(JH7100_CLK_DDRPLL_DIV4, "ddrpll_div4", CLK_IS_CRITICAL, 2, JH7100_CLK_DDRPLL_DIV2), + JH7100_GDIV(JH7100_CLK_DDRPLL_DIV8, "ddrpll_div8", CLK_IS_CRITICAL, 2, JH7100_CLK_DDRPLL_DIV4), + JH7100_GDIV(JH7100_CLK_DDROSC_DIV2, "ddrosc_div2", CLK_IS_CRITICAL, 2, JH7100_CLK_OSC_SYS), + JH7100_GMUX(JH7100_CLK_DDRC0, "ddrc0", CLK_IS_CRITICAL, 4, + JH7100_CLK_DDROSC_DIV2, + JH7100_CLK_DDRPLL_DIV2, + JH7100_CLK_DDRPLL_DIV4, + JH7100_CLK_DDRPLL_DIV8), + JH7100_GMUX(JH7100_CLK_DDRC1, "ddrc1", CLK_IS_CRITICAL, 4, + JH7100_CLK_DDROSC_DIV2, + JH7100_CLK_DDRPLL_DIV2, + JH7100_CLK_DDRPLL_DIV4, + JH7100_CLK_DDRPLL_DIV8), + JH7100_GATE(JH7100_CLK_DDRPHY_APB, "ddrphy_apb", 0, JH7100_CLK_APB1_BUS), + JH7100__DIV(JH7100_CLK_NOC_ROB, "noc_rob", 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100__DIV(JH7100_CLK_NOC_COG, "noc_cog", 8, JH7100_CLK_DLA_ROOT), + JH7100_GATE(JH7100_CLK_NNE_AHB, "nne_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100__DIV(JH7100_CLK_NNEBUS_SRC1, "nnebus_src1", 4, JH7100_CLK_DSP_ROOT), + JH7100__MUX(JH7100_CLK_NNE_BUS, "nne_bus", 2, + JH7100_CLK_CPU_AXI, + 
JH7100_CLK_NNEBUS_SRC1), + JH7100_GATE(JH7100_CLK_NNE_AXI, "nne_axi", 0, JH7100_CLK_NNE_BUS), + JH7100_GATE(JH7100_CLK_NNENOC_AXI, "nnenoc_axi", 0, JH7100_CLK_NNE_BUS), + JH7100_GATE(JH7100_CLK_DLASLV_AXI, "dlaslv_axi", 0, JH7100_CLK_NNE_BUS), + JH7100_GATE(JH7100_CLK_DSPX2C_AXI, "dspx2c_axi", CLK_IS_CRITICAL, JH7100_CLK_NNE_BUS), + JH7100__DIV(JH7100_CLK_HIFI4_SRC, "hifi4_src", 4, JH7100_CLK_CDECHIFI4_ROOT), + JH7100__DIV(JH7100_CLK_HIFI4_COREFREE, "hifi4_corefree", 8, JH7100_CLK_HIFI4_SRC), + JH7100_GATE(JH7100_CLK_HIFI4_CORE, "hifi4_core", 0, JH7100_CLK_HIFI4_COREFREE), + JH7100__DIV(JH7100_CLK_HIFI4_BUS, "hifi4_bus", 8, JH7100_CLK_HIFI4_COREFREE), + JH7100_GATE(JH7100_CLK_HIFI4_AXI, "hifi4_axi", 0, JH7100_CLK_HIFI4_BUS), + JH7100_GATE(JH7100_CLK_HIFI4NOC_AXI, "hifi4noc_axi", 0, JH7100_CLK_HIFI4_BUS), + JH7100__DIV(JH7100_CLK_SGDMA1P_BUS, "sgdma1p_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100_GATE(JH7100_CLK_SGDMA1P_AXI, "sgdma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS), + JH7100_GATE(JH7100_CLK_DMA1P_AXI, "dma1p_axi", 0, JH7100_CLK_SGDMA1P_BUS), + JH7100_GDIV(JH7100_CLK_X2C_AXI, "x2c_axi", CLK_IS_CRITICAL, 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100__DIV(JH7100_CLK_USB_BUS, "usb_bus", 8, JH7100_CLK_CPUNBUS_ROOT_DIV), + JH7100_GATE(JH7100_CLK_USB_AXI, "usb_axi", 0, JH7100_CLK_USB_BUS), + JH7100_GATE(JH7100_CLK_USBNOC_AXI, "usbnoc_axi", 0, JH7100_CLK_USB_BUS), + JH7100__DIV(JH7100_CLK_USBPHY_ROOTDIV, "usbphy_rootdiv", 4, JH7100_CLK_GMACUSB_ROOT), + JH7100_GDIV(JH7100_CLK_USBPHY_125M, "usbphy_125m", 0, 8, JH7100_CLK_USBPHY_ROOTDIV), + JH7100_GDIV(JH7100_CLK_USBPHY_PLLDIV25M, "usbphy_plldiv25m", 0, 32, JH7100_CLK_USBPHY_ROOTDIV), + JH7100__MUX(JH7100_CLK_USBPHY_25M, "usbphy_25m", 2, + JH7100_CLK_OSC_SYS, + JH7100_CLK_USBPHY_PLLDIV25M), + JH7100__DIV(JH7100_CLK_AUDIO_DIV, "audio_div", 131072, JH7100_CLK_AUDIO_ROOT), + JH7100_GATE(JH7100_CLK_AUDIO_SRC, "audio_src", 0, JH7100_CLK_AUDIO_DIV), + JH7100_GATE(JH7100_CLK_AUDIO_12288, "audio_12288", 0, JH7100_CLK_OSC_AUD), + JH7100_GDIV(JH7100_CLK_VIN_SRC, "vin_src", 0, 4, JH7100_CLK_VIN_ROOT), + JH7100__DIV(JH7100_CLK_ISP0_BUS, "isp0_bus", 8, JH7100_CLK_VIN_SRC), + JH7100_GATE(JH7100_CLK_ISP0_AXI, "isp0_axi", 0, JH7100_CLK_ISP0_BUS), + JH7100_GATE(JH7100_CLK_ISP0NOC_AXI, "isp0noc_axi", 0, JH7100_CLK_ISP0_BUS), + JH7100_GATE(JH7100_CLK_ISPSLV_AXI, "ispslv_axi", 0, JH7100_CLK_ISP0_BUS), + JH7100__DIV(JH7100_CLK_ISP1_BUS, "isp1_bus", 8, JH7100_CLK_VIN_SRC), + JH7100_GATE(JH7100_CLK_ISP1_AXI, "isp1_axi", 0, JH7100_CLK_ISP1_BUS), + JH7100_GATE(JH7100_CLK_ISP1NOC_AXI, "isp1noc_axi", 0, JH7100_CLK_ISP1_BUS), + JH7100__DIV(JH7100_CLK_VIN_BUS, "vin_bus", 8, JH7100_CLK_VIN_SRC), + JH7100_GATE(JH7100_CLK_VIN_AXI, "vin_axi", 0, JH7100_CLK_VIN_BUS), + JH7100_GATE(JH7100_CLK_VINNOC_AXI, "vinnoc_axi", 0, JH7100_CLK_VIN_BUS), + JH7100_GDIV(JH7100_CLK_VOUT_SRC, "vout_src", 0, 4, JH7100_CLK_VOUT_ROOT), + JH7100__DIV(JH7100_CLK_DISPBUS_SRC, "dispbus_src", 4, JH7100_CLK_VOUTBUS_ROOT), + JH7100__DIV(JH7100_CLK_DISP_BUS, "disp_bus", 4, JH7100_CLK_DISPBUS_SRC), + JH7100_GATE(JH7100_CLK_DISP_AXI, "disp_axi", 0, JH7100_CLK_DISP_BUS), + JH7100_GATE(JH7100_CLK_DISPNOC_AXI, "dispnoc_axi", 0, JH7100_CLK_DISP_BUS), + JH7100_GATE(JH7100_CLK_SDIO0_AHB, "sdio0_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GDIV(JH7100_CLK_SDIO0_CCLKINT, "sdio0_cclkint", 0, 24, JH7100_CLK_PERH0_SRC), + JH7100__INV(JH7100_CLK_SDIO0_CCLKINT_INV, "sdio0_cclkint_inv", JH7100_CLK_SDIO0_CCLKINT), + JH7100_GATE(JH7100_CLK_SDIO1_AHB, "sdio1_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GDIV(JH7100_CLK_SDIO1_CCLKINT, 
"sdio1_cclkint", 0, 24, JH7100_CLK_PERH1_SRC), + JH7100__INV(JH7100_CLK_SDIO1_CCLKINT_INV, "sdio1_cclkint_inv", JH7100_CLK_SDIO1_CCLKINT), + JH7100_GATE(JH7100_CLK_GMAC_AHB, "gmac_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100__DIV(JH7100_CLK_GMAC_ROOT_DIV, "gmac_root_div", 8, JH7100_CLK_GMACUSB_ROOT), + JH7100_GDIV(JH7100_CLK_GMAC_PTP_REF, "gmac_ptp_refclk", 0, 31, JH7100_CLK_GMAC_ROOT_DIV), + JH7100_GDIV(JH7100_CLK_GMAC_GTX, "gmac_gtxclk", 0, 255, JH7100_CLK_GMAC_ROOT_DIV), + JH7100_GDIV(JH7100_CLK_GMAC_RMII_TX, "gmac_rmii_txclk", 0, 8, JH7100_CLK_GMAC_RMII_REF), + JH7100_GDIV(JH7100_CLK_GMAC_RMII_RX, "gmac_rmii_rxclk", 0, 8, JH7100_CLK_GMAC_RMII_REF), + JH7100__MUX(JH7100_CLK_GMAC_TX, "gmac_tx", 3, + JH7100_CLK_GMAC_GTX, + JH7100_CLK_GMAC_TX_INV, + JH7100_CLK_GMAC_RMII_TX), + JH7100__INV(JH7100_CLK_GMAC_TX_INV, "gmac_tx_inv", JH7100_CLK_GMAC_TX), + JH7100__MUX(JH7100_CLK_GMAC_RX_PRE, "gmac_rx_pre", 2, + JH7100_CLK_GMAC_GR_MII_RX, + JH7100_CLK_GMAC_RMII_RX), + JH7100__INV(JH7100_CLK_GMAC_RX_INV, "gmac_rx_inv", JH7100_CLK_GMAC_RX_PRE), + JH7100_GATE(JH7100_CLK_GMAC_RMII, "gmac_rmii", 0, JH7100_CLK_GMAC_RMII_REF), + JH7100_GDIV(JH7100_CLK_GMAC_TOPHYREF, "gmac_tophyref", 0, 127, JH7100_CLK_GMAC_ROOT_DIV), + JH7100_GATE(JH7100_CLK_SPI2AHB_AHB, "spi2ahb_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GDIV(JH7100_CLK_SPI2AHB_CORE, "spi2ahb_core", 0, 31, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_EZMASTER_AHB, "ezmaster_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_E24_AHB, "e24_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_E24RTC_TOGGLE, "e24rtc_toggle", 0, JH7100_CLK_OSC_SYS), + JH7100_GATE(JH7100_CLK_QSPI_AHB, "qspi_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_QSPI_APB, "qspi_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_QSPI_REF, "qspi_refclk", 0, 31, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_SEC_AHB, "sec_ahb", 0, JH7100_CLK_AHB_BUS), + JH7100_GATE(JH7100_CLK_AES, "aes_clk", 0, JH7100_CLK_SEC_AHB), + JH7100_GATE(JH7100_CLK_SHA, "sha_clk", 0, JH7100_CLK_SEC_AHB), + JH7100_GATE(JH7100_CLK_PKA, "pka_clk", 0, JH7100_CLK_SEC_AHB), + JH7100_GATE(JH7100_CLK_TRNG_APB, "trng_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GATE(JH7100_CLK_OTP_APB, "otp_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GATE(JH7100_CLK_UART0_APB, "uart0_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_UART0_CORE, "uart0_core", 0, 63, JH7100_CLK_PERH1_SRC), + JH7100_GATE(JH7100_CLK_UART1_APB, "uart1_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_UART1_CORE, "uart1_core", 0, 63, JH7100_CLK_PERH1_SRC), + JH7100_GATE(JH7100_CLK_SPI0_APB, "spi0_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_SPI0_CORE, "spi0_core", 0, 63, JH7100_CLK_PERH1_SRC), + JH7100_GATE(JH7100_CLK_SPI1_APB, "spi1_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_SPI1_CORE, "spi1_core", 0, 63, JH7100_CLK_PERH1_SRC), + JH7100_GATE(JH7100_CLK_I2C0_APB, "i2c0_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_I2C0_CORE, "i2c0_core", 0, 63, JH7100_CLK_PERH1_SRC), + JH7100_GATE(JH7100_CLK_I2C1_APB, "i2c1_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GDIV(JH7100_CLK_I2C1_CORE, "i2c1_core", 0, 63, JH7100_CLK_PERH1_SRC), + JH7100_GATE(JH7100_CLK_GPIO_APB, "gpio_apb", 0, JH7100_CLK_APB1_BUS), + JH7100_GATE(JH7100_CLK_UART2_APB, "uart2_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_UART2_CORE, "uart2_core", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_UART3_APB, "uart3_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_UART3_CORE, "uart3_core", 0, 63, JH7100_CLK_PERH0_SRC), + 
JH7100_GATE(JH7100_CLK_SPI2_APB, "spi2_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_SPI2_CORE, "spi2_core", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_SPI3_APB, "spi3_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_SPI3_CORE, "spi3_core", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_I2C2_APB, "i2c2_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_I2C2_CORE, "i2c2_core", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_I2C3_APB, "i2c3_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_I2C3_CORE, "i2c3_core", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_WDTIMER_APB, "wdtimer_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_WDT_CORE, "wdt_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER0_CORE, "timer0_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER1_CORE, "timer1_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER2_CORE, "timer2_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER3_CORE, "timer3_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER4_CORE, "timer4_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER5_CORE, "timer5_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GDIV(JH7100_CLK_TIMER6_CORE, "timer6_coreclk", 0, 63, JH7100_CLK_PERH0_SRC), + JH7100_GATE(JH7100_CLK_VP6INTC_APB, "vp6intc_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GATE(JH7100_CLK_PWM_APB, "pwm_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GATE(JH7100_CLK_MSI_APB, "msi_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GATE(JH7100_CLK_TEMP_APB, "temp_apb", 0, JH7100_CLK_APB2_BUS), + JH7100_GDIV(JH7100_CLK_TEMP_SENSE, "temp_sense", 0, 31, JH7100_CLK_OSC_SYS), + JH7100_GATE(JH7100_CLK_SYSERR_APB, "syserr_apb", 0, JH7100_CLK_APB2_BUS), +}; + +struct jh7100_clk { + struct clk_hw hw; + unsigned int idx; + unsigned int max_div; +}; + +struct jh7100_clk_priv { + /* protect clk enable and set rate/parent from happening at the same time */ + spinlock_t rmw_lock; + struct device *dev; + void __iomem *base; + struct clk_hw *pll[3]; + struct jh7100_clk reg[JH7100_CLK_PLL0_OUT]; +}; + +static struct jh7100_clk *jh7100_clk_from(struct clk_hw *hw) +{ + return container_of(hw, struct jh7100_clk, hw); +} + +static struct jh7100_clk_priv *jh7100_priv_from(struct jh7100_clk *clk) +{ + return container_of(clk, struct jh7100_clk_priv, reg[clk->idx]); +} + +static u32 jh7100_clk_reg_get(struct jh7100_clk *clk) +{ + struct jh7100_clk_priv *priv = jh7100_priv_from(clk); + void __iomem *reg = priv->base + 4 * clk->idx; + + return readl_relaxed(reg); +} + +static void jh7100_clk_reg_rmw(struct jh7100_clk *clk, u32 mask, u32 value) +{ + struct jh7100_clk_priv *priv = jh7100_priv_from(clk); + void __iomem *reg = priv->base + 4 * clk->idx; + unsigned long flags; + + spin_lock_irqsave(&priv->rmw_lock, flags); + value |= readl_relaxed(reg) & ~mask; + writel_relaxed(value, reg); + spin_unlock_irqrestore(&priv->rmw_lock, flags); +} + +static int jh7100_clk_enable(struct clk_hw *hw) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + + jh7100_clk_reg_rmw(clk, JH7100_CLK_ENABLE, JH7100_CLK_ENABLE); + return 0; +} + +static void jh7100_clk_disable(struct clk_hw *hw) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + + jh7100_clk_reg_rmw(clk, JH7100_CLK_ENABLE, 0); +} + +static int jh7100_clk_is_enabled(struct clk_hw *hw) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + + return !!(jh7100_clk_reg_get(clk) & JH7100_CLK_ENABLE); +} + +static unsigned long 
jh7100_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + u32 div = jh7100_clk_reg_get(clk) & JH7100_CLK_DIV_MASK; + + return div ? parent_rate / div : 0; +} + +static unsigned long jh7100_clk_bestdiv(struct jh7100_clk *clk, + unsigned long rate, unsigned long parent) +{ + unsigned long max = clk->max_div; + unsigned long div = DIV_ROUND_UP(parent, rate); + + return min(div, max); +} + +static int jh7100_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + unsigned long parent = req->best_parent_rate; + unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate); + unsigned long div = jh7100_clk_bestdiv(clk, rate, parent); + unsigned long result = parent / div; + + /* + * we want the result clamped by min_rate and max_rate if possible: + * case 1: div hits the max divider value, which means it's less than + * parent / rate, so the result is greater than rate and min_rate in + * particular. we can't do anything about result > max_rate because the + * divider doesn't go any further. + * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is + * always lower or equal to rate and max_rate. however the result may + * turn out lower than min_rate, but then the next higher rate is fine: + * div - 1 = ceil(parent / rate) - 1 < parent / rate + * and thus + * min_rate <= rate < parent / (div - 1) + */ + if (result < req->min_rate && div > 1) + result = parent / (div - 1); + + req->rate = result; + return 0; +} + +static int jh7100_clk_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + unsigned long div = jh7100_clk_bestdiv(clk, rate, parent_rate); + + jh7100_clk_reg_rmw(clk, JH7100_CLK_DIV_MASK, div); + return 0; +} + +static u8 jh7100_clk_get_parent(struct clk_hw *hw) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + u32 value = jh7100_clk_reg_get(clk); + + return (value & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT; +} + +static int jh7100_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + u32 value = (u32)index << JH7100_CLK_MUX_SHIFT; + + jh7100_clk_reg_rmw(clk, JH7100_CLK_MUX_MASK, value); + return 0; +} + +static int jh7100_clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + return clk_mux_determine_rate_flags(hw, req, 0); +} + +static int jh7100_clk_get_phase(struct clk_hw *hw) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + u32 value = jh7100_clk_reg_get(clk); + + return (value & JH7100_CLK_INVERT) ? 
180 : 0; +} + +static int jh7100_clk_set_phase(struct clk_hw *hw, int degrees) +{ + struct jh7100_clk *clk = jh7100_clk_from(hw); + u32 value; + + if (degrees == 0) + value = 0; + else if (degrees == 180) + value = JH7100_CLK_INVERT; + else + return -EINVAL; + + jh7100_clk_reg_rmw(clk, JH7100_CLK_INVERT, value); + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static void jh7100_clk_debug_init(struct clk_hw *hw, struct dentry *dentry) +{ + static const struct debugfs_reg32 jh7100_clk_reg = { + .name = "CTRL", + .offset = 0, + }; + struct jh7100_clk *clk = jh7100_clk_from(hw); + struct jh7100_clk_priv *priv = jh7100_priv_from(clk); + struct debugfs_regset32 *regset; + + regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return; + + regset->regs = &jh7100_clk_reg; + regset->nregs = 1; + regset->base = priv->base + 4 * clk->idx; + + debugfs_create_regset32("registers", 0400, dentry, regset); +} +#else +#define jh7100_clk_debug_init NULL +#endif + +static const struct clk_ops jh7100_clk_gate_ops = { + .enable = jh7100_clk_enable, + .disable = jh7100_clk_disable, + .is_enabled = jh7100_clk_is_enabled, + .debug_init = jh7100_clk_debug_init, +}; + +static const struct clk_ops jh7100_clk_div_ops = { + .recalc_rate = jh7100_clk_recalc_rate, + .determine_rate = jh7100_clk_determine_rate, + .set_rate = jh7100_clk_set_rate, + .debug_init = jh7100_clk_debug_init, +}; + +static const struct clk_ops jh7100_clk_gdiv_ops = { + .enable = jh7100_clk_enable, + .disable = jh7100_clk_disable, + .is_enabled = jh7100_clk_is_enabled, + .recalc_rate = jh7100_clk_recalc_rate, + .determine_rate = jh7100_clk_determine_rate, + .set_rate = jh7100_clk_set_rate, + .debug_init = jh7100_clk_debug_init, +}; + +static const struct clk_ops jh7100_clk_mux_ops = { + .determine_rate = jh7100_clk_mux_determine_rate, + .set_parent = jh7100_clk_set_parent, + .get_parent = jh7100_clk_get_parent, + .debug_init = jh7100_clk_debug_init, +}; + +static const struct clk_ops jh7100_clk_gmux_ops = { + .enable = jh7100_clk_enable, + .disable = jh7100_clk_disable, + .is_enabled = jh7100_clk_is_enabled, + .determine_rate = jh7100_clk_mux_determine_rate, + .set_parent = jh7100_clk_set_parent, + .get_parent = jh7100_clk_get_parent, + .debug_init = jh7100_clk_debug_init, +}; + +static const struct clk_ops jh7100_clk_inv_ops = { + .get_phase = jh7100_clk_get_phase, + .set_phase = jh7100_clk_set_phase, + .debug_init = jh7100_clk_debug_init, +}; + +static const struct clk_ops *__init jh7100_clk_ops(u32 max) +{ + if (max & JH7100_CLK_DIV_MASK) { + if (max & JH7100_CLK_ENABLE) + return &jh7100_clk_gdiv_ops; + return &jh7100_clk_div_ops; + } + + if (max & JH7100_CLK_MUX_MASK) { + if (max & JH7100_CLK_ENABLE) + return &jh7100_clk_gmux_ops; + return &jh7100_clk_mux_ops; + } + + if (max & JH7100_CLK_ENABLE) + return &jh7100_clk_gate_ops; + + return &jh7100_clk_inv_ops; +} + +static struct clk_hw *jh7100_clk_get(struct of_phandle_args *clkspec, void *data) +{ + struct jh7100_clk_priv *priv = data; + unsigned int idx = clkspec->args[0]; + + if (idx < JH7100_CLK_PLL0_OUT) + return &priv->reg[idx].hw; + + if (idx < JH7100_CLK_END) + return priv->pll[idx - JH7100_CLK_PLL0_OUT]; + + return ERR_PTR(-EINVAL); +} + +static int __init clk_starfive_jh7100_probe(struct platform_device *pdev) +{ + struct jh7100_clk_priv *priv; + unsigned int idx; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + spin_lock_init(&priv->rmw_lock); + priv->dev = &pdev->dev; + priv->base = 
devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->pll[0] = devm_clk_hw_register_fixed_factor(priv->dev, "pll0_out", + "osc_sys", 0, 40, 1); + if (IS_ERR(priv->pll[0])) + return PTR_ERR(priv->pll[0]); + + priv->pll[1] = devm_clk_hw_register_fixed_factor(priv->dev, "pll1_out", + "osc_sys", 0, 64, 1); + if (IS_ERR(priv->pll[1])) + return PTR_ERR(priv->pll[1]); + + priv->pll[2] = devm_clk_hw_register_fixed_factor(priv->dev, "pll2_out", + "pll2_refclk", 0, 55, 1); + if (IS_ERR(priv->pll[2])) + return PTR_ERR(priv->pll[2]); + + for (idx = 0; idx < JH7100_CLK_PLL0_OUT; idx++) { + u32 max = jh7100_clk_data[idx].max; + struct clk_parent_data parents[4] = {}; + struct clk_init_data init = { + .name = jh7100_clk_data[idx].name, + .ops = jh7100_clk_ops(max), + .parent_data = parents, + .num_parents = ((max & JH7100_CLK_MUX_MASK) >> JH7100_CLK_MUX_SHIFT) + 1, + .flags = jh7100_clk_data[idx].flags, + }; + struct jh7100_clk *clk = &priv->reg[idx]; + unsigned int i; + + for (i = 0; i < init.num_parents; i++) { + unsigned int pidx = jh7100_clk_data[idx].parents[i]; + + if (pidx < JH7100_CLK_PLL0_OUT) + parents[i].hw = &priv->reg[pidx].hw; + else if (pidx < JH7100_CLK_END) + parents[i].hw = priv->pll[pidx - JH7100_CLK_PLL0_OUT]; + else if (pidx == JH7100_CLK_OSC_SYS) + parents[i].fw_name = "osc_sys"; + else if (pidx == JH7100_CLK_OSC_AUD) + parents[i].fw_name = "osc_aud"; + else if (pidx == JH7100_CLK_GMAC_RMII_REF) + parents[i].fw_name = "gmac_rmii_ref"; + else if (pidx == JH7100_CLK_GMAC_GR_MII_RX) + parents[i].fw_name = "gmac_gr_mii_rxclk"; + } + + clk->hw.init = &init; + clk->idx = idx; + clk->max_div = max & JH7100_CLK_DIV_MASK; + + ret = devm_clk_hw_register(priv->dev, &clk->hw); + if (ret) + return ret; + } + + return devm_of_clk_add_hw_provider(priv->dev, jh7100_clk_get, priv); +} + +static const struct of_device_id clk_starfive_jh7100_match[] = { + { .compatible = "starfive,jh7100-clkgen" }, + { /* sentinel */ } +}; + +static struct platform_driver clk_starfive_jh7100_driver = { + .driver = { + .name = "clk-starfive-jh7100", + .of_match_table = clk_starfive_jh7100_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(clk_starfive_jh7100_driver, clk_starfive_jh7100_probe); diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h index 54d1f96f4b68..a8c11c0b4e06 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h @@ -51,8 +51,6 @@ #define CLK_USB_OHCI1_12M 92 -#define CLK_DRAM 94 - /* All the DRAM gates are exported */ /* And the DSI and GPU module clock is exported */ diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h index d8c38447e11b..e13f3c4b57d0 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.h +++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.h @@ -42,8 +42,6 @@ /* The first bunch of module clocks are exported */ -#define CLK_DRAM 96 - /* All the DRAM gates are exported */ /* Some more module clocks are exported */ diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 51690e73153a..4f705674f94f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -213,6 +213,18 @@ config CRYPTO_AES_S390 key sizes and XTS mode is hardware accelerated for 256 and 512 bit keys. 
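The divider logic in jh7100_clk_determine_rate() above rounds the divider up, then steps it back down by one if the resulting rate undershoots min_rate. A small standalone illustration with made-up numbers (an editor's sketch in plain userspace C, not part of the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Mirror of the bestdiv + clamp-to-min_rate logic from the driver above. */
static unsigned long pick_rate(unsigned long parent, unsigned long rate,
			       unsigned long min_rate, unsigned long max_div)
{
	unsigned long div = DIV_ROUND_UP(parent, rate);

	if (div > max_div)		/* case 1: divider saturates */
		div = max_div;
	if (parent / div < min_rate && div > 1)
		div--;			/* case 2: step back to the next higher rate */
	return parent / div;
}

int main(void)
{
	/* parent 250 MHz, request 60 MHz, floor 55 MHz, divider up to /8 */
	printf("%lu\n", pick_rate(250000000, 60000000, 55000000, 8));
	return 0;
}

Here DIV_ROUND_UP(250 MHz, 60 MHz) = 5 would give 50 MHz, below the 55 MHz floor, so the sketch prints 62500000 (250/4), consistent with the comment's bound min_rate <= rate < parent / (div - 1).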
+config CRYPTO_CHACHA_S390 + tristate "ChaCha20 stream cipher" + depends on S390 + select CRYPTO_ALGAPI + select CRYPTO_SKCIPHER + select CRYPTO_CHACHA20 + help + This is the s390 SIMD implementation of the ChaCha20 stream + cipher (RFC 7539). + + It is available as of z13. + config S390_PRNG tristate "Pseudo random number generator device driver" depends on S390 diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 2fc4c3f91fd5..58ab63642e72 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -484,7 +484,7 @@ config EDAC_ARMADA_XP config EDAC_SYNOPSYS tristate "Synopsys DDR Memory Controller" - depends on ARCH_ZYNQ || ARCH_ZYNQMP + depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA help Support for error detection and correction on the Synopsys DDR memory controller. diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4fce75013674..fba609ada0e6 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -988,6 +988,281 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) return csrow; } +/* Protect the PCI config register pairs used for DF indirect access. */ +static DEFINE_MUTEX(df_indirect_mutex); + +/* + * Data Fabric Indirect Access uses FICAA/FICAD. + * + * Fabric Indirect Configuration Access Address (FICAA): Constructed based + * on the device's Instance Id and the PCI function and register offset of + * the desired register. + * + * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO + * and FICAD HI registers but so far we only need the LO register. + * + * Use Instance Id 0xFF to indicate a broadcast read. + */ +#define DF_BROADCAST 0xFF +static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) +{ + struct pci_dev *F4; + u32 ficaa; + int err = -ENODEV; + + if (node >= amd_nb_num()) + goto out; + + F4 = node_to_amd_nb(node)->link; + if (!F4) + goto out; + + ficaa = (instance_id == DF_BROADCAST) ? 0 : 1; + ficaa |= reg & 0x3FC; + ficaa |= (func & 0x7) << 11; + ficaa |= instance_id << 16; + + mutex_lock(&df_indirect_mutex); + + err = pci_write_config_dword(F4, 0x5C, ficaa); + if (err) { + pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa); + goto out_unlock; + } + + err = pci_read_config_dword(F4, 0x98, lo); + if (err) + pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa); + +out_unlock: + mutex_unlock(&df_indirect_mutex); + +out: + return err; +} + +static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo) +{ + return __df_indirect_read(node, func, reg, instance_id, lo); +} + +static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo) +{ + return __df_indirect_read(node, func, reg, DF_BROADCAST, lo); +} + +struct addr_ctx { + u64 ret_addr; + u32 tmp; + u16 nid; + u8 inst_id; +}; + +static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) +{ + u64 dram_base_addr, dram_limit_addr, dram_hole_base; + + u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask; + u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets; + u8 intlv_addr_sel, intlv_addr_bit; + u8 num_intlv_bits, hashed_bit; + u8 lgcy_mmio_hole_en, base = 0; + u8 cs_mask, cs_id = 0; + bool hash_enabled = false; + + struct addr_ctx ctx; + + memset(&ctx, 0, sizeof(ctx)); + + /* Start from the normalized address */ + ctx.ret_addr = norm_addr; + + ctx.nid = nid; + ctx.inst_id = umc; + + /* Read D18F0x1B4 (DramOffset), check if base 1 is used. 
*/ + if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) + goto out_err; + + /* Remove HiAddrOffset from normalized address, if enabled: */ + if (ctx.tmp & BIT(0)) { + u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8; + + if (norm_addr >= hi_addr_offset) { + ctx.ret_addr -= hi_addr_offset; + base = 1; + } + } + + /* Read D18F0x110 (DramBaseAddress). */ + if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp)) + goto out_err; + + /* Check if address range is valid. */ + if (!(ctx.tmp & BIT(0))) { + pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n", + __func__, ctx.tmp); + goto out_err; + } + + lgcy_mmio_hole_en = ctx.tmp & BIT(1); + intlv_num_chan = (ctx.tmp >> 4) & 0xF; + intlv_addr_sel = (ctx.tmp >> 8) & 0x7; + dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16; + + /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */ + if (intlv_addr_sel > 3) { + pr_err("%s: Invalid interleave address select %d.\n", + __func__, intlv_addr_sel); + goto out_err; + } + + /* Read D18F0x114 (DramLimitAddress). */ + if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp)) + goto out_err; + + intlv_num_sockets = (ctx.tmp >> 8) & 0x1; + intlv_num_dies = (ctx.tmp >> 10) & 0x3; + dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); + + intlv_addr_bit = intlv_addr_sel + 8; + + /* Re-use intlv_num_chan by setting it equal to log2(#channels) */ + switch (intlv_num_chan) { + case 0: intlv_num_chan = 0; break; + case 1: intlv_num_chan = 1; break; + case 3: intlv_num_chan = 2; break; + case 5: intlv_num_chan = 3; break; + case 7: intlv_num_chan = 4; break; + + case 8: intlv_num_chan = 1; + hash_enabled = true; + break; + default: + pr_err("%s: Invalid number of interleaved channels %d.\n", + __func__, intlv_num_chan); + goto out_err; + } + + num_intlv_bits = intlv_num_chan; + + if (intlv_num_dies > 2) { + pr_err("%s: Invalid number of interleaved nodes/dies %d.\n", + __func__, intlv_num_dies); + goto out_err; + } + + num_intlv_bits += intlv_num_dies; + + /* Add a bit if sockets are interleaved. */ + num_intlv_bits += intlv_num_sockets; + + /* Assert num_intlv_bits <= 4 */ + if (num_intlv_bits > 4) { + pr_err("%s: Invalid interleave bits %d.\n", + __func__, num_intlv_bits); + goto out_err; + } + + if (num_intlv_bits > 0) { + u64 temp_addr_x, temp_addr_i, temp_addr_y; + u8 die_id_bit, sock_id_bit, cs_fabric_id; + + /* + * Read FabricBlockInstanceInformation3_CS[BlockFabricID]. + * This is the fabric id for this coherent slave. Use + * umc/channel# as instance id of the coherent slave + * for FICAA. + */ + if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp)) + goto out_err; + + cs_fabric_id = (ctx.tmp >> 8) & 0xFF; + die_id_bit = 0; + + /* If interleaved over more than 1 channel: */ + if (intlv_num_chan) { + die_id_bit = intlv_num_chan; + cs_mask = (1 << die_id_bit) - 1; + cs_id = cs_fabric_id & cs_mask; + } + + sock_id_bit = die_id_bit; + + /* Read D18F1x208 (SystemFabricIdMask). */ + if (intlv_num_dies || intlv_num_sockets) + if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp)) + goto out_err; + + /* If interleaved over more than 1 die. */ + if (intlv_num_dies) { + sock_id_bit = die_id_bit + intlv_num_dies; + die_id_shift = (ctx.tmp >> 24) & 0xF; + die_id_mask = (ctx.tmp >> 8) & 0xFF; + + cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; + } + + /* If interleaved over more than 1 socket. 
*/ + if (intlv_num_sockets) { + socket_id_shift = (ctx.tmp >> 28) & 0xF; + socket_id_mask = (ctx.tmp >> 16) & 0xFF; + + cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; + } + + /* + * The pre-interleaved address consists of XXXXXXIIIYYYYY + * where III is the ID for this CS, and XXXXXXYYYYY are the + * address bits from the post-interleaved address. + * "num_intlv_bits" has been calculated to tell us how many "I" + * bits there are. "intlv_addr_bit" tells us how many "Y" bits + * there are (where "I" starts). + */ + temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0); + temp_addr_i = (cs_id << intlv_addr_bit); + temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits; + ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y; + } + + /* Add dram base address */ + ctx.ret_addr += dram_base_addr; + + /* If legacy MMIO hole enabled */ + if (lgcy_mmio_hole_en) { + if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp)) + goto out_err; + + dram_hole_base = ctx.tmp & GENMASK(31, 24); + if (ctx.ret_addr >= dram_hole_base) + ctx.ret_addr += (BIT_ULL(32) - dram_hole_base); + } + + if (hash_enabled) { + /* Save some parentheses and grab ls-bit at the end. */ + hashed_bit = (ctx.ret_addr >> 12) ^ + (ctx.ret_addr >> 18) ^ + (ctx.ret_addr >> 21) ^ + (ctx.ret_addr >> 30) ^ + cs_id; + + hashed_bit &= BIT(0); + + if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0))) + ctx.ret_addr ^= BIT(intlv_addr_bit); + } + + /* Is calculated system address is above DRAM limit address? */ + if (ctx.ret_addr > dram_limit_addr) + goto out_err; + + *sys_addr = ctx.ret_addr; + return 0; + +out_err: + return -EINVAL; +} + static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); /* @@ -2650,6 +2925,26 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F19_M10H_CPUS] = { + .ctl_name = "F19h_M10h", + .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6, + .max_mcs = 12, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, + [F19_M50H_CPUS] = { + .ctl_name = "F19h_M50h", + .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6, + .max_mcs = 2, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, }; /* @@ -3687,11 +3982,25 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) break; case 0x19: - if (pvt->model >= 0x20 && pvt->model <= 0x2f) { + if (pvt->model >= 0x10 && pvt->model <= 0x1f) { + fam_type = &family_types[F19_M10H_CPUS]; + pvt->ops = &family_types[F19_M10H_CPUS].ops; + break; + } else if (pvt->model >= 0x20 && pvt->model <= 0x2f) { fam_type = &family_types[F17_M70H_CPUS]; pvt->ops = &family_types[F17_M70H_CPUS].ops; fam_type->ctl_name = "F19h_M20h"; break; + } else if (pvt->model >= 0x50 && pvt->model <= 0x5f) { + fam_type = &family_types[F19_M50H_CPUS]; + pvt->ops = &family_types[F19_M50H_CPUS].ops; + fam_type->ctl_name = "F19h_M50h"; + break; + } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) { + fam_type = &family_types[F19_M10H_CPUS]; + pvt->ops = &family_types[F19_M10H_CPUS].ops; + fam_type->ctl_name = "F19h_MA0h"; + break; } fam_type = &family_types[F19_CPUS]; pvt->ops = &family_types[F19_CPUS].ops; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 85aa820bc165..352bda9803f6 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h 
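The "XXXXXXIIIYYYYY" comment in umc_normaddr_to_sysaddr() above is easiest to see with concrete numbers. A standalone sketch (hypothetical values, editor's illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t norm_addr = 0x1234;	/* normalized (post-interleave) address */
	unsigned int intlv_addr_bit = 8;	/* position of the "I" field */
	unsigned int num_intlv_bits = 2;	/* width of the "I" field */
	uint64_t cs_id = 0x2;		/* coherent-slave ID to re-insert */
	uint64_t low_mask = (1ULL << intlv_addr_bit) - 1;

	uint64_t y = norm_addr & low_mask;			/* "Y" bits stay put */
	uint64_t i = cs_id << intlv_addr_bit;			/* "I" bits re-inserted */
	uint64_t x = (norm_addr & ~low_mask) << num_intlv_bits;	/* "X" bits shift up */

	printf("0x%llx\n", (unsigned long long)(x | i | y));	/* prints 0x4a34 */
	return 0;
}

With intlv_addr_bit = 8 and two interleave bits, the low byte 0x34 stays in place, cs_id lands in bits [9:8], and the remaining bits shift up by two, giving 0x4a34.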
@@ -96,7 +96,7 @@ /* Hardware limit on ChipSelect rows per MC and processors per system */ #define NUM_CHIPSELECTS 8 #define DRAM_RANGES 8 -#define NUM_CONTROLLERS 8 +#define NUM_CONTROLLERS 12 #define ON true #define OFF false @@ -126,6 +126,10 @@ #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 #define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 #define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 +#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad +#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3 +#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a +#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670 /* * Function 1 - Address Map @@ -298,6 +302,8 @@ enum amd_families { F17_M60H_CPUS, F17_M70H_CPUS, F19_CPUS, + F19_M10H_CPUS, + F19_M50H_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 9f82ca295353..9d9aabdec96b 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -162,6 +162,8 @@ const char * const edac_mem_types[] = { [MEM_LPDDR4] = "Low-Power-DDR4-RAM", [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM", [MEM_DDR5] = "Unbuffered-DDR5", + [MEM_RDDR5] = "Registered-DDR5", + [MEM_LRDDR5] = "Load-Reduced-DDR5-RAM", [MEM_NVDIMM] = "Non-volatile-RAM", [MEM_WIO2] = "Wide-IO-2", [MEM_HBM2] = "High-bandwidth-memory-Gen2", diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c index 83345bfac246..6cf50ee0b77c 100644 --- a/drivers/edac/i10nm_base.c +++ b/drivers/edac/i10nm_base.c @@ -358,6 +358,9 @@ static int i10nm_get_hbm_munits(void) mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE); if (!mbase) { + pci_dev_put(d->imc[lmc].mdev); + d->imc[lmc].mdev = NULL; + i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n", base + off); return -ENOMEM; @@ -368,6 +371,12 @@ static int i10nm_get_hbm_munits(void) mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0); if (!I10NM_IS_HBM_IMC(mcmtr)) { + iounmap(d->imc[lmc].mbase); + d->imc[lmc].mbase = NULL; + d->imc[lmc].hbm_mc = false; + pci_dev_put(d->imc[lmc].mdev); + d->imc[lmc].mdev = NULL; + i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n"); return -ENODEV; } diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 67dbf4c31271..cc5c63feb26a 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -399,6 +399,63 @@ static const char * const smca_mp5_mce_desc[] = { "Instruction Tag Cache Bank B ECC or parity error", }; +static const char * const smca_mpdma_mce_desc[] = { + "Main SRAM [31:0] bank ECC or parity error", + "Main SRAM [63:32] bank ECC or parity error", + "Main SRAM [95:64] bank ECC or parity error", + "Main SRAM [127:96] bank ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + "Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + "Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + "Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction 
Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", + "System Hub Read Buffer ECC or parity error", + "MPDMA TVF DVSEC Memory ECC or parity error", + "MPDMA TVF MMIO Mailbox0 ECC or parity error", + "MPDMA TVF MMIO Mailbox1 ECC or parity error", + "MPDMA TVF Doorbell Memory ECC or parity error", + "MPDMA TVF SDP Slave Memory 0 ECC or parity error", + "MPDMA TVF SDP Slave Memory 1 ECC or parity error", + "MPDMA TVF SDP Slave Memory 2 ECC or parity error", + "MPDMA TVF SDP Master Memory 0 ECC or parity error", + "MPDMA TVF SDP Master Memory 1 ECC or parity error", + "MPDMA TVF SDP Master Memory 2 ECC or parity error", + "MPDMA TVF SDP Master Memory 3 ECC or parity error", + "MPDMA TVF SDP Master Memory 4 ECC or parity error", + "MPDMA TVF SDP Master Memory 5 ECC or parity error", + "MPDMA TVF SDP Master Memory 6 ECC or parity error", + "MPDMA PTE Command FIFO ECC or parity error", + "MPDMA PTE Hub Data FIFO ECC or parity error", + "MPDMA PTE Internal Data FIFO ECC or parity error", + "MPDMA PTE Command Memory DMA ECC or parity error", + "MPDMA PTE Command Memory Internal ECC or parity error", + "MPDMA PTE DMA Completion FIFO ECC or parity error", + "MPDMA PTE Tablewalk Completion FIFO ECC or parity error", + "MPDMA PTE Descriptor Completion FIFO ECC or parity error", + "MPDMA PTE ReadOnly Completion FIFO ECC or parity error", + "MPDMA PTE DirectWrite Completion FIFO ECC or parity error", + "SDP Watchdog Timer expired", +}; + static const char * const smca_nbio_mce_desc[] = { "ECC or Parity error", "PCIE error", @@ -448,7 +505,7 @@ static const char * const smca_xgmipcs_mce_desc[] = { "Rx Replay Timeout Error", "LinkSub Tx Timeout Error", "LinkSub Rx Timeout Error", - "Rx CMD Pocket Error", + "Rx CMD Packet Error", }; static const char * const smca_xgmiphy_mce_desc[] = { @@ -458,11 +515,66 @@ static const char * const smca_xgmiphy_mce_desc[] = { "PHY APB error", }; -static const char * const smca_waflphy_mce_desc[] = { - "RAM ECC Error", - "ARC instruction buffer parity error", - "ARC data buffer parity error", - "PHY APB error", +static const char * const smca_nbif_mce_desc[] = { + "Timeout error from GMI", + "SRAM ECC error", + "NTB Error Event", + "SDP Parity error", +}; + +static const char * const smca_sata_mce_desc[] = { + "Parity error for port 0", + "Parity error for port 1", + "Parity error for port 2", + "Parity error for port 3", + "Parity error for port 4", + "Parity error for port 5", + "Parity error for port 6", + "Parity error for port 7", +}; + +static const char * const smca_usb_mce_desc[] = { + "Parity error or ECC error for S0 RAM0", + "Parity error or ECC error for S0 RAM1", + "Parity error or ECC error for S0 RAM2", + "Parity error for PHY RAM0", + "Parity error for PHY RAM1", + "AXI Slave Response error", +}; + +static const char * const smca_gmipcs_mce_desc[] = { + "Data Loss Error", + "Training Error", + "Replay Parity Error", + "Rx Fifo Underflow Error", + "Rx Fifo Overflow Error", + "CRC Error", + "BER Exceeded Error", + "Tx Fifo Underflow Error", + "Replay Buffer Parity Error", + "Tx Overflow Error", + "Replay Fifo Overflow Error", + "Replay Fifo Underflow Error", + "Elastic Fifo Overflow Error", + "Deskew Error", + "Offline Error", + "Data Startup Limit Error", + "FC Init Timeout Error", + "Recovery Timeout Error", + "Ready Serial Timeout Error", + "Ready Serial Attempt Error", + "Recovery Attempt Error", + "Recovery Relock 
Attempt Error", + "Deskew Abort Error", + "Rx Buffer Error", + "Rx LFDS Fifo Overflow Error", + "Rx LFDS Fifo Underflow Error", + "LinkSub Tx Timeout Error", + "LinkSub Rx Timeout Error", + "Rx CMD Packet Error", + "LFDS Training Timeout Error", + "LFDS FC Init Timeout Error", + "Data Loss Error", }; struct smca_mce_desc { @@ -490,12 +602,21 @@ static struct smca_mce_desc smca_mce_descs[] = { [SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) }, [SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) }, [SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) }, + [SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) }, [SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) }, [SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) }, [SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) }, [SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) }, + /* NBIF and SHUB have the same error descriptions, for now. */ + [SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) }, + [SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) }, + [SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) }, + [SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) }, + [SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) }, + /* All the PHY bank types have the same error descriptions, for now. */ [SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) }, - [SMCA_WAFL_PHY] = { smca_waflphy_mce_desc, ARRAY_SIZE(smca_waflphy_mce_desc) }, + [SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) }, + [SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) }, }; static bool f12h_mc0_mce(u16 ec, u8 xec) @@ -1045,20 +1166,13 @@ static void decode_mc6_mce(struct mce *m) /* Decode errors according to Scalable MCA specification */ static void decode_smca_error(struct mce *m) { - struct smca_hwid *hwid; - enum smca_bank_types bank_type; + enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank); const char *ip_name; u8 xec = XEC(m->status, xec_mask); - if (m->bank >= ARRAY_SIZE(smca_banks)) + if (bank_type >= N_SMCA_BANK_TYPES) return; - hwid = smca_banks[m->bank].hwid; - if (!hwid) - return; - - bank_type = hwid->bank_type; - if (bank_type == SMCA_RESERVED) { pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank); return; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 1522d4aa2ca6..9678ab97c7ac 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -3439,7 +3439,7 @@ MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids); static int sbridge_probe(const struct x86_cpu_id *id) { - int rc = -ENODEV; + int rc; u8 mc, num_mc = 0; struct sbridge_dev *sbridge_dev; struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data; diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index 3a3dcb14ed99..ee800aec7d47 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -19,7 +19,7 @@ struct sifive_edac_priv { struct edac_device_ctl_info *dci; }; -/** +/* * EDAC error callback * * @event: non-zero if unrecoverable. 
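The synopsys_edac change below introduces a DDR_ECC_INTR_SELF_CLEAR quirk: controller revisions that clear and ack their own interrupt status must skip the manual read and write-back of the QOS IRQ status register. The pattern, reduced to a sketch with illustrative register names (not the driver's actual definitions):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define QUIRK_INTR_SELF_CLEAR	BIT(2)		/* stands in for DDR_ECC_INTR_SELF_CLEAR */
#define IRQ_STAT		0x20200		/* illustrative status-register offset */
#define IRQ_CE_UE_MASK		(BIT(8) | BIT(9))

struct foo_priv {
	void __iomem *base;
	u32 quirks;
};

static irqreturn_t foo_intr_handler(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u32 regval = 0;

	/* Older controllers must check, and later ack, the status register. */
	if (!(priv->quirks & QUIRK_INTR_SELF_CLEAR)) {
		regval = readl(priv->base + IRQ_STAT);
		if (!(regval & IRQ_CE_UE_MASK))
			return IRQ_NONE;	/* interrupt was not ours */
	}

	/* ... collect and report the CE/UE error counts here ... */

	/* Self-clearing revisions have no status register to write back. */
	if (!(priv->quirks & QUIRK_INTR_SELF_CLEAR))
		writel(regval, priv->base + IRQ_STAT);

	return IRQ_HANDLED;
}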
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 7d08627e738b..f05ff02c0656 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -101,6 +101,7 @@ /* DDR ECC Quirks */ #define DDR_ECC_INTR_SUPPORT BIT(0) #define DDR_ECC_DATA_POISON_SUPPORT BIT(1) +#define DDR_ECC_INTR_SELF_CLEAR BIT(2) /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */ /* ECC Configuration Registers */ @@ -171,6 +172,10 @@ #define DDR_QOS_IRQ_EN_OFST 0x20208 #define DDR_QOS_IRQ_DB_OFST 0x2020C +/* DDR QOS Interrupt register definitions */ +#define DDR_UE_MASK BIT(9) +#define DDR_CE_MASK BIT(8) + /* ECC Corrected Error Register Mask and Shifts*/ #define ECC_CEADDR0_RW_MASK 0x3FFFF #define ECC_CEADDR0_RNK_MASK BIT(24) @@ -533,10 +538,16 @@ static irqreturn_t intr_handler(int irq, void *dev_id) priv = mci->pvt_info; p_data = priv->p_data; - regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); - regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK); - if (!(regval & ECC_CE_UE_INTR_MASK)) - return IRQ_NONE; + /* + * v3.0 of the controller has the ce/ue bits cleared automatically, + * so this condition does not apply. + */ + if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) { + regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK); + if (!(regval & ECC_CE_UE_INTR_MASK)) + return IRQ_NONE; + } status = p_data->get_error_info(priv); if (status) @@ -548,7 +559,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id) edac_dbg(3, "Total error count CE %d UE %d\n", priv->ce_cnt, priv->ue_cnt); - writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); + /* v3.0 of the controller does not have this register */ + if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) + writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); return IRQ_HANDLED; } @@ -834,8 +847,13 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) static void enable_intr(struct synps_edac_priv *priv) { /* Enable UE/CE Interrupts */ - writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, - priv->baseaddr + DDR_QOS_IRQ_EN_OFST); + if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR) + writel(DDR_UE_MASK | DDR_CE_MASK, + priv->baseaddr + ECC_CLR_OFST); + else + writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, + priv->baseaddr + DDR_QOS_IRQ_EN_OFST); + } static void disable_intr(struct synps_edac_priv *priv) @@ -890,6 +908,19 @@ static const struct synps_platform_data zynqmp_edac_def = { ), }; +static const struct synps_platform_data synopsys_edac_def = { + .get_error_info = zynqmp_get_error_info, + .get_mtype = zynqmp_get_mtype, + .get_dtype = zynqmp_get_dtype, + .get_ecc_state = zynqmp_get_ecc_state, + .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR +#ifdef CONFIG_EDAC_DEBUG + | DDR_ECC_DATA_POISON_SUPPORT +#endif + ), +}; + + static const struct of_device_id synps_edac_match[] = { { .compatible = "xlnx,zynq-ddrc-a05", @@ -900,6 +931,10 @@ static const struct of_device_id synps_edac_match[] = { .data = (void *)&zynqmp_edac_def }, { + .compatible = "snps,ddrc-3.80a", + .data = (void *)&synopsys_edac_def + }, + { /* end of table */ } }; @@ -1352,8 +1387,7 @@ static int mc_probe(struct platform_device *pdev) } } - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,zynqmp-ddrc-2.40a")) + if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) setup_address_map(priv); #endif diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 235c7e7869aa..5ae2040b8b02 100644 --- a/drivers/firmware/ti_sci.c +++ 
b/drivers/firmware/ti_sci.c @@ -1759,7 +1759,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle, desc->num = resp->range_num; desc->start_sec = resp->range_start_sec; desc->num_sec = resp->range_num_sec; - }; + } fail: ti_sci_put_one_xfer(&info->minfo, xfer); diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 3dd45a7420dc..0dd117860b63 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -1434,7 +1434,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) return ret; /* Check PM API version number */ - zynqmp_pm_get_api_version(&pm_api_version); + ret = zynqmp_pm_get_api_version(&pm_api_version); + if (ret) + return ret; + if (pm_api_version < ZYNQMP_PM_VERSION) { panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n", __func__, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b85b67a88a3d..7d67aec6f4a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1077,6 +1077,7 @@ struct amdgpu_device { bool runpm; bool in_runpm; bool has_pr3; + bool is_fw_fb; bool pm_sysfs_en; bool ucode_sysfs_en; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 86ca80da9eea..99370bdd8c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -39,6 +39,7 @@ #include <linux/mmu_notifier.h> #include <linux/suspend.h> #include <linux/cc_platform.h> +#include <linux/fb.h> #include "amdgpu.h" #include "amdgpu_irq.h" @@ -1890,6 +1891,26 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver amdgpu_kms_driver; +static bool amdgpu_is_fw_framebuffer(resource_size_t base, + resource_size_t size) +{ + bool found = false; +#if IS_REACHABLE(CONFIG_FB) + struct apertures_struct *a; + + a = alloc_apertures(1); + if (!a) + return false; + + a->ranges[0].base = base; + a->ranges[0].size = size; + + found = is_firmware_framebuffer(a); + kfree(a); +#endif + return found; +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1898,6 +1919,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, unsigned long flags = ent->driver_data; int ret, retry = 0, i; bool supports_atomic = false; + bool is_fw_fb; + resource_size_t base, size; /* skip devices which are owned by radeon */ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { @@ -1966,6 +1989,10 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, } #endif + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + is_fw_fb = amdgpu_is_fw_framebuffer(base, size); + /* Get rid of things like offb */ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); if (ret) @@ -1978,6 +2005,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, adev->dev = &pdev->dev; adev->pdev = pdev; ddev = adev_to_drm(adev); + adev->is_fw_fb = is_fw_fb; if (!supports_atomic) ddev->driver_features &= ~DRIVER_ATOMIC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 9afd11ca2709..45977a72b5dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -547,9 +547,6 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; - if (!ring->no_scheduler) - drm_sched_stop(&ring->sched, NULL); - /* You can't wait for HW to signal if 
it's gone */ if (!drm_dev_is_unplugged(adev_to_drm(adev))) r = amdgpu_fence_wait_empty(ring); @@ -609,11 +606,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) if (!ring || !ring->fence_drv.initialized) continue; - if (!ring->no_scheduler) { - drm_sched_resubmit_jobs(&ring->sched); - drm_sched_start(&ring->sched, true); - } - /* enable the interrupt */ if (ring->fence_drv.irq_src) amdgpu_irq_get(adev, ring->fence_drv.irq_src, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 651c7abfde03..09ad17944eb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -206,6 +206,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) adev->runpm = true; break; } + /* XXX: disable runtime pm if we are the primary adapter + * to avoid displays being re-enabled after DPMS. + * This needs to be sorted out and fixed properly. + */ + if (adev->is_fw_fb) + adev->runpm = false; if (adev->runpm) dev_info(adev->dev, "Using BACO for runtime pm\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 08133de21fdd..75dad0214dc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2647,7 +2647,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb, * and error occurred in DramECC (Extended error code = 0) then only * process the error, else bail out. */ - if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) && + if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) && (XEC(m->status, 0x3f) == 0x0))) return NOTIFY_DONE; diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 8a817932acdf..9d7d64fdf410 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1400,8 +1400,14 @@ static int smu_disable_dpms(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; int ret = 0; + /* + * TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair + * the workaround which always reset the asic in suspend. + * It's likely that workaround will be dropped in the future. + * Then the change here should be dropped together. + */ bool use_baco = !smu->is_apu && - ((amdgpu_in_reset(adev) && + (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) && (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index dce392839017..c6b854a9e476 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -963,16 +963,10 @@ config I2C_RK3X This driver can also be built as a module. If so, the module will be called i2c-rk3x. -config HAVE_S3C2410_I2C - bool - help - This will include I2C support for Samsung SoCs. If you want to - include I2C support for any machine, kindly select this in the - respective Kconfig file. - config I2C_S3C2410 tristate "S3C/Exynos I2C Driver" - depends on HAVE_S3C2410_I2C || COMPILE_TEST + depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || \ + ARCH_S5PV210 || COMPILE_TEST help Say Y here to include support for I2C controller in the Samsung SoCs (S3C, S5Pv210, Exynos). 
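[Editor's note] The i2c-mpc hunk below closes an invalid dereference: once the state machine queues MPC_I2C_ACTION_STOP there is no current message, so indexing i2c->msgs[i2c->curr_msg] can read past the message array. The fix initializes msg to NULL and only looks it up for non-STOP actions, and every later use is guarded, as in the final "if (msg && msg->len == i2c->byte_posn)" check. A minimal sketch of the guard, using a hypothetical helper name:

/* Hypothetical helper illustrating the pattern used in the hunk below. */
static struct i2c_msg *mpc_i2c_current_msg(struct mpc_i2c *i2c)
{
	/* A STOP has no message to act on; every other action does. */
	if (i2c->action == MPC_I2C_ACTION_STOP)
		return NULL;
	return &i2c->msgs[i2c->curr_msg];
}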
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 53b8da6dbb23..db26cc36e13f 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -492,7 +492,7 @@ static void mpc_i2c_finish(struct mpc_i2c *i2c, int rc) static void mpc_i2c_do_action(struct mpc_i2c *i2c) { - struct i2c_msg *msg = &i2c->msgs[i2c->curr_msg]; + struct i2c_msg *msg = NULL; int dir = 0; int recv_len = 0; u8 byte; @@ -501,10 +501,13 @@ static void mpc_i2c_do_action(struct mpc_i2c *i2c) i2c->cntl_bits &= ~(CCR_RSTA | CCR_MTX | CCR_TXAK); - if (msg->flags & I2C_M_RD) - dir = 1; - if (msg->flags & I2C_M_RECV_LEN) - recv_len = 1; + if (i2c->action != MPC_I2C_ACTION_STOP) { + msg = &i2c->msgs[i2c->curr_msg]; + if (msg->flags & I2C_M_RD) + dir = 1; + if (msg->flags & I2C_M_RECV_LEN) + recv_len = 1; + } switch (i2c->action) { case MPC_I2C_ACTION_RESTART: @@ -581,7 +584,7 @@ static void mpc_i2c_do_action(struct mpc_i2c *i2c) break; } - if (msg->len == i2c->byte_posn) { + if (msg && msg->len == i2c->byte_posn) { i2c->curr_msg++; i2c->byte_posn = 0; diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f193f9058584..73253e667de1 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -466,14 +466,12 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) static int i2c_device_probe(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); - struct i2c_adapter *adap; struct i2c_driver *driver; int status; if (!client) return 0; - adap = client->adapter; client->irq = client->init_irq; if (!client->irq) { @@ -539,14 +537,6 @@ static int i2c_device_probe(struct device *dev) dev_dbg(dev, "probe\n"); - if (adap->bus_regulator) { - status = regulator_enable(adap->bus_regulator); - if (status < 0) { - dev_err(&adap->dev, "Failed to enable bus regulator\n"); - goto err_clear_wakeup_irq; - } - } - status = of_clk_set_defaults(dev->of_node, false); if (status < 0) goto err_clear_wakeup_irq; @@ -605,10 +595,8 @@ put_sync_adapter: static void i2c_device_remove(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); - struct i2c_adapter *adap; struct i2c_driver *driver; - adap = client->adapter; driver = to_i2c_driver(dev->driver); if (driver->remove) { int status; @@ -623,8 +611,6 @@ static void i2c_device_remove(struct device *dev) devres_release_group(&client->dev, client->devres_group_id); dev_pm_domain_detach(&client->dev, !i2c_acpi_waive_d0_probe(dev)); - if (!pm_runtime_status_suspended(&client->dev) && adap->bus_regulator) - regulator_disable(adap->bus_regulator); dev_pm_clear_wake_irq(&client->dev); device_init_wakeup(&client->dev, false); @@ -634,86 +620,6 @@ static void i2c_device_remove(struct device *dev) pm_runtime_put(&client->adapter->dev); } -#ifdef CONFIG_PM_SLEEP -static int i2c_resume_early(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - if (pm_runtime_status_suspended(&client->dev) && - client->adapter->bus_regulator) { - err = regulator_enable(client->adapter->bus_regulator); - if (err) - return err; - } - - return pm_generic_resume_early(&client->dev); -} - -static int i2c_suspend_late(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - err = pm_generic_suspend_late(&client->dev); - if (err) - return err; - - if (!pm_runtime_status_suspended(&client->dev) && - client->adapter->bus_regulator) - return 
regulator_disable(client->adapter->bus_regulator); - - return 0; -} -#endif - -#ifdef CONFIG_PM -static int i2c_runtime_resume(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - if (client->adapter->bus_regulator) { - err = regulator_enable(client->adapter->bus_regulator); - if (err) - return err; - } - - return pm_generic_runtime_resume(&client->dev); -} - -static int i2c_runtime_suspend(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - int err; - - if (!client) - return 0; - - err = pm_generic_runtime_suspend(&client->dev); - if (err) - return err; - - if (client->adapter->bus_regulator) - return regulator_disable(client->adapter->bus_regulator); - return 0; -} -#endif - -static const struct dev_pm_ops i2c_device_pm = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(i2c_suspend_late, i2c_resume_early) - SET_RUNTIME_PM_OPS(i2c_runtime_suspend, i2c_runtime_resume, NULL) -}; - static void i2c_device_shutdown(struct device *dev) { struct i2c_client *client = i2c_verify_client(dev); @@ -773,7 +679,6 @@ struct bus_type i2c_bus_type = { .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, - .pm = &i2c_device_pm, }; EXPORT_SYMBOL_GPL(i2c_bus_type); diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index b8d715c68ca4..11a080646916 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device, struct rdma_ah_attr *src = ah_attr; struct rdma_ah_attr conv_ah; - memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); + memset(&dst->grh, 0, sizeof(dst->grh)); if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) && (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) && diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 2f2c7646fce1..a02916a3a79c 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c @@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi) uapi->num_write_ex = max_write_ex + 1; data = kmalloc_array(uapi->num_write + uapi->num_write_ex, sizeof(*uapi->write_methods), GFP_KERNEL); + if (!data) + return -ENOMEM; + for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++) data[i] = &uapi->notsupp_method; uapi->write_methods = data; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 4a7a56ed740b..e636e954f6bf 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -664,6 +664,7 @@ struct mlx5_ib_mr { /* User MR data */ struct mlx5_cache_ent *cache_ent; + struct ib_umem *umem; /* This is zero'd when the MR is allocated */ union { @@ -675,7 +676,7 @@ struct mlx5_ib_mr { struct list_head list; }; - /* Used only by kernel MRs */ + /* Used only by kernel MRs (umem == NULL) */ struct { void *descs; void *descs_alloc; @@ -696,9 +697,8 @@ struct mlx5_ib_mr { int data_length; }; - /* Used only by User MRs */ + /* Used only by User MRs (umem != NULL) */ struct { - struct ib_umem *umem; unsigned int page_shift; /* Current access_flags */ int access_flags; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 63e2129f1142..157d862fb864 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1904,18 +1904,19 @@ err: return ret; } -static void mlx5_free_priv_descs(struct mlx5_ib_mr 
*mr) +static void +mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); - int size = mr->max_descs * mr->desc_size; - - if (!mr->descs) - return; + if (!mr->umem && mr->descs) { + struct ib_device *device = mr->ibmr.device; + int size = mr->max_descs * mr->desc_size; + struct mlx5_ib_dev *dev = to_mdev(device); - dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, - DMA_TO_DEVICE); - kfree(mr->descs_alloc); - mr->descs = NULL; + dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, + DMA_TO_DEVICE); + kfree(mr->descs_alloc); + mr->descs = NULL; + } } int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) @@ -1991,8 +1992,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) if (mr->cache_ent) { mlx5_mr_cache_free(dev, mr); } else { - if (!udata) - mlx5_free_priv_descs(mr); + mlx5_free_priv_descs(mr); kfree(mr); } return 0; @@ -2079,6 +2079,7 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, if (err) goto err_free_in; + mr->umem = NULL; kfree(in); return mr; @@ -2205,6 +2206,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, } mr->ibmr.device = pd->device; + mr->umem = NULL; switch (mr_type) { case IB_MR_TYPE_MEM_REG: diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 53271df10e47..bcf717bcf0b3 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -135,19 +135,19 @@ static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both) ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set); if (ret) - goto err_out; + return -ENOMEM; if (both) { ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set); - if (ret) { - rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); - goto err_out; - } + if (ret) + goto err_free; } return 0; -err_out: +err_free: + rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); + mr->cur_map_set = NULL; return -ENOMEM; } @@ -214,7 +214,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, pr_warn("%s: Unable to get virtual address\n", __func__); err = -ENOMEM; - goto err_cleanup_map; + goto err_release_umem; } buf->addr = (uintptr_t)vaddr; @@ -237,8 +237,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, return 0; -err_cleanup_map: - rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); err_release_umem: ib_umem_release(umem); err_out: diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index b8d901099378..1e70b8d2a8d7 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -488,6 +488,15 @@ static int zinitix_ts_probe(struct i2c_client *client) return error; } + error = devm_request_threaded_irq(&client->dev, client->irq, + NULL, zinitix_ts_irq_handler, + IRQF_ONESHOT | IRQF_NO_AUTOEN, + client->name, bt541); + if (error) { + dev_err(&client->dev, "Failed to request IRQ: %d\n", error); + return error; + } + error = zinitix_init_input_dev(bt541); if (error) { dev_err(&client->dev, @@ -513,15 +522,6 @@ static int zinitix_ts_probe(struct i2c_client *client) return -EINVAL; } - error = devm_request_threaded_irq(&client->dev, client->irq, - NULL, zinitix_ts_irq_handler, - IRQF_ONESHOT | IRQF_NO_AUTOEN, - client->name, bt541); - if (error) { - dev_err(&client->dev, "Failed to request IRQ: %d\n", error); - return error; - } - return 0; } diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 83df387e70a3..50860ebdd087 100644 --- 
a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -109,7 +109,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, zdev->dma_table = s390_domain->dma_table; cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, - (u64) zdev->dma_table); + virt_to_phys(zdev->dma_table)); if (cc) { rc = -EIO; goto out_restore; @@ -205,11 +205,11 @@ static void s390_iommu_release_device(struct device *dev) } static int s390_iommu_update_trans(struct s390_domain *s390_domain, - unsigned long pa, dma_addr_t dma_addr, + phys_addr_t pa, dma_addr_t dma_addr, size_t size, int flags) { struct s390_domain_device *domain_device; - u8 *page_addr = (u8 *) (pa & PAGE_MASK); + phys_addr_t page_addr = pa & PAGE_MASK; dma_addr_t start_dma_addr = dma_addr; unsigned long irq_flags, nr_pages, i; unsigned long *entry; @@ -274,7 +274,7 @@ static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, if (!(prot & IOMMU_WRITE)) flags |= ZPCI_TABLE_PROTECTED; - rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova, + rc = s390_iommu_update_trans(s390_domain, paddr, iova, size, flags); return rc; @@ -324,7 +324,7 @@ static size_t s390_iommu_unmap(struct iommu_domain *domain, if (!paddr) return 0; - rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova, + rc = s390_iommu_update_trans(s390_domain, paddr, iova, size, flags); if (rc) return 0; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 7dc8026cf6ee..85505424f7a4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1496,12 +1496,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, if (!r1_bio->bios[i]) continue; - if (first_clone && test_bit(WriteMostly, &rdev->flags)) { + if (first_clone) { /* do behind I/O ? * Not if there are too many, or cannot * allocate memory, or a reader on WriteMostly * is waiting for behind writes to flush */ if (bitmap && + test_bit(WriteMostly, &rdev->flags) && (atomic_read(&bitmap->behind_writes) < mddev->bitmap_info.max_write_behind) && !waitqueue_active(&bitmap->behind_wait)) { diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 7435baad0007..e4cc64f56019 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -12,6 +12,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -19,19 +20,17 @@ #define RPCIF_CMNCR 0x0000 /* R/W */ #define RPCIF_CMNCR_MD BIT(31) -#define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */ #define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22) #define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20) #define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18) #define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16) -#define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \ - RPCIF_CMNCR_MOIIO1(3) | \ - RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3)) -#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */ -#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */ +#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \ + RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val)) +#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */ +#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */ #define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8) -#define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \ - 
RPCIF_CMNCR_IO3FV(3)) +#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \ + RPCIF_CMNCR_IO3FV(val)) #define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0) #define RPCIF_SSLDR 0x0004 /* R/W */ @@ -126,6 +125,9 @@ #define RPCIF_SMDRENR_OPDRE BIT(4) #define RPCIF_SMDRENR_SPIDRE BIT(0) +#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ +#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */ + #define RPCIF_PHYCNT 0x007C /* R/W */ #define RPCIF_PHYCNT_CAL BIT(31) #define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22) @@ -133,10 +135,12 @@ #define RPCIF_PHYCNT_OCT BIT(20) #define RPCIF_PHYCNT_DDRCAL BIT(19) #define RPCIF_PHYCNT_HS BIT(18) -#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) +#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */ +#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) /* valid for R-Car and RZ/G2{E,H,M,N} */ #define RPCIF_PHYCNT_WBUF2 BIT(4) #define RPCIF_PHYCNT_WBUF BIT(2) #define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0) +#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0) #define RPCIF_PHYOFFSET1 0x0080 /* R/W */ #define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28) @@ -147,8 +151,6 @@ #define RPCIF_PHYINT 0x0088 /* R/W */ #define RPCIF_PHYINT_WPVAL BIT(1) -#define RPCIF_DIRMAP_SIZE 0x4000000 - static const struct regmap_range rpcif_volatile_ranges[] = { regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1), regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1), @@ -243,50 +245,74 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rpc->dirmap)) - rpc->dirmap = NULL; + return PTR_ERR(rpc->dirmap); rpc->size = resource_size(res); + rpc->type = (uintptr_t)of_device_get_match_data(dev); rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); return PTR_ERR_OR_ZERO(rpc->rstc); } EXPORT_SYMBOL(rpcif_sw_init); -void rpcif_hw_init(struct rpcif *rpc, bool hyperflash) +static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc) +{ + regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000); + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000); + regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080); + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022); + regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080); + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024); + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3), + RPCIF_PHYCNT_CKSEL(3)); + regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030); + regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); +} + +int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) { u32 dummy; pm_runtime_get_sync(rpc->dev); - /* - * NOTE: The 0x260 are undocumented bits, but they must be set. - * RPCIF_PHYCNT_STRTIM is strobe timing adjustment bits, - * 0x0 : the delay is biggest, - * 0x1 : the delay is 2nd biggest, - * On H3 ES1.x, the value should be 0, while on others, - * the value should be 7. - */ - regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) | - RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260); - - /* - * NOTE: The 0x1511144 are undocumented bits, but they must be set - * for RPCIF_PHYOFFSET1. - * The 0x31 are undocumented bits, but they must be set - * for RPCIF_PHYOFFSET2. 
- */ - regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 | - RPCIF_PHYOFFSET1_DDRTMG(3)); - regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 | - RPCIF_PHYOFFSET2_OCTTMG(4)); + if (rpc->type == RPCIF_RZ_G2L) { + int ret; + + ret = reset_control_reset(rpc->rstc); + if (ret) + return ret; + usleep_range(200, 300); + rpcif_rzg2l_timing_adjust_sdr(rpc); + } + + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK, + RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0)); + + if (rpc->type == RPCIF_RCAR_GEN3) + regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, + RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7)); + + regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3), + RPCIF_PHYOFFSET1_DDRTMG(3)); + regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7), + RPCIF_PHYOFFSET2_OCTTMG(4)); if (hyperflash) regmap_update_bits(rpc->regmap, RPCIF_PHYINT, RPCIF_PHYINT_WPVAL, 0); - regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE | - RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ | - RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); + if (rpc->type == RPCIF_RCAR_GEN3) + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, + RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3), + RPCIF_CMNCR_MOIIO(3) | + RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); + else + regmap_update_bits(rpc->regmap, RPCIF_CMNCR, + RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) | + RPCIF_CMNCR_BSZ(3), + RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) | + RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0)); + /* Set RCF after BSZ update */ regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF); /* Dummy read according to spec */ @@ -297,6 +323,8 @@ void rpcif_hw_init(struct rpcif *rpc, bool hyperflash) pm_runtime_put(rpc->dev); rpc->bus_size = hyperflash ? 2 : 1; + + return 0; } EXPORT_SYMBOL(rpcif_hw_init); @@ -588,8 +616,8 @@ static void memcpy_fromio_readw(void *to, ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) { - loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1); - size_t size = RPCIF_DIRMAP_SIZE - from; + loff_t from = offs & (rpc->size - 1); + size_t size = rpc->size - from; if (len > size) len = size; @@ -659,7 +687,8 @@ static int rpcif_remove(struct platform_device *pdev) } static const struct of_device_id rpcif_of_match[] = { - { .compatible = "renesas,rcar-gen3-rpc-if", }, + { .compatible = "renesas,rcar-gen3-rpc-if", .data = (void *)RPCIF_RCAR_GEN3 }, + { .compatible = "renesas,rzg2l-rpc-if", .data = (void *)RPCIF_RZ_G2L }, {}, }; MODULE_DEVICE_TABLE(of, rpcif_of_match); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 9762ffab2e23..35ebba067e87 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -15,6 +15,8 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/mmc/card.h> @@ -24,6 +26,8 @@ #include <linux/gpio/consumer.h> #include <linux/ktime.h> +#include <soc/tegra/common.h> + #include "sdhci-pltfm.h" #include "cqhci.h" @@ -743,7 +747,9 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct device *dev = mmc_dev(host->mmc); unsigned long host_clk; + int err; if (!clock) return sdhci_set_clock(host, clock); @@ -761,7 +767,12 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) * from 
clk_get_rate() is used. */ host_clk = tegra_host->ddr_signaling ? clock * 2 : clock; - clk_set_rate(pltfm_host->clk, host_clk); + + err = dev_pm_opp_set_rate(dev, host_clk); + if (err) + dev_err(dev, "failed to set clk rate to %luHz: %d\n", + host_clk, err); + tegra_host->curr_clk_rate = host_clk; if (tegra_host->ddr_signaling) host->max_clk = host_clk; @@ -1714,7 +1725,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev) "failed to get clock\n"); goto err_clk_get; } - clk_prepare_enable(clk); pltfm_host->clk = clk; tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev, @@ -1725,15 +1735,24 @@ static int sdhci_tegra_probe(struct platform_device *pdev) goto err_rst_get; } - rc = reset_control_assert(tegra_host->rst); + rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); if (rc) goto err_rst_get; + pm_runtime_enable(&pdev->dev); + rc = pm_runtime_resume_and_get(&pdev->dev); + if (rc) + goto err_pm_get; + + rc = reset_control_assert(tegra_host->rst); + if (rc) + goto err_rst_assert; + usleep_range(2000, 4000); rc = reset_control_deassert(tegra_host->rst); if (rc) - goto err_rst_get; + goto err_rst_assert; usleep_range(2000, 4000); @@ -1745,8 +1764,11 @@ static int sdhci_tegra_probe(struct platform_device *pdev) err_add_host: reset_control_assert(tegra_host->rst); +err_rst_assert: + pm_runtime_put_sync_suspend(&pdev->dev); +err_pm_get: + pm_runtime_disable(&pdev->dev); err_rst_get: - clk_disable_unprepare(pltfm_host->clk); err_clk_get: clk_disable_unprepare(tegra_host->tmclk); err_power_req: @@ -1765,19 +1787,38 @@ static int sdhci_tegra_remove(struct platform_device *pdev) reset_control_assert(tegra_host->rst); usleep_range(2000, 4000); - clk_disable_unprepare(pltfm_host->clk); - clk_disable_unprepare(tegra_host->tmclk); + pm_runtime_put_sync_suspend(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); + + clk_disable_unprepare(tegra_host->tmclk); sdhci_pltfm_free(pdev); return 0; } -#ifdef CONFIG_PM_SLEEP -static int __maybe_unused sdhci_tegra_suspend(struct device *dev) +static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + clk_disable_unprepare(pltfm_host->clk); + + return 0; +} + +static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return clk_prepare_enable(pltfm_host->clk); +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_tegra_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); int ret; if (host->mmc->caps2 & MMC_CAP2_CQE) { @@ -1792,17 +1833,22 @@ static int __maybe_unused sdhci_tegra_suspend(struct device *dev) return ret; } - clk_disable_unprepare(pltfm_host->clk); + ret = pm_runtime_force_suspend(dev); + if (ret) { + sdhci_resume_host(host); + cqhci_resume(host->mmc); + return ret; + } + return 0; } -static int __maybe_unused sdhci_tegra_resume(struct device *dev) +static int sdhci_tegra_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); int ret; - ret = clk_prepare_enable(pltfm_host->clk); + ret = pm_runtime_force_resume(dev); if (ret) return ret; @@ -1821,13 +1867,16 @@ static int __maybe_unused sdhci_tegra_resume(struct device *dev) suspend_host: sdhci_suspend_host(host); disable_clk: - clk_disable_unprepare(pltfm_host->clk); + pm_runtime_force_suspend(dev); return ret; } #endif 
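[Editor's note] The sdhci-tegra conversion in the hunks above (completed by the dev_pm_ops replacement just below) routes system sleep through the runtime-PM callbacks via pm_runtime_force_suspend()/pm_runtime_force_resume(), so the controller clock is managed in exactly one place. For a driver whose only runtime-managed resource is a clock and which has no extra sleep work, the pairing reduces to the following sketch; the foo_* names are hypothetical, and linux/clk.h plus linux/pm_runtime.h are assumed:

struct foo { struct clk *clk; };

static int foo_runtime_suspend(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	clk_disable_unprepare(foo->clk);
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo *foo = dev_get_drvdata(dev);

	return clk_prepare_enable(foo->clk);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	/* System sleep reuses the runtime path via the force helpers. */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

The hunk's sdhci_tegra_suspend()/sdhci_tegra_resume() wrap the same force helpers because CQE and SDHCI state need handling around them; the sketch is the degenerate case.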
-static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend, - sdhci_tegra_resume); +static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = { + SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume) +}; static struct platform_driver sdhci_tegra_driver = { .driver = { diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index ecb050ba95cd..8daa296f6eb6 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -130,7 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev) rpcif_enable_rpm(&hyperbus->rpc); - rpcif_hw_init(&hyperbus->rpc, true); + error = rpcif_hw_init(&hyperbus->rpc, true); + if (error) + return error; hyperbus->hbdev.map.size = hyperbus->rpc.size; hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index 32431bbe69b8..b36e5260ae27 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -17,8 +17,11 @@ #include <linux/mtd/rawnand.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/reset.h> +#include <soc/tegra/common.h> + #define COMMAND 0x00 #define COMMAND_GO BIT(31) #define COMMAND_CLE BIT(30) @@ -1151,6 +1154,7 @@ static int tegra_nand_probe(struct platform_device *pdev) return -ENOMEM; ctrl->dev = &pdev->dev; + platform_set_drvdata(pdev, ctrl); nand_controller_init(&ctrl->controller); ctrl->controller.ops = &tegra_nand_controller_ops; @@ -1166,14 +1170,23 @@ static int tegra_nand_probe(struct platform_device *pdev) if (IS_ERR(ctrl->clk)) return PTR_ERR(ctrl->clk); - err = clk_prepare_enable(ctrl->clk); + err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (err) + return err; + + /* + * This driver doesn't support active power management yet, + * so we will simply keep device resumed. 
+ */ + pm_runtime_enable(&pdev->dev); + err = pm_runtime_resume_and_get(&pdev->dev); if (err) return err; err = reset_control_reset(rst); if (err) { dev_err(ctrl->dev, "Failed to reset HW: %d\n", err); - goto err_disable_clk; + goto err_put_pm; } writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD); @@ -1188,21 +1201,20 @@ static int tegra_nand_probe(struct platform_device *pdev) dev_name(&pdev->dev), ctrl); if (err) { dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err); - goto err_disable_clk; + goto err_put_pm; } writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL); err = tegra_nand_chips_init(ctrl->dev, ctrl); if (err) - goto err_disable_clk; - - platform_set_drvdata(pdev, ctrl); + goto err_put_pm; return 0; -err_disable_clk: - clk_disable_unprepare(ctrl->clk); +err_put_pm: + pm_runtime_put_sync_suspend(ctrl->dev); + pm_runtime_force_suspend(ctrl->dev); return err; } @@ -1219,11 +1231,40 @@ static int tegra_nand_remove(struct platform_device *pdev) nand_cleanup(chip); + pm_runtime_put_sync_suspend(ctrl->dev); + pm_runtime_force_suspend(ctrl->dev); + + return 0; +} + +static int __maybe_unused tegra_nand_runtime_resume(struct device *dev) +{ + struct tegra_nand_controller *ctrl = dev_get_drvdata(dev); + int err; + + err = clk_prepare_enable(ctrl->clk); + if (err) { + dev_err(dev, "Failed to enable clock: %d\n", err); + return err; + } + + return 0; +} + +static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev) +{ + struct tegra_nand_controller *ctrl = dev_get_drvdata(dev); + clk_disable_unprepare(ctrl->clk); return 0; } +static const struct dev_pm_ops tegra_nand_pm = { + SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume, + NULL) +}; + static const struct of_device_id tegra_nand_of_match[] = { { .compatible = "nvidia,tegra20-nand" }, { /* sentinel */ } @@ -1234,6 +1275,7 @@ static struct platform_driver tegra_nand_driver = { .driver = { .name = "tegra-nand", .of_match_table = tegra_nand_of_match, + .pm = &tegra_nand_pm, }, .probe = tegra_nand_probe, .remove = tegra_nand_remove, diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 4374af292e6d..e1a0c44bc686 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -43,7 +43,7 @@ config ARM_CCN config ARM_CMN tristate "Arm CMN-600 PMU support" - depends on ARM64 || (COMPILE_TEST && 64BIT) + depends on ARM64 || COMPILE_TEST help Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh Network interconnect. @@ -139,6 +139,13 @@ config ARM_DMC620_PMU Support for PMU events monitoring on the ARM DMC-620 memory controller. +config MARVELL_CN10K_TAD_PMU + tristate "Marvell CN10K LLC-TAD PMU" + depends on ARM64 || (COMPILE_TEST && 64BIT) + help + Provides support for Last-Level cache Tag-and-data Units (LLC-TAD) + performance monitors on CN10K family silicons. 
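[Editor's note] The tegra_nand probe rework above shows the usual runtime-PM bring-up/unwind pairing: pm_runtime_enable() followed by pm_runtime_resume_and_get(), with the error path dropping the reference and putting the device back to sleep. A minimal sketch of that skeleton with hypothetical foo_* names; the hunk's err_put_pm label does the same work, using pm_runtime_force_suspend() (which also disables runtime PM) where the sketch uses pm_runtime_disable():

static int foo_probe(struct platform_device *pdev)
{
	int err;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err) {
		/* a failed get has already dropped its usage-count reference */
		pm_runtime_disable(&pdev->dev);
		return err;
	}

	err = foo_hw_init(pdev);	/* hypothetical hardware bring-up */
	if (err) {
		pm_runtime_put_sync_suspend(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return err;
	}

	return 0;
}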
+ source "drivers/perf/hisilicon/Kconfig" endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 5260b116c7da..2db5418d5b0a 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o +obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index bc3cba5f8c5d..0e48adce57ef 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -5,8 +5,10 @@ #include <linux/acpi.h> #include <linux/bitfield.h> #include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> @@ -23,7 +25,10 @@ #define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32) #define CMN_NODEID_DEVID(reg) ((reg) & 3) +#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1) #define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) +#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3) +#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7) #define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) #define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) @@ -34,20 +39,28 @@ #define CMN_CHILD_NODE_ADDR GENMASK(27, 0) #define CMN_CHILD_NODE_EXTERNAL BIT(31) -#define CMN_ADDR_NODE_PTR GENMASK(27, 14) +#define CMN_MAX_DIMENSION 8 +#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION) +#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4) -#define CMN_NODE_PTR_DEVID(ptr) (((ptr) >> 2) & 3) -#define CMN_NODE_PTR_PID(ptr) ((ptr) & 1) -#define CMN_NODE_PTR_X(ptr, bits) ((ptr) >> (6 + (bits))) -#define CMN_NODE_PTR_Y(ptr, bits) (((ptr) >> 6) & ((1U << (bits)) - 1)) - -#define CMN_MAX_XPS (8 * 8) - -/* The CFG node has one other useful purpose */ +/* The CFG node has various info besides the discovery tree */ #define CMN_CFGM_PERIPH_ID_2 0x0010 #define CMN_CFGM_PID2_REVISION GENMASK(7, 4) -/* PMU registers occupy the 3rd 4KB page of each node's 16KB space */ +#define CMN_CFGM_INFO_GLOBAL 0x900 +#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63) +#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52) +#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50) + +/* XPs also have some local topology info which has uses too */ +#define CMN_MXP__CONNECT_INFO_P0 0x0008 +#define CMN_MXP__CONNECT_INFO_P1 0x0010 +#define CMN_MXP__CONNECT_INFO_P2 0x0028 +#define CMN_MXP__CONNECT_INFO_P3 0x0030 +#define CMN_MXP__CONNECT_INFO_P4 0x0038 +#define CMN_MXP__CONNECT_INFO_P5 0x0040 + +/* PMU registers occupy the 3rd 4KB page of each node's region */ #define CMN_PMU_OFFSET 0x2000 /* For most nodes, this is all there is */ @@ -57,6 +70,7 @@ /* DTMs live in the PMU space of XP registers */ #define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18) #define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00) +#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17) #define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6) #define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5) #define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4) @@ -81,7 +95,11 @@ #define CMN_DTM_PMEVCNTSR 0x240 +#define CMN_DTM_UNIT_INFO 0x0910 + #define CMN_DTM_NUM_COUNTERS 4 +/* Want more local counters? Why not replicate the whole DTM! Ugh... 
*/ +#define CMN_DTM_OFFSET(n) ((n) * 0x200) /* The DTC node is where the magic happens */ #define CMN_DT_DTC_CTL 0x0a00 @@ -122,11 +140,11 @@ /* Event attributes */ -#define CMN_CONFIG_TYPE GENMASK(15, 0) -#define CMN_CONFIG_EVENTID GENMASK(23, 16) -#define CMN_CONFIG_OCCUPID GENMASK(27, 24) -#define CMN_CONFIG_BYNODEID BIT(31) -#define CMN_CONFIG_NODEID GENMASK(47, 32) +#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0) +#define CMN_CONFIG_EVENTID GENMASK_ULL(23, 16) +#define CMN_CONFIG_OCCUPID GENMASK_ULL(27, 24) +#define CMN_CONFIG_BYNODEID BIT_ULL(31) +#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32) #define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config) #define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config) @@ -134,13 +152,13 @@ #define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config) #define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config) -#define CMN_CONFIG_WP_COMBINE GENMASK(27, 24) -#define CMN_CONFIG_WP_DEV_SEL BIT(48) -#define CMN_CONFIG_WP_CHN_SEL GENMASK(50, 49) -#define CMN_CONFIG_WP_GRP BIT(52) -#define CMN_CONFIG_WP_EXCLUSIVE BIT(53) -#define CMN_CONFIG1_WP_VAL GENMASK(63, 0) -#define CMN_CONFIG2_WP_MASK GENMASK(63, 0) +#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(27, 24) +#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48) +#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51) +#define CMN_CONFIG_WP_GRP BIT_ULL(56) +#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57) +#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0) +#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0) #define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config) #define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config) @@ -155,7 +173,13 @@ #define CMN_WP_DOWN 2 -/* r0px probably don't exist in silicon, thankfully */ +enum cmn_model { + CMN_ANY = -1, + CMN600 = 1, + CI700 = 2, +}; + +/* CMN-600 r0px shouldn't exist in silicon, thankfully */ enum cmn_revision { CMN600_R1P0, CMN600_R1P1, @@ -163,6 +187,10 @@ enum cmn_revision { CMN600_R1P3, CMN600_R2P0, CMN600_R3P0, + CMN600_R3P1, + CI700_R0P0 = 0, + CI700_R1P0, + CI700_R2P0, }; enum cmn_node_type { @@ -174,9 +202,12 @@ enum cmn_node_type { CMN_TYPE_HNF, CMN_TYPE_XP, CMN_TYPE_SBSX, - CMN_TYPE_RNI = 0xa, + CMN_TYPE_MPAM_S, + CMN_TYPE_MPAM_NS, + CMN_TYPE_RNI, CMN_TYPE_RND = 0xd, CMN_TYPE_RNSAM = 0xf, + CMN_TYPE_MTSX, CMN_TYPE_CXRA = 0x100, CMN_TYPE_CXHA = 0x101, CMN_TYPE_CXLA = 0x102, @@ -189,32 +220,32 @@ struct arm_cmn_node { u16 id, logid; enum cmn_node_type type; + int dtm; union { - /* Device node */ + /* DN/HN-F/CXHA */ struct { - int to_xp; - /* DN/HN-F/CXHA */ - unsigned int occupid_val; - unsigned int occupid_count; + u8 occupid_val; + u8 occupid_count; }; /* XP */ - struct { - int dtc; - u32 pmu_config_low; - union { - u8 input_sel[4]; - __le32 pmu_config_high; - }; - s8 wp_event[4]; - }; + u8 dtc; }; - union { u8 event[4]; __le32 event_sel; }; }; +struct arm_cmn_dtm { + void __iomem *base; + u32 pmu_config_low; + union { + u8 input_sel[4]; + __le32 pmu_config_high; + }; + s8 wp_event[4]; +}; + struct arm_cmn_dtc { void __iomem *base; int irq; @@ -231,35 +262,238 @@ struct arm_cmn_dtc { struct arm_cmn { struct device *dev; void __iomem *base; + unsigned int state; enum cmn_revision rev; + enum cmn_model model; u8 mesh_x; u8 mesh_y; u16 num_xps; u16 num_dns; + bool multi_dtm; + u8 ports_used; + struct { + unsigned int rsp_vc_num : 2; + unsigned int dat_vc_num : 2; + }; + struct arm_cmn_node *xps; struct arm_cmn_node 
*dns; + struct arm_cmn_dtm *dtms; struct arm_cmn_dtc *dtc; unsigned int num_dtcs; int cpu; struct hlist_node cpuhp_node; - unsigned int state; struct pmu pmu; + struct dentry *debug; }; #define to_cmn(p) container_of(p, struct arm_cmn, pmu) static int arm_cmn_hp_state; +struct arm_cmn_nodeid { + u8 x; + u8 y; + u8 port; + u8 dev; +}; + +static int arm_cmn_xyidbits(const struct arm_cmn *cmn) +{ + int dim = max(cmn->mesh_x, cmn->mesh_y); + + return dim > 4 ? 3 : 2; +} + +static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id) +{ + struct arm_cmn_nodeid nid; + + if (cmn->num_xps == 1) { + nid.x = 0; + nid.y = 0; + nid.port = CMN_NODEID_1x1_PID(id); + nid.dev = CMN_NODEID_DEVID(id); + } else { + int bits = arm_cmn_xyidbits(cmn); + + nid.x = CMN_NODEID_X(id, bits); + nid.y = CMN_NODEID_Y(id, bits); + if (cmn->ports_used & 0xc) { + nid.port = CMN_NODEID_EXT_PID(id); + nid.dev = CMN_NODEID_EXT_DEVID(id); + } else { + nid.port = CMN_NODEID_PID(id); + nid.dev = CMN_NODEID_DEVID(id); + } + } + return nid; +} + +static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn, + const struct arm_cmn_node *dn) +{ + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + int xp_idx = cmn->mesh_x * nid.y + nid.x; + + return cmn->xps + xp_idx; +} +static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, + enum cmn_node_type type) +{ + struct arm_cmn_node *dn; + + for (dn = cmn->dns; dn->type; dn++) + if (dn->type == type) + return dn; + return NULL; +} + +struct dentry *arm_cmn_debugfs; + +#ifdef CONFIG_DEBUG_FS +static const char *arm_cmn_device_type(u8 type) +{ + switch(type) { + case 0x01: return " RN-I |"; + case 0x02: return " RN-D |"; + case 0x04: return " RN-F_B |"; + case 0x05: return "RN-F_B_E|"; + case 0x06: return " RN-F_A |"; + case 0x07: return "RN-F_A_E|"; + case 0x08: return " HN-T |"; + case 0x09: return " HN-I |"; + case 0x0a: return " HN-D |"; + case 0x0c: return " SN-F |"; + case 0x0d: return " SBSX |"; + case 0x0e: return " HN-F |"; + case 0x0f: return " SN-F_E |"; + case 0x10: return " SN-F_D |"; + case 0x11: return " CXHA |"; + case 0x12: return " CXRA |"; + case 0x13: return " CXRH |"; + case 0x14: return " RN-F_D |"; + case 0x15: return "RN-F_D_E|"; + case 0x16: return " RN-F_C |"; + case 0x17: return "RN-F_C_E|"; + case 0x1c: return " MTSX |"; + default: return " |"; + } +} + +static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) +{ + struct arm_cmn *cmn = s->private; + struct arm_cmn_node *dn; + + for (dn = cmn->dns; dn->type; dn++) { + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + + if (dn->type == CMN_TYPE_XP) + continue; + /* Ignore the extra components that will overlap on some ports */ + if (dn->type < CMN_TYPE_HNI) + continue; + + if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d) + continue; + + seq_printf(s, " #%-2d |", dn->logid); + return; + } + seq_puts(s, " |"); +} + +static int arm_cmn_map_show(struct seq_file *s, void *data) +{ + struct arm_cmn *cmn = s->private; + int x, y, p, pmax = fls(cmn->ports_used); + + seq_puts(s, " X"); + for (x = 0; x < cmn->mesh_x; x++) + seq_printf(s, " %d ", x); + seq_puts(s, "\nY P D+"); + y = cmn->mesh_y; + while (y--) { + int xp_base = cmn->mesh_x * y; + u8 port[6][CMN_MAX_DIMENSION]; + + for (x = 0; x < cmn->mesh_x; x++) + seq_puts(s, "--------+"); + + seq_printf(s, "\n%d |", y); + for (x = 0; x < cmn->mesh_x; x++) { + struct arm_cmn_node *xp = cmn->xps + xp_base + x; + void __iomem *base = xp->pmu_base - CMN_PMU_OFFSET; + + port[0][x] = 
readl_relaxed(base + CMN_MXP__CONNECT_INFO_P0); + port[1][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P1); + port[2][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P2); + port[3][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P3); + port[4][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P4); + port[5][x] = readl_relaxed(base + CMN_MXP__CONNECT_INFO_P5); + seq_printf(s, " XP #%-2d |", xp_base + x); + } + + seq_puts(s, "\n |"); + for (x = 0; x < cmn->mesh_x; x++) { + u8 dtc = cmn->xps[xp_base + x].dtc; + + if (dtc & (dtc - 1)) + seq_puts(s, " DTC ?? |"); + else + seq_printf(s, " DTC %ld |", __ffs(dtc)); + } + seq_puts(s, "\n |"); + for (x = 0; x < cmn->mesh_x; x++) + seq_puts(s, "........|"); + + for (p = 0; p < pmax; p++) { + seq_printf(s, "\n %d |", p); + for (x = 0; x < cmn->mesh_x; x++) + seq_puts(s, arm_cmn_device_type(port[p][x])); + seq_puts(s, "\n 0|"); + for (x = 0; x < cmn->mesh_x; x++) + arm_cmn_show_logid(s, x, y, p, 0); + seq_puts(s, "\n 1|"); + for (x = 0; x < cmn->mesh_x; x++) + arm_cmn_show_logid(s, x, y, p, 1); + } + seq_puts(s, "\n-----+"); + } + for (x = 0; x < cmn->mesh_x; x++) + seq_puts(s, "--------+"); + seq_puts(s, "\n"); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(arm_cmn_map); + +static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) +{ + const char *name = "map"; + + if (id > 0) + name = devm_kasprintf(cmn->dev, GFP_KERNEL, "map_%d", id); + if (!name) + return; + + cmn->debug = debugfs_create_file(name, 0444, arm_cmn_debugfs, cmn, &arm_cmn_map_fops); +} +#else +static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {} +#endif + struct arm_cmn_hw_event { struct arm_cmn_node *dn; u64 dtm_idx[2]; unsigned int dtc_idx; u8 dtcs_used; u8 num_dns; + u8 dtm_offset; }; #define for_each_hw_dn(hw, dn, i) \ @@ -283,6 +517,7 @@ static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos) struct arm_cmn_event_attr { struct device_attribute attr; + enum cmn_model model; enum cmn_node_type type; u8 eventid; u8 occupid; @@ -294,50 +529,22 @@ struct arm_cmn_format_attr { int config; }; -static int arm_cmn_xyidbits(const struct arm_cmn *cmn) -{ - return cmn->mesh_x > 4 || cmn->mesh_y > 4 ? 3 : 2; -} - -static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn, - struct arm_cmn_node *dn) -{ - int bits = arm_cmn_xyidbits(cmn); - int x = CMN_NODEID_X(dn->id, bits); - int y = CMN_NODEID_Y(dn->id, bits); - int xp_idx = cmn->mesh_x * y + x; - - dn->to_xp = (cmn->xps + xp_idx) - dn; -} - -static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn) -{ - return dn->type == CMN_TYPE_XP ? 
dn : dn + dn->to_xp; -} - -static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, - enum cmn_node_type type) -{ - int i; - - for (i = 0; i < cmn->num_dns; i++) - if (cmn->dns[i].type == type) - return &cmn->dns[i]; - return NULL; -} - -#define CMN_EVENT_ATTR(_name, _type, _eventid, _occupid) \ +#define CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid) \ (&((struct arm_cmn_event_attr[]) {{ \ .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \ + .model = _model, \ .type = _type, \ .eventid = _eventid, \ .occupid = _occupid, \ }})[0].attr.attr) -static bool arm_cmn_is_occup_event(enum cmn_node_type type, unsigned int id) +static bool arm_cmn_is_occup_event(enum cmn_model model, + enum cmn_node_type type, unsigned int id) { - return (type == CMN_TYPE_DVM && id == 0x05) || - (type == CMN_TYPE_HNF && id == 0x0f); + if (type == CMN_TYPE_DVM) + return (model == CMN600 && id == 0x05) || + (model == CI700 && id == 0x0c); + return type == CMN_TYPE_HNF && id == 0x0f; } static ssize_t arm_cmn_event_show(struct device *dev, @@ -355,7 +562,7 @@ static ssize_t arm_cmn_event_show(struct device *dev, "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n", eattr->type, eattr->eventid); - if (arm_cmn_is_occup_event(eattr->type, eattr->eventid)) + if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid)) return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n", eattr->type, eattr->eventid, eattr->occupid); @@ -370,60 +577,81 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev)); struct arm_cmn_event_attr *eattr; - enum cmn_node_type type; eattr = container_of(attr, typeof(*eattr), attr.attr); - type = eattr->type; - /* Watchpoints aren't nodes */ - if (type == CMN_TYPE_WP) - type = CMN_TYPE_XP; + if (!(eattr->model & cmn->model)) + return 0; + + /* Watchpoints aren't nodes, so avoid confusion */ + if (eattr->type == CMN_TYPE_WP) + return attr->mode; + + /* Hide XP events for unused interfaces/channels */ + if (eattr->type == CMN_TYPE_XP) { + unsigned int intf = (eattr->eventid >> 2) & 7; + unsigned int chan = eattr->eventid >> 5; + + if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3))) + return 0; + + if ((chan == 5 && cmn->rsp_vc_num < 2) || + (chan == 6 && cmn->dat_vc_num < 2)) + return 0; + } /* Revision-specific differences */ - if (cmn->rev < CMN600_R1P2) { - if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b) + if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) { + if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b) return 0; } - if (!arm_cmn_node(cmn, type)) + if (!arm_cmn_node(cmn, eattr->type)) return 0; return attr->mode; } -#define _CMN_EVENT_DVM(_name, _event, _occup) \ - CMN_EVENT_ATTR(dn_##_name, CMN_TYPE_DVM, _event, _occup) +#define _CMN_EVENT_DVM(_model, _name, _event, _occup) \ + CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup) #define CMN_EVENT_DTC(_name) \ - CMN_EVENT_ATTR(dtc_##_name, CMN_TYPE_DTC, 0, 0) -#define _CMN_EVENT_HNF(_name, _event, _occup) \ - CMN_EVENT_ATTR(hnf_##_name, CMN_TYPE_HNF, _event, _occup) + CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0) +#define _CMN_EVENT_HNF(_model, _name, _event, _occup) \ + CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup) #define CMN_EVENT_HNI(_name, _event) \ - CMN_EVENT_ATTR(hni_##_name, CMN_TYPE_HNI, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0) #define __CMN_EVENT_XP(_name, _event) \ - 
CMN_EVENT_ATTR(mxp_##_name, CMN_TYPE_XP, _event, 0) -#define CMN_EVENT_SBSX(_name, _event) \ - CMN_EVENT_ATTR(sbsx_##_name, CMN_TYPE_SBSX, _event, 0) -#define CMN_EVENT_RNID(_name, _event) \ - CMN_EVENT_ATTR(rnid_##_name, CMN_TYPE_RNI, _event, 0) - -#define CMN_EVENT_DVM(_name, _event) \ - _CMN_EVENT_DVM(_name, _event, 0) -#define CMN_EVENT_HNF(_name, _event) \ - _CMN_EVENT_HNF(_name, _event, 0) + CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0) +#define CMN_EVENT_SBSX(_model, _name, _event) \ + CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0) +#define CMN_EVENT_RNID(_model, _name, _event) \ + CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0) +#define CMN_EVENT_MTSX(_name, _event) \ + CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0) + +#define CMN_EVENT_DVM(_model, _name, _event) \ + _CMN_EVENT_DVM(_model, _name, _event, 0) +#define CMN_EVENT_HNF(_model, _name, _event) \ + _CMN_EVENT_HNF(_model, _name, _event, 0) #define _CMN_EVENT_XP(_name, _event) \ __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \ __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \ __CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \ __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)), \ __CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \ - __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)) + __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)), \ + __CMN_EVENT_XP(p2_##_name, (_event) | (6 << 2)), \ + __CMN_EVENT_XP(p3_##_name, (_event) | (7 << 2)) /* Good thing there are only 3 fundamental XP events... */ #define CMN_EVENT_XP(_name, _event) \ _CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \ _CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)), \ _CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)), \ - _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)) + _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \ + _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \ + _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \ + _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)) static struct attribute *arm_cmn_event_attrs[] = { @@ -434,115 +662,152 @@ static struct attribute *arm_cmn_event_attrs[] = { * slot, but our lazy short-cut of using the DTM counter index for * the PMU index as well happens to avoid that by construction. 
*/ - CMN_EVENT_DVM(rxreq_dvmop, 0x01), - CMN_EVENT_DVM(rxreq_dvmsync, 0x02), - CMN_EVENT_DVM(rxreq_dvmop_vmid_filtered, 0x03), - CMN_EVENT_DVM(rxreq_retried, 0x04), - _CMN_EVENT_DVM(rxreq_trk_occupancy_all, 0x05, 0), - _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmop, 0x05, 1), - _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmsync, 0x05, 2), - - CMN_EVENT_HNF(cache_miss, 0x01), - CMN_EVENT_HNF(slc_sf_cache_access, 0x02), - CMN_EVENT_HNF(cache_fill, 0x03), - CMN_EVENT_HNF(pocq_retry, 0x04), - CMN_EVENT_HNF(pocq_reqs_recvd, 0x05), - CMN_EVENT_HNF(sf_hit, 0x06), - CMN_EVENT_HNF(sf_evictions, 0x07), - CMN_EVENT_HNF(dir_snoops_sent, 0x08), - CMN_EVENT_HNF(brd_snoops_sent, 0x09), - CMN_EVENT_HNF(slc_eviction, 0x0a), - CMN_EVENT_HNF(slc_fill_invalid_way, 0x0b), - CMN_EVENT_HNF(mc_retries, 0x0c), - CMN_EVENT_HNF(mc_reqs, 0x0d), - CMN_EVENT_HNF(qos_hh_retry, 0x0e), - _CMN_EVENT_HNF(qos_pocq_occupancy_all, 0x0f, 0), - _CMN_EVENT_HNF(qos_pocq_occupancy_read, 0x0f, 1), - _CMN_EVENT_HNF(qos_pocq_occupancy_write, 0x0f, 2), - _CMN_EVENT_HNF(qos_pocq_occupancy_atomic, 0x0f, 3), - _CMN_EVENT_HNF(qos_pocq_occupancy_stash, 0x0f, 4), - CMN_EVENT_HNF(pocq_addrhaz, 0x10), - CMN_EVENT_HNF(pocq_atomic_addrhaz, 0x11), - CMN_EVENT_HNF(ld_st_swp_adq_full, 0x12), - CMN_EVENT_HNF(cmp_adq_full, 0x13), - CMN_EVENT_HNF(txdat_stall, 0x14), - CMN_EVENT_HNF(txrsp_stall, 0x15), - CMN_EVENT_HNF(seq_full, 0x16), - CMN_EVENT_HNF(seq_hit, 0x17), - CMN_EVENT_HNF(snp_sent, 0x18), - CMN_EVENT_HNF(sfbi_dir_snp_sent, 0x19), - CMN_EVENT_HNF(sfbi_brd_snp_sent, 0x1a), - CMN_EVENT_HNF(snp_sent_untrk, 0x1b), - CMN_EVENT_HNF(intv_dirty, 0x1c), - CMN_EVENT_HNF(stash_snp_sent, 0x1d), - CMN_EVENT_HNF(stash_data_pull, 0x1e), - CMN_EVENT_HNF(snp_fwded, 0x1f), - - CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), - CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), - CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22), - CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23), - CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24), - CMN_EVENT_HNI(rrt_rd_alloc, 0x25), - CMN_EVENT_HNI(rrt_wr_alloc, 0x26), - CMN_EVENT_HNI(rdt_rd_alloc, 0x27), - CMN_EVENT_HNI(rdt_wr_alloc, 0x28), - CMN_EVENT_HNI(wdb_alloc, 0x29), - CMN_EVENT_HNI(txrsp_retryack, 0x2a), - CMN_EVENT_HNI(arvalid_no_arready, 0x2b), - CMN_EVENT_HNI(arready_no_arvalid, 0x2c), - CMN_EVENT_HNI(awvalid_no_awready, 0x2d), - CMN_EVENT_HNI(awready_no_awvalid, 0x2e), - CMN_EVENT_HNI(wvalid_no_wready, 0x2f), - CMN_EVENT_HNI(txdat_stall, 0x30), - CMN_EVENT_HNI(nonpcie_serialization, 0x31), - CMN_EVENT_HNI(pcie_serialization, 0x32), - - CMN_EVENT_XP(txflit_valid, 0x01), - CMN_EVENT_XP(txflit_stall, 0x02), - CMN_EVENT_XP(partial_dat_flit, 0x03), + CMN_EVENT_DVM(CMN600, rxreq_dvmop, 0x01), + CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02), + CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03), + CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04), + _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0), + _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1), + _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2), + CMN_EVENT_DVM(CI700, dvmop_tlbi, 0x01), + CMN_EVENT_DVM(CI700, dvmop_bpi, 0x02), + CMN_EVENT_DVM(CI700, dvmop_pici, 0x03), + CMN_EVENT_DVM(CI700, dvmop_vici, 0x04), + CMN_EVENT_DVM(CI700, dvmsync, 0x05), + CMN_EVENT_DVM(CI700, vmid_filtered, 0x06), + CMN_EVENT_DVM(CI700, rndop_filtered, 0x07), + CMN_EVENT_DVM(CI700, retry, 0x08), + CMN_EVENT_DVM(CI700, txsnp_flitv, 0x09), + CMN_EVENT_DVM(CI700, txsnp_stall, 0x0a), + CMN_EVENT_DVM(CI700, trkfull, 0x0b), + _CMN_EVENT_DVM(CI700, trk_occupancy_all, 0x0c, 0), + _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop, 0x0c, 
1), + _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync, 0x0c, 2), + + CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01), + CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02), + CMN_EVENT_HNF(CMN_ANY, cache_fill, 0x03), + CMN_EVENT_HNF(CMN_ANY, pocq_retry, 0x04), + CMN_EVENT_HNF(CMN_ANY, pocq_reqs_recvd, 0x05), + CMN_EVENT_HNF(CMN_ANY, sf_hit, 0x06), + CMN_EVENT_HNF(CMN_ANY, sf_evictions, 0x07), + CMN_EVENT_HNF(CMN_ANY, dir_snoops_sent, 0x08), + CMN_EVENT_HNF(CMN_ANY, brd_snoops_sent, 0x09), + CMN_EVENT_HNF(CMN_ANY, slc_eviction, 0x0a), + CMN_EVENT_HNF(CMN_ANY, slc_fill_invalid_way, 0x0b), + CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c), + CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d), + CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3), + _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4), + CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10), + CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11), + CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12), + CMN_EVENT_HNF(CMN_ANY, cmp_adq_full, 0x13), + CMN_EVENT_HNF(CMN_ANY, txdat_stall, 0x14), + CMN_EVENT_HNF(CMN_ANY, txrsp_stall, 0x15), + CMN_EVENT_HNF(CMN_ANY, seq_full, 0x16), + CMN_EVENT_HNF(CMN_ANY, seq_hit, 0x17), + CMN_EVENT_HNF(CMN_ANY, snp_sent, 0x18), + CMN_EVENT_HNF(CMN_ANY, sfbi_dir_snp_sent, 0x19), + CMN_EVENT_HNF(CMN_ANY, sfbi_brd_snp_sent, 0x1a), + CMN_EVENT_HNF(CMN_ANY, snp_sent_untrk, 0x1b), + CMN_EVENT_HNF(CMN_ANY, intv_dirty, 0x1c), + CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d), + CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e), + CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f), + CMN_EVENT_HNF(CI700, atomic_fwd, 0x20), + CMN_EVENT_HNF(CI700, mpam_hardlim, 0x21), + CMN_EVENT_HNF(CI700, mpam_softlim, 0x22), + + CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), + CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), + CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22), + CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23), + CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24), + CMN_EVENT_HNI(rrt_rd_alloc, 0x25), + CMN_EVENT_HNI(rrt_wr_alloc, 0x26), + CMN_EVENT_HNI(rdt_rd_alloc, 0x27), + CMN_EVENT_HNI(rdt_wr_alloc, 0x28), + CMN_EVENT_HNI(wdb_alloc, 0x29), + CMN_EVENT_HNI(txrsp_retryack, 0x2a), + CMN_EVENT_HNI(arvalid_no_arready, 0x2b), + CMN_EVENT_HNI(arready_no_arvalid, 0x2c), + CMN_EVENT_HNI(awvalid_no_awready, 0x2d), + CMN_EVENT_HNI(awready_no_awvalid, 0x2e), + CMN_EVENT_HNI(wvalid_no_wready, 0x2f), + CMN_EVENT_HNI(txdat_stall, 0x30), + CMN_EVENT_HNI(nonpcie_serialization, 0x31), + CMN_EVENT_HNI(pcie_serialization, 0x32), + + CMN_EVENT_XP(txflit_valid, 0x01), + CMN_EVENT_XP(txflit_stall, 0x02), + CMN_EVENT_XP(partial_dat_flit, 0x03), /* We treat watchpoints as a special made-up class of XP events */ - CMN_EVENT_ATTR(watchpoint_up, CMN_TYPE_WP, 0, 0), - CMN_EVENT_ATTR(watchpoint_down, CMN_TYPE_WP, 2, 0), - - CMN_EVENT_SBSX(rd_req, 0x01), - CMN_EVENT_SBSX(wr_req, 0x02), - CMN_EVENT_SBSX(cmo_req, 0x03), - CMN_EVENT_SBSX(txrsp_retryack, 0x04), - CMN_EVENT_SBSX(txdat_flitv, 0x05), - CMN_EVENT_SBSX(txrsp_flitv, 0x06), - CMN_EVENT_SBSX(rd_req_trkr_occ_cnt_ovfl, 0x11), - CMN_EVENT_SBSX(wr_req_trkr_occ_cnt_ovfl, 0x12), - CMN_EVENT_SBSX(cmo_req_trkr_occ_cnt_ovfl, 0x13), - CMN_EVENT_SBSX(wdb_occ_cnt_ovfl, 0x14), - CMN_EVENT_SBSX(rd_axi_trkr_occ_cnt_ovfl, 0x15), - CMN_EVENT_SBSX(cmo_axi_trkr_occ_cnt_ovfl, 0x16), - CMN_EVENT_SBSX(arvalid_no_arready, 0x21), - CMN_EVENT_SBSX(awvalid_no_awready, 
0x22), - CMN_EVENT_SBSX(wvalid_no_wready, 0x23), - CMN_EVENT_SBSX(txdat_stall, 0x24), - CMN_EVENT_SBSX(txrsp_stall, 0x25), - - CMN_EVENT_RNID(s0_rdata_beats, 0x01), - CMN_EVENT_RNID(s1_rdata_beats, 0x02), - CMN_EVENT_RNID(s2_rdata_beats, 0x03), - CMN_EVENT_RNID(rxdat_flits, 0x04), - CMN_EVENT_RNID(txdat_flits, 0x05), - CMN_EVENT_RNID(txreq_flits_total, 0x06), - CMN_EVENT_RNID(txreq_flits_retried, 0x07), - CMN_EVENT_RNID(rrt_occ_ovfl, 0x08), - CMN_EVENT_RNID(wrt_occ_ovfl, 0x09), - CMN_EVENT_RNID(txreq_flits_replayed, 0x0a), - CMN_EVENT_RNID(wrcancel_sent, 0x0b), - CMN_EVENT_RNID(s0_wdata_beats, 0x0c), - CMN_EVENT_RNID(s1_wdata_beats, 0x0d), - CMN_EVENT_RNID(s2_wdata_beats, 0x0e), - CMN_EVENT_RNID(rrt_alloc, 0x0f), - CMN_EVENT_RNID(wrt_alloc, 0x10), - CMN_EVENT_RNID(rdb_unord, 0x11), - CMN_EVENT_RNID(rdb_replay, 0x12), - CMN_EVENT_RNID(rdb_hybrid, 0x13), - CMN_EVENT_RNID(rdb_ord, 0x14), + CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0), + CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0), + + CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01), + CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02), + CMN_EVENT_SBSX(CMN_ANY, cmo_req, 0x03), + CMN_EVENT_SBSX(CMN_ANY, txrsp_retryack, 0x04), + CMN_EVENT_SBSX(CMN_ANY, txdat_flitv, 0x05), + CMN_EVENT_SBSX(CMN_ANY, txrsp_flitv, 0x06), + CMN_EVENT_SBSX(CMN_ANY, rd_req_trkr_occ_cnt_ovfl, 0x11), + CMN_EVENT_SBSX(CMN_ANY, wr_req_trkr_occ_cnt_ovfl, 0x12), + CMN_EVENT_SBSX(CMN_ANY, cmo_req_trkr_occ_cnt_ovfl, 0x13), + CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14), + CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15), + CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16), + CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl, 0x17), + CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21), + CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22), + CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23), + CMN_EVENT_SBSX(CMN_ANY, txdat_stall, 0x24), + CMN_EVENT_SBSX(CMN_ANY, txrsp_stall, 0x25), + + CMN_EVENT_RNID(CMN_ANY, s0_rdata_beats, 0x01), + CMN_EVENT_RNID(CMN_ANY, s1_rdata_beats, 0x02), + CMN_EVENT_RNID(CMN_ANY, s2_rdata_beats, 0x03), + CMN_EVENT_RNID(CMN_ANY, rxdat_flits, 0x04), + CMN_EVENT_RNID(CMN_ANY, txdat_flits, 0x05), + CMN_EVENT_RNID(CMN_ANY, txreq_flits_total, 0x06), + CMN_EVENT_RNID(CMN_ANY, txreq_flits_retried, 0x07), + CMN_EVENT_RNID(CMN_ANY, rrt_occ_ovfl, 0x08), + CMN_EVENT_RNID(CMN_ANY, wrt_occ_ovfl, 0x09), + CMN_EVENT_RNID(CMN_ANY, txreq_flits_replayed, 0x0a), + CMN_EVENT_RNID(CMN_ANY, wrcancel_sent, 0x0b), + CMN_EVENT_RNID(CMN_ANY, s0_wdata_beats, 0x0c), + CMN_EVENT_RNID(CMN_ANY, s1_wdata_beats, 0x0d), + CMN_EVENT_RNID(CMN_ANY, s2_wdata_beats, 0x0e), + CMN_EVENT_RNID(CMN_ANY, rrt_alloc, 0x0f), + CMN_EVENT_RNID(CMN_ANY, wrt_alloc, 0x10), + CMN_EVENT_RNID(CMN600, rdb_unord, 0x11), + CMN_EVENT_RNID(CMN600, rdb_replay, 0x12), + CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13), + CMN_EVENT_RNID(CMN600, rdb_ord, 0x14), + CMN_EVENT_RNID(CI700, padb_occ_ovfl, 0x11), + CMN_EVENT_RNID(CI700, rpdb_occ_ovfl, 0x12), + CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1, 0x13), + CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2, 0x14), + CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3, 0x15), + CMN_EVENT_RNID(CI700, wrt_throttled, 0x16), + + CMN_EVENT_MTSX(tc_lookup, 0x01), + CMN_EVENT_MTSX(tc_fill, 0x02), + CMN_EVENT_MTSX(tc_miss, 0x03), + CMN_EVENT_MTSX(tdb_forward, 0x04), + CMN_EVENT_MTSX(tcq_hazard, 0x05), + CMN_EVENT_MTSX(tcq_rd_alloc, 0x06), + CMN_EVENT_MTSX(tcq_wr_alloc, 0x07), + CMN_EVENT_MTSX(tcq_cmo_alloc, 0x08), + CMN_EVENT_MTSX(axi_rd_req, 0x09), + 
CMN_EVENT_MTSX(axi_wr_req, 0x0a), + CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b), + CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c), NULL }; @@ -644,7 +909,8 @@ static u32 arm_cmn_wp_config(struct perf_event *event) config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) | FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) | - FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc); + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc) | + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL2, dev >> 1); if (combine && !grp) config |= CMN_DTM_WPn_CONFIG_WP_COMBINE; @@ -679,18 +945,19 @@ static void arm_cmn_pmu_disable(struct pmu *pmu) static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw, bool snapshot) { + struct arm_cmn_dtm *dtm = NULL; struct arm_cmn_node *dn; - unsigned int i, offset; - u64 count = 0; + unsigned int i, offset, dtm_idx; + u64 reg, count = 0; offset = snapshot ? CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT; for_each_hw_dn(hw, dn, i) { - struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn); - int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); - u64 reg = readq_relaxed(xp->pmu_base + offset); - u16 dtm_count = reg >> (dtm_idx * 16); - - count += dtm_count; + if (dtm != &cmn->dtms[dn->dtm]) { + dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; + reg = readq_relaxed(dtm->base + offset); + } + dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + count += (u16)(reg >> (dtm_idx * 16)); } return count; } @@ -774,8 +1041,10 @@ static void arm_cmn_event_start(struct perf_event *event, int flags) u64 mask = CMN_EVENT_WP_MASK(event); for_each_hw_dn(hw, dn, i) { - writeq_relaxed(val, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx)); - writeq_relaxed(mask, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx)); + void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); + + writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx)); + writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx)); } } else for_each_hw_dn(hw, dn, i) { int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); @@ -800,8 +1069,10 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) int wp_idx = arm_cmn_wp_idx(event); for_each_hw_dn(hw, dn, i) { - writeq_relaxed(0, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx)); - writeq_relaxed(~0ULL, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx)); + void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset); + + writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx)); + writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx)); } } else for_each_hw_dn(hw, dn, i) { int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); @@ -814,14 +1085,15 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags) } struct arm_cmn_val { - u8 dtm_count[CMN_MAX_XPS]; - u8 occupid[CMN_MAX_XPS]; - u8 wp[CMN_MAX_XPS][4]; + u8 dtm_count[CMN_MAX_DTMS]; + u8 occupid[CMN_MAX_DTMS]; + u8 wp[CMN_MAX_DTMS][4]; int dtc_count; bool cycles; }; -static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *event) +static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val, + struct perf_event *event) { struct arm_cmn_hw_event *hw = to_cmn_hw(event); struct arm_cmn_node *dn; @@ -839,33 +1111,33 @@ static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *ev } val->dtc_count++; - if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) occupid = CMN_EVENT_OCCUPID(event) + 1; else occupid = 0; for_each_hw_dn(hw, dn, i) { - int wp_idx, xp = arm_cmn_node_to_xp(dn)->logid; + int wp_idx, dtm = dn->dtm; - 
val->dtm_count[xp]++; - val->occupid[xp] = occupid; + val->dtm_count[dtm]++; + val->occupid[dtm] = occupid; if (type != CMN_TYPE_WP) continue; wp_idx = arm_cmn_wp_idx(event); - val->wp[xp][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; + val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; } } -static int arm_cmn_validate_group(struct perf_event *event) +static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event) { struct arm_cmn_hw_event *hw = to_cmn_hw(event); struct arm_cmn_node *dn; struct perf_event *sibling, *leader = event->group_leader; enum cmn_node_type type; - struct arm_cmn_val val; - int i; + struct arm_cmn_val *val; + int i, ret = -EINVAL; u8 occupid; if (leader == event) @@ -874,54 +1146,61 @@ static int arm_cmn_validate_group(struct perf_event *event) if (event->pmu != leader->pmu && !is_software_event(leader)) return -EINVAL; - memset(&val, 0, sizeof(val)); + val = kzalloc(sizeof(*val), GFP_KERNEL); + if (!val) + return -ENOMEM; - arm_cmn_val_add_event(&val, leader); + arm_cmn_val_add_event(cmn, val, leader); for_each_sibling_event(sibling, leader) - arm_cmn_val_add_event(&val, sibling); + arm_cmn_val_add_event(cmn, val, sibling); type = CMN_EVENT_TYPE(event); - if (type == CMN_TYPE_DTC) - return val.cycles ? -EINVAL : 0; + if (type == CMN_TYPE_DTC) { + ret = val->cycles ? -EINVAL : 0; + goto done; + } - if (val.dtc_count == CMN_DT_NUM_COUNTERS) - return -EINVAL; + if (val->dtc_count == CMN_DT_NUM_COUNTERS) + goto done; - if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) occupid = CMN_EVENT_OCCUPID(event) + 1; else occupid = 0; for_each_hw_dn(hw, dn, i) { - int wp_idx, wp_cmb, xp = arm_cmn_node_to_xp(dn)->logid; + int wp_idx, wp_cmb, dtm = dn->dtm; - if (val.dtm_count[xp] == CMN_DTM_NUM_COUNTERS) - return -EINVAL; + if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS) + goto done; - if (occupid && val.occupid[xp] && occupid != val.occupid[xp]) - return -EINVAL; + if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm]) + goto done; if (type != CMN_TYPE_WP) continue; wp_idx = arm_cmn_wp_idx(event); - if (val.wp[xp][wp_idx]) - return -EINVAL; + if (val->wp[dtm][wp_idx]) + goto done; - wp_cmb = val.wp[xp][wp_idx ^ 1]; + wp_cmb = val->wp[dtm][wp_idx ^ 1]; if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1) - return -EINVAL; + goto done; } - return 0; + ret = 0; +done: + kfree(val); + return ret; } static int arm_cmn_event_init(struct perf_event *event) { struct arm_cmn *cmn = to_cmn(event->pmu); struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; enum cmn_node_type type; - unsigned int i; bool bynodeid; u16 nodeid, eventid; @@ -947,38 +1226,37 @@ static int arm_cmn_event_init(struct perf_event *event) eventid = CMN_EVENT_EVENTID(event); if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN) return -EINVAL; + /* ...but the DTM may depend on which port we're watching */ + if (cmn->multi_dtm) + hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2; } bynodeid = CMN_EVENT_BYNODEID(event); nodeid = CMN_EVENT_NODEID(event); hw->dn = arm_cmn_node(cmn, type); - for (i = hw->dn - cmn->dns; i < cmn->num_dns && cmn->dns[i].type == type; i++) { - if (!bynodeid) { - hw->num_dns++; - } else if (cmn->dns[i].id != nodeid) { + if (!hw->dn) + return -EINVAL; + for (dn = hw->dn; dn->type == type; dn++) { + if (bynodeid && dn->id != nodeid) { hw->dn++; - } else { - hw->num_dns = 1; - break; + continue; } + hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc; + 
hw->num_dns++; + if (bynodeid) + break; } if (!hw->num_dns) { - int bits = arm_cmn_xyidbits(cmn); + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid); dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", - nodeid, CMN_NODEID_X(nodeid, bits), CMN_NODEID_Y(nodeid, bits), - CMN_NODEID_PID(nodeid), CMN_NODEID_DEVID(nodeid), type); + nodeid, nid.x, nid.y, nid.port, nid.dev, type); return -EINVAL; } - /* - * By assuming events count in all DTC domains, we cunningly avoid - * needing to know anything about how XPs are assigned to domains. - */ - hw->dtcs_used = (1U << cmn->num_dtcs) - 1; - return arm_cmn_validate_group(event); + return arm_cmn_validate_group(cmn, event); } static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, @@ -988,17 +1266,17 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, enum cmn_node_type type = CMN_EVENT_TYPE(event); while (i--) { - struct arm_cmn_node *xp = arm_cmn_node_to_xp(hw->dn + i); + struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset; unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); if (type == CMN_TYPE_WP) - hw->dn[i].wp_event[arm_cmn_wp_idx(event)] = -1; + dtm->wp_event[arm_cmn_wp_idx(event)] = -1; - if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) hw->dn[i].occupid_count--; - xp->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); - writel_relaxed(xp->pmu_config_low, xp->pmu_base + CMN_DTM_PMU_CONFIG); + dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); + writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG); } memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); @@ -1040,12 +1318,12 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) /* ...then the local counters to feed it. 
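 * Each DTM packs its 16-bit event counters into a single 64-bit
 * PMEVCNT register (four lanes fit in 64 bits), so lane dtm_idx of a
 * read-back value is extracted as (u16)(reg >> (dtm_idx * 16)), as
 * arm_cmn_read_dtm() does above. A minimal sketch of that extraction,
 * with made-up register contents:
 *
 *	u64 reg = 0x0004000300020001ULL;	// lanes 3..0
 *	u16 lane2 = reg >> (2 * 16);		// == 0x0003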
*/ for_each_hw_dn(hw, dn, i) { - struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn); + struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset; unsigned int dtm_idx, shift; u64 reg; dtm_idx = 0; - while (xp->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) + while (dtm->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) if (++dtm_idx == CMN_DTM_NUM_COUNTERS) goto free_dtms; @@ -1055,26 +1333,28 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) int tmp, wp_idx = arm_cmn_wp_idx(event); u32 cfg = arm_cmn_wp_config(event); - if (dn->wp_event[wp_idx] >= 0) + if (dtm->wp_event[wp_idx] >= 0) goto free_dtms; - tmp = dn->wp_event[wp_idx ^ 1]; + tmp = dtm->wp_event[wp_idx ^ 1]; if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != CMN_EVENT_WP_COMBINE(dtc->counters[tmp])) goto free_dtms; input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; - dn->wp_event[wp_idx] = dtc_idx; - writel_relaxed(cfg, dn->pmu_base + CMN_DTM_WPn_CONFIG(wp_idx)); + dtm->wp_event[wp_idx] = dtc_idx; + writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx)); } else { - unsigned int port = CMN_NODEID_PID(dn->id); - unsigned int dev = CMN_NODEID_DEVID(dn->id); + struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + + if (cmn->multi_dtm) + nid.port %= 2; input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx + - (port << 4) + (dev << 2); + (nid.port << 4) + (nid.dev << 2); - if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) { - int occupid = CMN_EVENT_OCCUPID(event); + if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) { + u8 occupid = CMN_EVENT_OCCUPID(event); if (dn->occupid_count == 0) { dn->occupid_val = occupid; @@ -1089,13 +1369,13 @@ static int arm_cmn_event_add(struct perf_event *event, int flags) arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); - xp->input_sel[dtm_idx] = input_sel; + dtm->input_sel[dtm_idx] = input_sel; shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx); - xp->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); - xp->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; - xp->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); - reg = (u64)le32_to_cpu(xp->pmu_config_high) << 32 | xp->pmu_config_low; - writeq_relaxed(reg, xp->pmu_base + CMN_DTM_PMU_CONFIG); + dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); + dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; + dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); + reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low; + writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG); } /* Go go go! 
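 * ...meaning the event is now fully plumbed through. For reference,
 * the non-watchpoint input selector computed above is plain
 * arithmetic: base selector + counter slot + (port << 4) + (dev << 2).
 * A worked example, assuming the node sits on port 1, device 2 and
 * lands in DTM counter slot 0:
 *
 *	input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + 0 + (1 << 4) + (2 << 2);
 *	// i.e. the base selector plus 0x18; with multi-DTM the port
 *	// number is first reduced mod 2, as in the code above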
*/ @@ -1147,23 +1427,47 @@ static int arm_cmn_commit_txn(struct pmu *pmu) return 0; } -static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +static void arm_cmn_migrate(struct arm_cmn *cmn, unsigned int cpu) +{ + unsigned int i; + + perf_pmu_migrate_context(&cmn->pmu, cmn->cpu, cpu); + for (i = 0; i < cmn->num_dtcs; i++) + irq_set_affinity(cmn->dtc[i].irq, cpumask_of(cpu)); + cmn->cpu = cpu; +} + +static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) { struct arm_cmn *cmn; - unsigned int i, target; + int node; - cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node); - if (cpu != cmn->cpu) - return 0; + cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); + node = dev_to_node(cmn->dev); + if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node) + arm_cmn_migrate(cmn, cpu); + return 0; +} + +static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct arm_cmn *cmn; + unsigned int target; + int node; + cpumask_t mask; - target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) + cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node); + if (cpu != cmn->cpu) return 0; - perf_pmu_migrate_context(&cmn->pmu, cpu, target); - for (i = 0; i < cmn->num_dtcs; i++) - irq_set_affinity(cmn->dtc[i].irq, cpumask_of(target)); - cmn->cpu = target; + node = dev_to_node(cmn->dev); + if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && + cpumask_andnot(&mask, &mask, cpumask_of(cpu))) + target = cpumask_any(&mask); + else + target = cpumask_any_but(cpu_online_mask, cpu); + if (target < nr_cpu_ids) + arm_cmn_migrate(cmn, target); return 0; } @@ -1231,23 +1535,22 @@ static int arm_cmn_init_irqs(struct arm_cmn *cmn) return 0; } -static void arm_cmn_init_dtm(struct arm_cmn_node *xp) +static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, int idx) { int i; + dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx); + dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; for (i = 0; i < 4; i++) { - xp->wp_event[i] = -1; - writeq_relaxed(0, xp->pmu_base + CMN_DTM_WPn_MASK(i)); - writeq_relaxed(~0ULL, xp->pmu_base + CMN_DTM_WPn_VAL(i)); + dtm->wp_event[i] = -1; + writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i)); + writeq_relaxed(~0ULL, dtm->base + CMN_DTM_WPn_VAL(i)); } - xp->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; - xp->dtc = -1; } static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx) { struct arm_cmn_dtc *dtc = cmn->dtc + idx; - struct arm_cmn_node *xp; dtc->base = dn->pmu_base - CMN_PMU_OFFSET; dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); @@ -1258,10 +1561,6 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); writel_relaxed(CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); - /* We do at least know that a DTC's XP must be in that DTC's domain */ - xp = arm_cmn_node_to_xp(dn); - xp->dtc = idx; - return 0; } @@ -1278,8 +1577,9 @@ static int arm_cmn_node_cmp(const void *a, const void *b) static int arm_cmn_init_dtcs(struct arm_cmn *cmn) { - struct arm_cmn_node *dn; + struct arm_cmn_node *dn, *xp; int dtc_idx = 0; + u8 dtcs_present = (1 << cmn->num_dtcs) - 1; cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); if (!cmn->dtc) @@ -1289,14 +1589,26 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); - for (dn = cmn->dns; dn < cmn->dns 
+ cmn->num_dns; dn++) { - if (dn->type != CMN_TYPE_XP) - arm_cmn_init_node_to_xp(cmn, dn); - else if (cmn->num_dtcs == 1) - dn->dtc = 0; + for (dn = cmn->dns; dn->type; dn++) { + if (dn->type == CMN_TYPE_XP) { + dn->dtc &= dtcs_present; + continue; + } - if (dn->type == CMN_TYPE_DTC) - arm_cmn_init_dtc(cmn, dn, dtc_idx++); + xp = arm_cmn_node_to_xp(cmn, dn); + dn->dtm = xp->dtm; + if (cmn->multi_dtm) + dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2; + + if (dn->type == CMN_TYPE_DTC) { + int err; + /* We do at least know that a DTC's XP must be in that DTC's domain */ + if (xp->dtc == 0xf) + xp->dtc = 1 << dtc_idx; + err = arm_cmn_init_dtc(cmn, dn, dtc_idx++); + if (err) + return err; + } /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ if (dn->type == CMN_TYPE_RND) @@ -1335,19 +1647,25 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) { void __iomem *cfg_region; struct arm_cmn_node cfg, *dn; + struct arm_cmn_dtm *dtm; u16 child_count, child_poff; u32 xp_offset[CMN_MAX_XPS]; u64 reg; int i, j; + size_t sz; + + arm_cmn_init_node_info(cmn, rgn_offset, &cfg); + if (cfg.type != CMN_TYPE_CFG) + return -ENODEV; cfg_region = cmn->base + rgn_offset; reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2); cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); - dev_dbg(cmn->dev, "periph_id_2 revision: %d\n", cmn->rev); - arm_cmn_init_node_info(cmn, rgn_offset, &cfg); - if (cfg.type != CMN_TYPE_CFG) - return -ENODEV; + reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL); + cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN; + cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg); + cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg); reg = readq_relaxed(cfg_region + CMN_CHILD_INFO); child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); @@ -1365,20 +1683,28 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); } - /* Cheeky +1 to help terminate pointer-based iteration */ - cmn->dns = devm_kcalloc(cmn->dev, cmn->num_dns + 1, - sizeof(*cmn->dns), GFP_KERNEL); - if (!cmn->dns) + /* Cheeky +1 to help terminate pointer-based iteration later */ + dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL); + if (!dn) + return -ENOMEM; + + /* Initial safe upper bound on DTMs for any possible mesh layout */ + i = cmn->num_xps; + if (cmn->multi_dtm) + i += cmn->num_xps + 1; + dtm = devm_kcalloc(cmn->dev, i, sizeof(*dtm), GFP_KERNEL); + if (!dtm) return -ENOMEM; /* Pass 2: now we can actually populate the nodes */ - dn = cmn->dns; + cmn->dns = dn; + cmn->dtms = dtm; for (i = 0; i < cmn->num_xps; i++) { void __iomem *xp_region = cmn->base + xp_offset[i]; struct arm_cmn_node *xp = dn++; + unsigned int xp_ports = 0; arm_cmn_init_node_info(cmn, xp_offset[i], xp); - arm_cmn_init_dtm(xp); /* * Thanks to the order in which XP logical IDs seem to be * assigned, we can handily infer the mesh X dimension by @@ -1388,6 +1714,40 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) if (xp->id == (1 << 3)) cmn->mesh_x = xp->logid; + if (cmn->model == CMN600) + xp->dtc = 0xf; + else + xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO); + + xp->dtm = dtm - cmn->dtms; + arm_cmn_init_dtm(dtm++, xp, 0); + /* + * Keeping track of connected ports will let us filter out + * unnecessary XP events easily. 
We can also reliably infer the + * "extra device ports" configuration for the node ID format + * from this, since in that case we will see at least one XP + * with port 2 connected, for the HN-D. + */ + if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P0)) + xp_ports |= BIT(0); + if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P1)) + xp_ports |= BIT(1); + if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P2)) + xp_ports |= BIT(2); + if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P3)) + xp_ports |= BIT(3); + if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P4)) + xp_ports |= BIT(4); + if (readq_relaxed(xp_region + CMN_MXP__CONNECT_INFO_P5)) + xp_ports |= BIT(5); + + if (cmn->multi_dtm && (xp_ports & 0xc)) + arm_cmn_init_dtm(dtm++, xp, 1); + if (cmn->multi_dtm && (xp_ports & 0x30)) + arm_cmn_init_dtm(dtm++, xp, 2); + + cmn->ports_used |= xp_ports; + reg = readq_relaxed(xp_region + CMN_CHILD_INFO); child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); @@ -1422,11 +1782,14 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) case CMN_TYPE_SBSX: case CMN_TYPE_RNI: case CMN_TYPE_RND: + case CMN_TYPE_MTSX: case CMN_TYPE_CXRA: case CMN_TYPE_CXHA: dn++; break; /* Nothing to see here */ + case CMN_TYPE_MPAM_S: + case CMN_TYPE_MPAM_NS: case CMN_TYPE_RNSAM: case CMN_TYPE_CXLA: break; @@ -1441,6 +1804,16 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) /* Correct for any nodes we skipped */ cmn->num_dns = dn - cmn->dns; + sz = (void *)(dn + 1) - (void *)cmn->dns; + dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL); + if (dn) + cmn->dns = dn; + + sz = (void *)dtm - (void *)cmn->dtms; + dtm = devm_krealloc(cmn->dev, cmn->dtms, sz, GFP_KERNEL); + if (dtm) + cmn->dtms = dtm; + /* * If mesh_x wasn't set during discovery then we never saw * an XP at (0,1), thus we must have an Nx1 configuration. @@ -1449,13 +1822,20 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) cmn->mesh_x = cmn->num_xps; cmn->mesh_y = cmn->num_xps / cmn->mesh_x; - dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n", - cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn)); + /* 1x1 config plays havoc with XP event encodings */ + if (cmn->num_xps == 1) + dev_warn(cmn->dev, "1x1 config not fully supported, translate XP events manually\n"); + + dev_dbg(cmn->dev, "model %d, periph_id_2 revision %d\n", cmn->model, cmn->rev); + reg = cmn->ports_used; + dev_dbg(cmn->dev, "mesh %dx%d, ID width %d, ports %6pbl%s\n", + cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn), ®, + cmn->multi_dtm ? 
", multi-DTM" : ""); return 0; } -static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) +static int arm_cmn600_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) { struct resource *cfg, *root; @@ -1482,21 +1862,11 @@ static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) return root->start - cfg->start; } -static int arm_cmn_of_probe(struct platform_device *pdev, struct arm_cmn *cmn) +static int arm_cmn600_of_probe(struct device_node *np) { - struct device_node *np = pdev->dev.of_node; u32 rootnode; - int ret; - - cmn->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(cmn->base)) - return PTR_ERR(cmn->base); - ret = of_property_read_u32(np, "arm,root-node", &rootnode); - if (ret) - return ret; - - return rootnode; + return of_property_read_u32(np, "arm,root-node", &rootnode) ?: rootnode; } static int arm_cmn_probe(struct platform_device *pdev) @@ -1504,19 +1874,26 @@ static int arm_cmn_probe(struct platform_device *pdev) struct arm_cmn *cmn; const char *name; static atomic_t id; - int err, rootnode; + int err, rootnode, this_id; cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); if (!cmn) return -ENOMEM; cmn->dev = &pdev->dev; + cmn->model = (unsigned long)device_get_match_data(cmn->dev); platform_set_drvdata(pdev, cmn); - if (has_acpi_companion(cmn->dev)) - rootnode = arm_cmn_acpi_probe(pdev, cmn); - else - rootnode = arm_cmn_of_probe(pdev, cmn); + if (cmn->model == CMN600 && has_acpi_companion(cmn->dev)) { + rootnode = arm_cmn600_acpi_probe(pdev, cmn); + } else { + rootnode = 0; + cmn->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cmn->base)) + return PTR_ERR(cmn->base); + if (cmn->model == CMN600) + rootnode = arm_cmn600_of_probe(pdev->dev.of_node); + } if (rootnode < 0) return rootnode; @@ -1532,7 +1909,7 @@ static int arm_cmn_probe(struct platform_device *pdev) if (err) return err; - cmn->cpu = raw_smp_processor_id(); + cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); cmn->pmu = (struct pmu) { .module = THIS_MODULE, .attr_groups = arm_cmn_attr_groups, @@ -1551,7 +1928,8 @@ static int arm_cmn_probe(struct platform_device *pdev) .cancel_txn = arm_cmn_end_txn, }; - name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", atomic_fetch_inc(&id)); + this_id = atomic_fetch_inc(&id); + name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", this_id); if (!name) return -ENOMEM; @@ -1561,7 +1939,10 @@ static int arm_cmn_probe(struct platform_device *pdev) err = perf_pmu_register(&cmn->pmu, name, -1); if (err) - cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); + cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); + else + arm_cmn_debugfs_init(cmn, this_id); + return err; } @@ -1572,13 +1953,15 @@ static int arm_cmn_remove(struct platform_device *pdev) writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); perf_pmu_unregister(&cmn->pmu); - cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); + cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); + debugfs_remove(cmn->debug); return 0; } #ifdef CONFIG_OF static const struct of_device_id arm_cmn_of_match[] = { - { .compatible = "arm,cmn-600", }, + { .compatible = "arm,cmn-600", .data = (void *)CMN600 }, + { .compatible = "arm,ci-700", .data = (void *)CI700 }, {} }; MODULE_DEVICE_TABLE(of, arm_cmn_of_match); @@ -1586,7 +1969,7 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id arm_cmn_acpi_match[] = { - { "ARMHC600", }, + { 
"ARMHC600", CMN600 }, {} }; MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); @@ -1607,15 +1990,20 @@ static int __init arm_cmn_init(void) int ret; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "perf/arm/cmn:online", NULL, + "perf/arm/cmn:online", + arm_cmn_pmu_online_cpu, arm_cmn_pmu_offline_cpu); if (ret < 0) return ret; arm_cmn_hp_state = ret; + arm_cmn_debugfs = debugfs_create_dir("arm-cmn", NULL); + ret = platform_driver_register(&arm_cmn_driver); - if (ret) + if (ret) { cpuhp_remove_multi_state(arm_cmn_hp_state); + debugfs_remove(arm_cmn_debugfs); + } return ret; } @@ -1623,6 +2011,7 @@ static void __exit arm_cmn_exit(void) { platform_driver_unregister(&arm_cmn_driver); cpuhp_remove_multi_state(arm_cmn_hp_state); + debugfs_remove(arm_cmn_debugfs); } module_init(arm_cmn_init); diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 226348822ab3..1ae19f7301b2 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -47,6 +47,7 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/msi.h> +#include <linux/of.h> #include <linux/perf_event.h> #include <linux/platform_device.h> #include <linux/smp.h> @@ -75,6 +76,10 @@ #define SMMU_PMCG_CR 0xE04 #define SMMU_PMCG_CR_ENABLE BIT(0) #define SMMU_PMCG_IIDR 0xE08 +#define SMMU_PMCG_IIDR_PRODUCTID GENMASK(31, 20) +#define SMMU_PMCG_IIDR_VARIANT GENMASK(19, 16) +#define SMMU_PMCG_IIDR_REVISION GENMASK(15, 12) +#define SMMU_PMCG_IIDR_IMPLEMENTER GENMASK(11, 0) #define SMMU_PMCG_CEID0 0xE20 #define SMMU_PMCG_CEID1 0xE28 #define SMMU_PMCG_IRQ_CTRL 0xE50 @@ -83,6 +88,20 @@ #define SMMU_PMCG_IRQ_CFG1 0xE60 #define SMMU_PMCG_IRQ_CFG2 0xE64 +/* IMP-DEF ID registers */ +#define SMMU_PMCG_PIDR0 0xFE0 +#define SMMU_PMCG_PIDR0_PART_0 GENMASK(7, 0) +#define SMMU_PMCG_PIDR1 0xFE4 +#define SMMU_PMCG_PIDR1_DES_0 GENMASK(7, 4) +#define SMMU_PMCG_PIDR1_PART_1 GENMASK(3, 0) +#define SMMU_PMCG_PIDR2 0xFE8 +#define SMMU_PMCG_PIDR2_REVISION GENMASK(7, 4) +#define SMMU_PMCG_PIDR2_DES_1 GENMASK(2, 0) +#define SMMU_PMCG_PIDR3 0xFEC +#define SMMU_PMCG_PIDR3_REVAND GENMASK(7, 4) +#define SMMU_PMCG_PIDR4 0xFD0 +#define SMMU_PMCG_PIDR4_DES_2 GENMASK(3, 0) + /* MSI config fields */ #define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) #define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1 @@ -754,6 +773,41 @@ static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu) dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options); } +static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu) +{ + return of_device_is_compatible(smmu_pmu->dev->of_node, + "arm,mmu-600-pmcg"); +} + +static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu) +{ + u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR); + + if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) { + u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0); + u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1); + u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2); + u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3); + u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4); + + u32 productid = FIELD_GET(SMMU_PMCG_PIDR0_PART_0, pidr0) | + (FIELD_GET(SMMU_PMCG_PIDR1_PART_1, pidr1) << 8); + u32 variant = FIELD_GET(SMMU_PMCG_PIDR2_REVISION, pidr2); + u32 revision = FIELD_GET(SMMU_PMCG_PIDR3_REVAND, pidr3); + u32 implementer = + FIELD_GET(SMMU_PMCG_PIDR1_DES_0, pidr1) | + (FIELD_GET(SMMU_PMCG_PIDR2_DES_1, pidr2) << 4) | + (FIELD_GET(SMMU_PMCG_PIDR4_DES_2, pidr4) << 8); + + iidr = FIELD_PREP(SMMU_PMCG_IIDR_PRODUCTID, productid) | + 
FIELD_PREP(SMMU_PMCG_IIDR_VARIANT, variant) | + FIELD_PREP(SMMU_PMCG_IIDR_REVISION, revision) | + FIELD_PREP(SMMU_PMCG_IIDR_IMPLEMENTER, implementer); + } + + smmu_pmu->iidr = iidr; +} + static int smmu_pmu_probe(struct platform_device *pdev) { struct smmu_pmu *smmu_pmu; @@ -825,7 +879,7 @@ static int smmu_pmu_probe(struct platform_device *pdev) return err; } - smmu_pmu->iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR); + smmu_pmu_get_iidr(smmu_pmu); name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx", (res_0->start) >> SMMU_PMCG_PA_SHIFT); @@ -834,7 +888,8 @@ static int smmu_pmu_probe(struct platform_device *pdev) return -EINVAL; } - smmu_pmu_get_acpi_options(smmu_pmu); + if (!dev->of_node) + smmu_pmu_get_acpi_options(smmu_pmu); /* Pick one CPU to be the preferred one to use */ smmu_pmu->on_cpu = raw_smp_processor_id(); @@ -884,9 +939,18 @@ static void smmu_pmu_shutdown(struct platform_device *pdev) smmu_pmu_disable(&smmu_pmu->pmu); } +#ifdef CONFIG_OF +static const struct of_device_id smmu_pmu_of_match[] = { + { .compatible = "arm,smmu-v3-pmcg" }, + {} +}; +MODULE_DEVICE_TABLE(of, smmu_pmu_of_match); +#endif + static struct platform_driver smmu_pmu_driver = { .driver = { .name = "arm-smmu-v3-pmcg", + .of_match_table = of_match_ptr(smmu_pmu_of_match), .suppress_bind_attrs = true, }, .probe = smmu_pmu_probe, diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig index c5d1b7019fff..5546218b5598 100644 --- a/drivers/perf/hisilicon/Kconfig +++ b/drivers/perf/hisilicon/Kconfig @@ -5,3 +5,12 @@ config HISI_PMU help Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home Agent performance monitor and DDR Controller performance monitor. + +config HISI_PCIE_PMU + tristate "HiSilicon PCIE PERF PMU" + depends on PCI && ARM64 + help + Provide support for HiSilicon PCIe performance monitoring unit (PMU) + RCiEP devices. + Adds the PCIe PMU into perf events system for monitoring latency, + bandwidth etc. diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 7643c9f93e36..506ed39e3266 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -2,3 +2,5 @@ obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \ hisi_uncore_pa_pmu.o + +obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c new file mode 100644 index 000000000000..21771708597d --- /dev/null +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -0,0 +1,948 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This driver adds support for PCIe PMU RCiEP device. Related + * perf events are bandwidth, latency etc. 
+ * + * Copyright (C) 2021 HiSilicon Limited + * Author: Qi Liu <liuqi115@huawei.com> + */ +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/perf_event.h> + +#define DRV_NAME "hisi_pcie_pmu" +/* Define registers */ +#define HISI_PCIE_GLOBAL_CTRL 0x00 +#define HISI_PCIE_EVENT_CTRL 0x010 +#define HISI_PCIE_CNT 0x090 +#define HISI_PCIE_EXT_CNT 0x110 +#define HISI_PCIE_INT_STAT 0x150 +#define HISI_PCIE_INT_MASK 0x154 +#define HISI_PCIE_REG_BDF 0xfe0 +#define HISI_PCIE_REG_VERSION 0xfe4 +#define HISI_PCIE_REG_INFO 0xfe8 + +/* Define command in HISI_PCIE_GLOBAL_CTRL */ +#define HISI_PCIE_GLOBAL_EN 0x01 +#define HISI_PCIE_GLOBAL_NONE 0 + +/* Define command in HISI_PCIE_EVENT_CTRL */ +#define HISI_PCIE_EVENT_EN BIT_ULL(20) +#define HISI_PCIE_RESET_CNT BIT_ULL(22) +#define HISI_PCIE_INIT_SET BIT_ULL(34) +#define HISI_PCIE_THR_EN BIT_ULL(26) +#define HISI_PCIE_TARGET_EN BIT_ULL(32) +#define HISI_PCIE_TRIG_EN BIT_ULL(52) + +/* Define offsets in HISI_PCIE_EVENT_CTRL */ +#define HISI_PCIE_EVENT_M GENMASK_ULL(15, 0) +#define HISI_PCIE_THR_MODE_M GENMASK_ULL(27, 27) +#define HISI_PCIE_THR_M GENMASK_ULL(31, 28) +#define HISI_PCIE_TARGET_M GENMASK_ULL(52, 36) +#define HISI_PCIE_TRIG_MODE_M GENMASK_ULL(53, 53) +#define HISI_PCIE_TRIG_M GENMASK_ULL(59, 56) + +#define HISI_PCIE_MAX_COUNTERS 8 +#define HISI_PCIE_REG_STEP 8 +#define HISI_PCIE_THR_MAX_VAL 10 +#define HISI_PCIE_TRIG_MAX_VAL 10 +#define HISI_PCIE_MAX_PERIOD (GENMASK_ULL(63, 0)) +#define HISI_PCIE_INIT_VAL BIT_ULL(63) + +struct hisi_pcie_pmu { + struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS]; + struct hlist_node node; + struct pci_dev *pdev; + struct pmu pmu; + void __iomem *base; + int irq; + u32 identifier; + /* Minimum and maximum BDF of root ports monitored by PMU */ + u16 bdf_min; + u16 bdf_max; + int on_cpu; +}; + +struct hisi_pcie_reg_pair { + u16 lo; + u16 hi; +}; + +#define to_pcie_pmu(p) (container_of((p), struct hisi_pcie_pmu, pmu)) +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) + +#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo) \ + static u64 hisi_pcie_get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK(_hi, _lo), event->attr._config); \ + } \ + +HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0); +HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0); +HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4); +HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5); +HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9); +HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0); +HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16); + +static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct perf_pmu_events_attr *pmu_attr = + container_of(attr, struct perf_pmu_events_attr, attr); + + return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id); +} + +#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format) \ + (&((struct dev_ext_attribute[]){ \ + { .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show, \ + NULL), \ + .var = (void *)_format } \ + })[0].attr.attr) + 
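/*
 * As an illustration (this is what the macro above generates, not
 * extra driver code): HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16)
 * expands to the equivalent of
 *
 *	static u64 hisi_pcie_get_bdf(struct perf_event *event)
 *	{
 *		return FIELD_GET(GENMASK(31, 16), event->attr.config2);
 *	}
 *
 * matching the "config2:16-31" format string exported for "bdf"
 * further down.
 */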
+#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id) + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier); +} +static DEVICE_ATTR_RO(identifier); + +static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min)); +} +static DEVICE_ATTR_RO(bus); + +static struct hisi_pcie_reg_pair +hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off) +{ + u32 val = readl_relaxed(pcie_pmu->base + reg_off); + struct hisi_pcie_reg_pair regs = { + .lo = val, + .hi = val >> 16, + }; + + return regs; +} + +/* + * Hardware counter and ext_counter work together for bandwidth, latency, bus + * utilization and buffer occupancy events. For example, RX memory write latency + * events(index = 0x0010), counter counts total delay cycles and ext_counter + * counts RX memory write PCIe packets number. + * + * As we don't want PMU driver to process these two data, "delay cycles" can + * be treated as an independent event(index = 0x0010), "RX memory write packets + * number" as another(index = 0x10010). BIT 16 is used to distinguish and 0-15 + * bits are "real" event index, which can be used to set HISI_PCIE_EVENT_CTRL. + */ +#define EXT_COUNTER_IS_USED(idx) ((idx) & BIT(16)) + +static u32 hisi_pcie_get_real_event(struct perf_event *event) +{ + return hisi_pcie_get_event(event) & GENMASK(15, 0); +} + +static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx) +{ + return offset + HISI_PCIE_REG_STEP * idx; +} + +static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, + u32 idx) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + return readl_relaxed(pcie_pmu->base + offset); +} + +static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + writel_relaxed(val, pcie_pmu->base + offset); +} + +static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + return readq_relaxed(pcie_pmu->base + offset); +} + +static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + writeq_relaxed(val, pcie_pmu->base + offset); +} + +static void hisi_pcie_pmu_config_filter(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 reg = HISI_PCIE_INIT_SET; + u64 port, trig_len, thr_len; + + /* Config HISI_PCIE_EVENT_CTRL according to event. */ + reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event)); + + /* Config HISI_PCIE_EVENT_CTRL according to root port or EP device. 
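 * A non-zero "port" filter value is written straight into
 * HISI_PCIE_TARGET_M; otherwise HISI_PCIE_TARGET_EN is set and the
 * "bdf" filter is matched instead. A sketch of the two resulting
 * cases, using the field macros defined above:
 *
 *	reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port);
 *	reg |= HISI_PCIE_TARGET_EN | FIELD_PREP(HISI_PCIE_TARGET_M, bdf);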
*/ + port = hisi_pcie_get_port(event); + if (port) + reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port); + else + reg |= HISI_PCIE_TARGET_EN | + FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event)); + + /* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */ + trig_len = hisi_pcie_get_trig_len(event); + if (trig_len) { + reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len); + reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event)); + reg |= HISI_PCIE_TRIG_EN; + } + + /* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */ + thr_len = hisi_pcie_get_thr_len(event); + if (thr_len) { + reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len); + reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event)); + reg |= HISI_PCIE_THR_EN; + } + + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg); +} + +static void hisi_pcie_pmu_clear_filter(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET); +} + +static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf) +{ + struct pci_dev *root_port, *pdev; + u16 rp_bdf; + + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf), + GET_PCI_DEVFN(bdf)); + if (!pdev) + return false; + + root_port = pcie_find_root_port(pdev); + if (!root_port) { + pci_dev_put(pdev); + return false; + } + + pci_dev_put(pdev); + rp_bdf = pci_dev_id(root_port); + return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max; +} + +static bool hisi_pcie_pmu_valid_filter(struct perf_event *event, + struct hisi_pcie_pmu *pcie_pmu) +{ + u32 requester_id = hisi_pcie_get_bdf(event); + + if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL) + return false; + + if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL) + return false; + + if (requester_id) { + if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id)) + return false; + } + + return true; +} + +static bool hisi_pcie_pmu_cmp_event(struct perf_event *target, + struct perf_event *event) +{ + return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event); +} + +static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS]; + int counters = 1; + int num; + + event_group[0] = leader; + if (!is_software_event(leader)) { + if (leader->pmu != event->pmu) + return false; + + if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event)) + event_group[counters++] = event; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return false; + + for (num = 0; num < counters; num++) { + if (hisi_pcie_pmu_cmp_event(event_group[num], sibling)) + break; + } + + if (num == counters) + event_group[counters++] = sibling; + } + + return counters <= HISI_PCIE_MAX_COUNTERS; +} + +static int hisi_pcie_pmu_event_init(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + event->cpu = pcie_pmu->on_cpu; + + if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event))) + hwc->event_base = HISI_PCIE_EXT_CNT; + else + hwc->event_base = HISI_PCIE_CNT; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling is not supported. 
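 * This is an uncore counting PMU: event->cpu is forced to the PMU's
 * nominated CPU above and task attachment is rejected below, so usage
 * is of the system-wide counting kind, e.g. something like (instance
 * name hypothetical, event names from the attribute table below):
 *
 *	perf stat -e hisi_pcie0_core0/rx_mwr_latency/ \
 *		  -e hisi_pcie0_core0/rx_mwr_cnt/ -a sleep 1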
*/ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu)) + return -EINVAL; + + if (!hisi_pcie_pmu_validate_event_group(event)) + return -EINVAL; + + return 0; +} + +static u64 hisi_pcie_pmu_read_counter(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + u32 idx = event->hw.idx; + + return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx); +} + +static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu, + struct perf_event *event) +{ + struct perf_event *sibling; + int idx; + + for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { + sibling = pcie_pmu->hw_events[idx]; + if (!sibling) + continue; + + if (!hisi_pcie_pmu_cmp_event(sibling, event)) + continue; + + /* Related events must be used in group */ + if (sibling->group_leader == event->group_leader) + return idx; + else + return -EINVAL; + } + + return idx; +} + +static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu) +{ + int idx; + + for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { + if (!pcie_pmu->hw_events[idx]) + return idx; + } + + return -EINVAL; +} + +static void hisi_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 new_cnt, prev_cnt, delta; + + do { + prev_cnt = local64_read(&hwc->prev_count); + new_cnt = hisi_pcie_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, + new_cnt) != prev_cnt); + + delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD; + local64_add(delta, &event->count); +} + +static void hisi_pcie_pmu_read(struct perf_event *event) +{ + hisi_pcie_pmu_event_update(event); +} + +static void hisi_pcie_pmu_set_period(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); +} + +static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 val; + + val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx); + val |= HISI_PCIE_EVENT_EN; + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val); +} + +static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 val; + + val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx); + val &= ~HISI_PCIE_EVENT_EN; + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val); +} + +static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0); +} + +static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1); +} + +static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx) +{ + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET); +} + +static void hisi_pcie_pmu_start(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = 
&event->hw; + int idx = hwc->idx; + u64 prev_cnt; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + + hisi_pcie_pmu_config_filter(event); + hisi_pcie_pmu_enable_counter(pcie_pmu, hwc); + hisi_pcie_pmu_enable_int(pcie_pmu, hwc); + hisi_pcie_pmu_set_period(event); + + if (flags & PERF_EF_RELOAD) { + prev_cnt = local64_read(&hwc->prev_count); + hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt); + } + + perf_event_update_userpage(event); +} + +static void hisi_pcie_pmu_stop(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pcie_pmu_event_update(event); + hisi_pcie_pmu_disable_int(pcie_pmu, hwc); + hisi_pcie_pmu_disable_counter(pcie_pmu, hwc); + hisi_pcie_pmu_clear_filter(event); + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + hwc->state |= PERF_HES_UPTODATE; +} + +static int hisi_pcie_pmu_add(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Check all working events to find a related event. */ + idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event); + if (idx < 0) + return idx; + + /* Current event shares an enabled counter with the related event */ + if (idx < HISI_PCIE_MAX_COUNTERS) { + hwc->idx = idx; + goto start_count; + } + + idx = hisi_pcie_pmu_get_event_idx(pcie_pmu); + if (idx < 0) + return idx; + + hwc->idx = idx; + pcie_pmu->hw_events[idx] = event; + /* Reset Counter to avoid previous statistic interference. */ + hisi_pcie_pmu_reset_counter(pcie_pmu, idx); + +start_count: + if (flags & PERF_EF_START) + hisi_pcie_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void hisi_pcie_pmu_del(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pcie_pmu_stop(event, PERF_EF_UPDATE); + pcie_pmu->hw_events[hwc->idx] = NULL; + perf_event_update_userpage(event); +} + +static void hisi_pcie_pmu_enable(struct pmu *pmu) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu); + int num; + + for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) { + if (pcie_pmu->hw_events[num]) + break; + } + + if (num == HISI_PCIE_MAX_COUNTERS) + return; + + writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL); +} + +static void hisi_pcie_pmu_disable(struct pmu *pmu) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu); + + writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL); +} + +static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data) +{ + struct hisi_pcie_pmu *pcie_pmu = data; + irqreturn_t ret = IRQ_NONE; + struct perf_event *event; + u32 overflown; + int idx; + + for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { + overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx); + if (!overflown) + continue; + + /* Clear status of interrupt. 
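 * (the per-counter INT_STAT bit appears to be write-1-to-clear).
 * After the update, the counter is re-armed at HISI_PCIE_INIT_VAL
 * (bit 63), so roughly 2^63 increments fit in each period and
 * hisi_pcie_pmu_event_update() folds any wrap into the delta:
 *
 *	delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD;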
*/ + hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1); + event = pcie_pmu->hw_events[idx]; + if (!event) + continue; + + hisi_pcie_pmu_event_update(event); + hisi_pcie_pmu_set_period(event); + ret = IRQ_HANDLED; + } + + return ret; +} + +static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + int irq, ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret); + return ret; + } + + irq = pci_irq_vector(pdev, 0); + ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME, + pcie_pmu); + if (ret) { + pci_err(pdev, "Failed to register IRQ: %d\n", ret); + pci_free_irq_vectors(pdev); + return ret; + } + + pcie_pmu->irq = irq; + + return 0; +} + +static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + free_irq(pcie_pmu->irq, pcie_pmu); + pci_free_irq_vectors(pdev); +} + +static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node); + + if (pcie_pmu->on_cpu == -1) { + pcie_pmu->on_cpu = cpu; + WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(cpu))); + } + + return 0; +} + +static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node); + unsigned int target; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (pcie_pmu->on_cpu != cpu) + return 0; + + pcie_pmu->on_cpu = -1; + /* Choose a new CPU from all online cpus. */ + target = cpumask_first(cpu_online_mask); + if (target >= nr_cpu_ids) { + pci_err(pcie_pmu->pdev, "There is no CPU to set\n"); + return 0; + } + + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + /* Use this CPU for event counting */ + pcie_pmu->on_cpu = target; + WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target))); + + return 0; +} + +static struct attribute *hisi_pcie_pmu_events_attr[] = { + HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010), + HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x1005), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x11005), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x2004), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x12004), + NULL +}; + +static struct attribute_group hisi_pcie_pmu_events_group = { + .name = "events", + .attrs = hisi_pcie_pmu_events_attr, +}; + +static struct attribute *hisi_pcie_pmu_format_attr[] = { + HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"), + HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"), + HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"), + HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"), + HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"), + HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"), + HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"), + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_format_group = { + .name = "format", + .attrs = hisi_pcie_pmu_format_attr, +}; + +static struct attribute *hisi_pcie_pmu_bus_attrs[] = { + &dev_attr_bus.attr, + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_bus_attr_group = { + .attrs = hisi_pcie_pmu_bus_attrs, +}; + +static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = { + 
&dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = { + .attrs = hisi_pcie_pmu_cpumask_attrs, +}; + +static struct attribute *hisi_pcie_pmu_identifier_attrs[] = { + &dev_attr_identifier.attr, + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = { + .attrs = hisi_pcie_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = { + &hisi_pcie_pmu_events_group, + &hisi_pcie_pmu_format_group, + &hisi_pcie_pmu_bus_attr_group, + &hisi_pcie_pmu_cpumask_attr_group, + &hisi_pcie_pmu_identifier_attr_group, + NULL +}; + +static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + struct hisi_pcie_reg_pair regs; + u16 sicl_id, core_id; + char *name; + + regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF); + pcie_pmu->bdf_min = regs.lo; + pcie_pmu->bdf_max = regs.hi; + + regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO); + sicl_id = regs.hi; + core_id = regs.lo; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id); + if (!name) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->on_cpu = -1; + pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION); + pcie_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .event_init = hisi_pcie_pmu_event_init, + .pmu_enable = hisi_pcie_pmu_enable, + .pmu_disable = hisi_pcie_pmu_disable, + .add = hisi_pcie_pmu_add, + .del = hisi_pcie_pmu_del, + .start = hisi_pcie_pmu_start, + .stop = hisi_pcie_pmu_stop, + .read = hisi_pcie_pmu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = hisi_pcie_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + return 0; +} + +static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + int ret; + + pcie_pmu->base = pci_ioremap_bar(pdev, 2); + if (!pcie_pmu->base) { + pci_err(pdev, "Ioremap failed for pcie_pmu resource\n"); + return -ENOMEM; + } + + ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu); + if (ret) + goto err_iounmap; + + ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu); + if (ret) + goto err_iounmap; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node); + if (ret) { + pci_err(pdev, "Failed to register hotplug: %d\n", ret); + goto err_irq_unregister; + } + + ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1); + if (ret) { + pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret); + goto err_hotplug_unregister; + } + + return ret; + +err_hotplug_unregister: + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node); + +err_irq_unregister: + hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu); + +err_iounmap: + iounmap(pcie_pmu->base); + + return ret; +} + +static void hisi_pcie_uninit_pmu(struct pci_dev *pdev) +{ + struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev); + + perf_pmu_unregister(&pcie_pmu->pmu); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node); + hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu); + iounmap(pcie_pmu->base); +} + +static int hisi_pcie_init_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + pci_err(pdev, "Failed to enable PCI device: %d\n", ret); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME); + if (ret < 0) { + pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret); + return ret; + } + + pci_set_master(pdev); 
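+ /* Bus mastering is required so the device can perform the memory + * writes that deliver its MSI interrupts. + */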
+ + return 0; +} + +static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_pcie_pmu *pcie_pmu; + int ret; + + pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + ret = hisi_pcie_init_dev(pdev); + if (ret) + return ret; + + ret = hisi_pcie_init_pmu(pdev, pcie_pmu); + if (ret) + return ret; + + pci_set_drvdata(pdev, pcie_pmu); + + return ret; +} + +static void hisi_pcie_pmu_remove(struct pci_dev *pdev) +{ + hisi_pcie_uninit_pmu(pdev); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id hisi_pcie_pmu_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids); + +static struct pci_driver hisi_pcie_pmu_driver = { + .name = DRV_NAME, + .id_table = hisi_pcie_pmu_ids, + .probe = hisi_pcie_pmu_probe, + .remove = hisi_pcie_pmu_remove, +}; + +static int __init hisi_pcie_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, + "AP_PERF_ARM_HISI_PCIE_PMU_ONLINE", + hisi_pcie_pmu_online_cpu, + hisi_pcie_pmu_offline_cpu); + if (ret) { + pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&hisi_pcie_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE); + + return ret; +} +module_init(hisi_pcie_module_init); + +static void __exit hisi_pcie_module_exit(void) +{ + pci_unregister_driver(&hisi_pcie_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE); +} +module_exit(hisi_pcie_module_exit); + +MODULE_DESCRIPTION("HiSilicon PCIe PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>"); diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c new file mode 100644 index 000000000000..7f4d292658e3 --- /dev/null +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell CN10K LLC-TAD perf driver + * + * Copyright (C) 2021 Marvell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ + +#define pr_fmt(fmt) "tad_pmu: " fmt + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/cpuhotplug.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +#define TAD_PFC_OFFSET 0x0 +#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3)) +#define TAD_PRF_OFFSET 0x100 +#define TAD_PRF(counter) (TAD_PRF_OFFSET | (counter << 3)) +#define TAD_PRF_CNTSEL_MASK 0xFF +#define TAD_MAX_COUNTERS 8 + +#define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu)) + +struct tad_region { + void __iomem *base; +}; + +struct tad_pmu { + struct pmu pmu; + struct tad_region *regions; + u32 region_cnt; + unsigned int cpu; + struct hlist_node node; + struct perf_event *events[TAD_MAX_COUNTERS]; + DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS); +}; + +static int tad_pmu_cpuhp_state; + +static void tad_pmu_event_counter_read(struct perf_event *event) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u32 counter_idx = hwc->idx; + u64 prev, new; + int i; + + do { + prev = local64_read(&hwc->prev_count); + for (i = 0, new = 0; i < tad_pmu->region_cnt; i++) + new += readq(tad_pmu->regions[i].base + + TAD_PFC(counter_idx)); + } while (local64_cmpxchg(&hwc->prev_count, prev, new) != prev); + + local64_add(new - prev, &event->count); +} + +static void tad_pmu_event_counter_stop(struct perf_event *event, int flags) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u32 counter_idx = hwc->idx; + int i; + + /* TAD()_PFC() stops counting on the write + * which sets TAD()_PRF()[CNTSEL] == 0 + */ + for (i = 0; i < tad_pmu->region_cnt; i++) { + writeq_relaxed(0, tad_pmu->regions[i].base + + TAD_PRF(counter_idx)); + } + + tad_pmu_event_counter_read(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static void tad_pmu_event_counter_start(struct perf_event *event, int flags) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u32 event_idx = event->attr.config; + u32 counter_idx = hwc->idx; + u64 reg_val; + int i; + + hwc->state = 0; + + /* Typically the TAD_PFC() counters are zeroed to start counting */ + for (i = 0; i < tad_pmu->region_cnt; i++) + writeq_relaxed(0, tad_pmu->regions[i].base + + TAD_PFC(counter_idx)); + + /* TAD()_PFC() starts counting on the write + * which sets TAD()_PRF()[CNTSEL] != 0 + */ + for (i = 0; i < tad_pmu->region_cnt; i++) { + reg_val = readq_relaxed(tad_pmu->regions[i].base + + TAD_PRF(counter_idx)); + reg_val |= (event_idx & 0xFF); + writeq_relaxed(reg_val, tad_pmu->regions[i].base + + TAD_PRF(counter_idx)); + } +} + +static void tad_pmu_event_counter_del(struct perf_event *event, int flags) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + tad_pmu_event_counter_stop(event, flags | PERF_EF_UPDATE); + tad_pmu->events[idx] = NULL; + clear_bit(idx, tad_pmu->counters_map); +} + +static int tad_pmu_event_counter_add(struct perf_event *event, int flags) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + /* Get a free counter for this event */ + idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS); + if (idx == TAD_MAX_COUNTERS) + return -EAGAIN; + + set_bit(idx, tad_pmu->counters_map); + + hwc->idx = idx; + hwc->state = PERF_HES_STOPPED; + tad_pmu->events[idx] = event; + + if (flags & PERF_EF_START) +
tad_pmu_event_counter_start(event, flags); + + return 0; +} + +static int tad_pmu_event_init(struct perf_event *event) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); + + if (!event->attr.disabled) + return -EINVAL; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (event->state != PERF_EVENT_STATE_OFF) + return -EINVAL; + + event->cpu = tad_pmu->cpu; + event->hw.idx = -1; + event->hw.config_base = event->attr.config; + + return 0; +} + +static ssize_t tad_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define TAD_PMU_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR_ID(name, tad_pmu_event_show, config) + +static struct attribute *tad_pmu_event_attrs[] = { + TAD_PMU_EVENT_ATTR(tad_none, 0x0), + TAD_PMU_EVENT_ATTR(tad_req_msh_in_any, 0x1), + TAD_PMU_EVENT_ATTR(tad_req_msh_in_mn, 0x2), + TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_any, 0x4), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_mn, 0x5), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_exlmn, 0x6), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_dss, 0x7), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_in_retry_dss, 0x8), + TAD_PMU_EVENT_ATTR(tad_dat_msh_in_any, 0x9), + TAD_PMU_EVENT_ATTR(tad_dat_msh_in_dss, 0xa), + TAD_PMU_EVENT_ATTR(tad_req_msh_out_any, 0xb), + TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_rd, 0xc), + TAD_PMU_EVENT_ATTR(tad_req_msh_out_dss_wr, 0xd), + TAD_PMU_EVENT_ATTR(tad_req_msh_out_evict, 0xe), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_any, 0xf), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_exlmn, 0x10), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_retry_mn, 0x11), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_exlmn, 0x12), + TAD_PMU_EVENT_ATTR(tad_rsp_msh_out_mn, 0x13), + TAD_PMU_EVENT_ATTR(tad_snp_msh_out_any, 0x14), + TAD_PMU_EVENT_ATTR(tad_snp_msh_out_mn, 0x15), + TAD_PMU_EVENT_ATTR(tad_snp_msh_out_exlmn, 0x16), + TAD_PMU_EVENT_ATTR(tad_dat_msh_out_any, 0x17), + TAD_PMU_EVENT_ATTR(tad_dat_msh_out_fill, 0x18), + TAD_PMU_EVENT_ATTR(tad_dat_msh_out_dss, 0x19), + TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a), + TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b), + TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c), + TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d), + TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e), + TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f), + TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20), + TAD_PMU_EVENT_ATTR(tad_dat_rd, 0x21), + TAD_PMU_EVENT_ATTR(tad_dat_rd_byp, 0x22), + TAD_PMU_EVENT_ATTR(tad_ifb_occ, 0x23), + TAD_PMU_EVENT_ATTR(tad_req_occ, 0x24), + NULL +}; + +static const struct attribute_group tad_pmu_events_attr_group = { + .name = "events", + .attrs = tad_pmu_event_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); + +static struct attribute *tad_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL +}; + +static struct attribute_group tad_pmu_format_attr_group = { + .name = "format", + .attrs = tad_pmu_format_attrs, +}; + +static ssize_t tad_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu)); +} + +static DEVICE_ATTR(cpumask, 0444, tad_pmu_cpumask_show, NULL); + +static struct attribute *tad_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group tad_pmu_cpumask_attr_group = { + .attrs = tad_pmu_cpumask_attrs, +}; + +static const struct 
attribute_group *tad_pmu_attr_groups[] = { + &tad_pmu_events_attr_group, + &tad_pmu_format_attr_group, + &tad_pmu_cpumask_attr_group, + NULL +}; + +static int tad_pmu_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct tad_region *regions; + struct tad_pmu *tad_pmu; + struct resource *res; + u32 tad_pmu_page_size; + u32 tad_page_size; + u32 tad_cnt; + int i, ret; + char *name; + + tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL); + if (!tad_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, tad_pmu); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "Mem resource not found\n"); + return -ENODEV; + } + + ret = of_property_read_u32(node, "marvell,tad-page-size", + &tad_page_size); + if (ret) { + dev_err(&pdev->dev, "Can't find tad-page-size property\n"); + return ret; + } + + ret = of_property_read_u32(node, "marvell,tad-pmu-page-size", + &tad_pmu_page_size); + if (ret) { + dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n"); + return ret; + } + + ret = of_property_read_u32(node, "marvell,tad-cnt", &tad_cnt); + if (ret) { + dev_err(&pdev->dev, "Can't find tad-cnt property\n"); + return ret; + } + + regions = devm_kcalloc(&pdev->dev, tad_cnt, + sizeof(*regions), GFP_KERNEL); + if (!regions) + return -ENOMEM; + + /* ioremap the distributed TAD pmu regions */ + for (i = 0; i < tad_cnt && res->start < res->end; i++) { + regions[i].base = devm_ioremap(&pdev->dev, + res->start, + tad_pmu_page_size); + if (!regions[i].base) { + dev_err(&pdev->dev, "TAD%d ioremap fail\n", i); + return -ENOMEM; + } + res->start += tad_page_size; + } + + tad_pmu->regions = regions; + tad_pmu->region_cnt = tad_cnt; + + tad_pmu->pmu = (struct pmu) { + + .module = THIS_MODULE, + .attr_groups = tad_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | + PERF_PMU_CAP_NO_INTERRUPT, + .task_ctx_nr = perf_invalid_context, + + .event_init = tad_pmu_event_init, + .add = tad_pmu_event_counter_add, + .del = tad_pmu_event_counter_del, + .start = tad_pmu_event_counter_start, + .stop = tad_pmu_event_counter_stop, + .read = tad_pmu_event_counter_read, + }; + + tad_pmu->cpu = raw_smp_processor_id(); + + /* Register pmu instance for cpu hotplug */ + ret = cpuhp_state_add_instance_nocalls(tad_pmu_cpuhp_state, + &tad_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + name = "tad"; + ret = perf_pmu_register(&tad_pmu->pmu, name, -1); + if (ret) + cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state, + &tad_pmu->node); + + return ret; +} + +static int tad_pmu_remove(struct platform_device *pdev) +{ + struct tad_pmu *pmu = platform_get_drvdata(pdev); + + cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state, + &pmu->node); + perf_pmu_unregister(&pmu->pmu); + + return 0; +} + +static const struct of_device_id tad_pmu_of_match[] = { + { .compatible = "marvell,cn10k-tad-pmu", }, + {}, +}; + +static struct platform_driver tad_pmu_driver = { + .driver = { + .name = "cn10k_tad_pmu", + .of_match_table = of_match_ptr(tad_pmu_of_match), + .suppress_bind_attrs = true, + }, + .probe = tad_pmu_probe, + .remove = tad_pmu_remove, +}; + +static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node); + unsigned int target; + + if (cpu != pmu->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&pmu->pmu, 
cpu, target); + pmu->cpu = target; + + return 0; +} + +static int __init tad_pmu_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/cn10k/tadpmu:online", + NULL, + tad_pmu_offline_cpu); + if (ret < 0) + return ret; + tad_pmu_cpuhp_state = ret; + return platform_driver_register(&tad_pmu_driver); +} + +static void __exit tad_pmu_exit(void) +{ + platform_driver_unregister(&tad_pmu_driver); + cpuhp_remove_multi_state(tad_pmu_cpuhp_state); +} + +module_init(tad_pmu_init); +module_exit(tad_pmu_exit); + +MODULE_DESCRIPTION("Marvell CN10K LLC-TAD Perf driver"); +MODULE_AUTHOR("Bhaskara Budiredla <bbudiredla@marvell.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 6a961d5f8726..0d5b61e4c21e 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -281,6 +281,23 @@ config PINCTRL_ST select PINCONF select GPIOLIB_IRQCHIP +config PINCTRL_STARFIVE + tristate "Pinctrl and GPIO driver for the StarFive JH7100 SoC" + depends on SOC_STARFIVE || COMPILE_TEST + depends on OF + default SOC_STARFIVE + select GENERIC_PINCTRL_GROUPS + select GENERIC_PINMUX_FUNCTIONS + select GENERIC_PINCONF + select GPIOLIB + select GPIOLIB_IRQCHIP + select OF_GPIO + help + Say yes here to support pin control on the StarFive JH7100 SoC. + This also provides an interface to the GPIO pins not used by other + peripherals supporting inputs, outputs, configuring pull-up/pull-down + and interrupts on input changes. + config PINCTRL_STMFX tristate "STMicroelectronics STMFX GPIO expander pinctrl driver" depends on I2C diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 5e63de2ffcf4..f5bdd6b209a6 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o +obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c new file mode 100644 index 000000000000..0b912152a405 --- /dev/null +++ b/drivers/pinctrl/pinctrl-starfive.c @@ -0,0 +1,1354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Pinctrl / GPIO driver for StarFive JH7100 SoC + * + * Copyright (C) 2020 Shanghai StarFive Technology Co., Ltd. + * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk> + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/gpio/driver.h> +#include <linux/io.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> + +#include <dt-bindings/pinctrl/pinctrl-starfive.h> + +#include "core.h" +#include "pinctrl-utils.h" +#include "pinmux.h" +#include "pinconf.h" + +#define DRIVER_NAME "pinctrl-starfive" + +/* + * Refer to Section 12. GPIO Registers in the JH7100 data sheet: + * https://github.com/starfive-tech/JH7100_Docs + */ +#define NR_GPIOS 64 + +/* + * Global enable for GPIO interrupts. If bit 0 is set to 1 the GPIO interrupts + * are enabled. If set to 0 the GPIO interrupts are disabled. 
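+ * The driver sets this bit once in starfive_gpio_init_hw() and afterwards + * masks or unmasks individual interrupts via the per-GPIO bits in the + * GPIOIE registers.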
*/ +#define GPIOEN 0x000 + +/* + * The following 32-bit registers come in pairs, but only the offset of the + * first register is defined. The first controls (interrupts for) GPIO 0-31 and + * the second GPIO 32-63. + */ + +/* + * Interrupt Type. If set to 1 the interrupt is edge-triggered. If set to 0 the + * interrupt is level-triggered. + */ +#define GPIOIS 0x010 + +/* + * Edge-Trigger Interrupt Type. If set to 1 the interrupt gets triggered on + * both positive and negative edges. If set to 0 the interrupt is triggered by a + * single edge. + */ +#define GPIOIBE 0x018 + +/* + * Interrupt Trigger Polarity. If set to 1 the interrupt is triggered on a + * rising edge (edge-triggered) or high level (level-triggered). If set to 0 the + * interrupt is triggered on a falling edge (edge-triggered) or low level + * (level-triggered). + */ +#define GPIOIEV 0x020 + +/* + * Interrupt Mask. If set to 1 the interrupt is enabled (unmasked). If set to 0 + * the interrupt is disabled (masked). Note that the current documentation is + * wrong and says the exact opposite of this. + */ +#define GPIOIE 0x028 + +/* + * Clear Edge-Triggered Interrupts. Write a 1 to clear the edge-triggered + * interrupt. + */ +#define GPIOIC 0x030 + +/* + * Edge-Triggered Interrupt Status. A 1 means the configured edge was detected. + */ +#define GPIORIS 0x038 + +/* + * Interrupt Status after Masking. A 1 means the configured edge or level was + * detected and not masked. + */ +#define GPIOMIS 0x040 + +/* + * Data Value. Dynamically reflects the value of the GPIO pin. If 1 the pin is + * a digital 1 and if 0 the pin is a digital 0. + */ +#define GPIODIN 0x048 + +/* + * From the data sheet section 12.2, there are 64 32-bit output data registers + * and 64 output enable registers. Output data and output enable registers for + * a given GPIO are contiguous. E.g. GPO0_DOUT_CFG is 0x50 and GPO0_DOEN_CFG is + * 0x54 while GPO1_DOUT_CFG is 0x58 and GPO1_DOEN_CFG is 0x5c. The stride + * between GPIO registers is effectively 8, thus: GPOn_DOUT_CFG is 0x50 + 8n + * and GPOn_DOEN_CFG is 0x54 + 8n. + */ +#define GPON_DOUT_CFG 0x050 +#define GPON_DOEN_CFG 0x054 + +/* + * From Section 12.3, there are 75 input signal configuration registers which + * are 4 bytes wide starting with GPI_CPU_JTAG_TCK_CFG at 0x250 and ending with + * GPI_USB_OVER_CURRENT_CFG at 0x378. + */ +#define GPI_CFG_OFFSET 0x250 + +/* + * Pad Control Bits. There are 16 pad control bits for each pin located in 103 + * 32-bit registers controlling PAD_GPIO[0] to PAD_GPIO[63] followed by + * PAD_FUNC_SHARE[0] to PAD_FUNC_SHARE[141]. Odd-numbered pins use the upper 16 + * bits of each register. + */ +#define PAD_SLEW_RATE_MASK GENMASK(11, 9) +#define PAD_SLEW_RATE_POS 9 +#define PAD_BIAS_STRONG_PULL_UP BIT(8) +#define PAD_INPUT_ENABLE BIT(7) +#define PAD_INPUT_SCHMITT_ENABLE BIT(6) +#define PAD_BIAS_DISABLE BIT(5) +#define PAD_BIAS_PULL_DOWN BIT(4) +#define PAD_BIAS_MASK \ + (PAD_BIAS_STRONG_PULL_UP | \ + PAD_BIAS_DISABLE | \ + PAD_BIAS_PULL_DOWN) +#define PAD_DRIVE_STRENGTH_MASK GENMASK(3, 0) +#define PAD_DRIVE_STRENGTH_POS 0 + +/* + * From Section 11, the IO_PADSHARE_SEL register can be programmed to select + * one of seven pre-defined multiplexed signal groups on PAD_FUNC_SHARE and + * PAD_GPIO pads. This is a global setting. + */ +#define IO_PADSHARE_SEL 0x1a0 + +/* + * This just needs to be some number such that when + * sfp->gpios.pin_base = PAD_INVALID_GPIO then + * starfive_pin_to_gpio(sfp, validpin) is never a valid GPIO number.
+ * That is it should underflow and return something >= NR_GPIOS. + */ +#define PAD_INVALID_GPIO 0x10000 + +/* + * The packed pinmux values from the device tree look like this: + * + * | 31 - 24 | 23 - 16 | 15 - 8 | 7 | 6 | 5 - 0 | + * | dout | doen | din | dout rev | doen rev | gpio nr | + * + * ..but the GPOn_DOUT_CFG and GPOn_DOEN_CFG registers look like this: + * + * | 31 | 30 - 8 | 7 - 0 | + * | dout/doen rev | unused | dout/doen | + */ +static unsigned int starfive_pinmux_to_gpio(u32 v) +{ + return v & (NR_GPIOS - 1); +} + +static u32 starfive_pinmux_to_dout(u32 v) +{ + return ((v & BIT(7)) << (31 - 7)) | ((v >> 24) & GENMASK(7, 0)); +} + +static u32 starfive_pinmux_to_doen(u32 v) +{ + return ((v & BIT(6)) << (31 - 6)) | ((v >> 16) & GENMASK(7, 0)); +} + +static u32 starfive_pinmux_to_din(u32 v) +{ + return (v >> 8) & GENMASK(7, 0); +} + +/* + * The maximum GPIO output current depends on the chosen drive strength: + * + * DS: 0 1 2 3 4 5 6 7 + * mA: 14.2 21.2 28.2 35.2 42.2 49.1 56.0 62.8 + * + * After rounding that is 7*DS + 14 mA + */ +static u32 starfive_drive_strength_to_max_mA(u16 ds) +{ + return 7 * ds + 14; +} + +static u16 starfive_drive_strength_from_max_mA(u32 i) +{ + return (clamp(i, 14U, 63U) - 14) / 7; +} + +struct starfive_pinctrl { + struct gpio_chip gc; + struct pinctrl_gpio_range gpios; + raw_spinlock_t lock; + void __iomem *base; + void __iomem *padctl; + struct pinctrl_dev *pctl; +}; + +static inline unsigned int starfive_pin_to_gpio(const struct starfive_pinctrl *sfp, + unsigned int pin) +{ + return pin - sfp->gpios.pin_base; +} + +static inline unsigned int starfive_gpio_to_pin(const struct starfive_pinctrl *sfp, + unsigned int gpio) +{ + return sfp->gpios.pin_base + gpio; +} + +static struct starfive_pinctrl *starfive_from_irq_data(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + + return container_of(gc, struct starfive_pinctrl, gc); +} + +static struct starfive_pinctrl *starfive_from_irq_desc(struct irq_desc *desc) +{ + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + + return container_of(gc, struct starfive_pinctrl, gc); +} + +static const struct pinctrl_pin_desc starfive_pins[] = { + PINCTRL_PIN(PAD_GPIO(0), "GPIO[0]"), + PINCTRL_PIN(PAD_GPIO(1), "GPIO[1]"), + PINCTRL_PIN(PAD_GPIO(2), "GPIO[2]"), + PINCTRL_PIN(PAD_GPIO(3), "GPIO[3]"), + PINCTRL_PIN(PAD_GPIO(4), "GPIO[4]"), + PINCTRL_PIN(PAD_GPIO(5), "GPIO[5]"), + PINCTRL_PIN(PAD_GPIO(6), "GPIO[6]"), + PINCTRL_PIN(PAD_GPIO(7), "GPIO[7]"), + PINCTRL_PIN(PAD_GPIO(8), "GPIO[8]"), + PINCTRL_PIN(PAD_GPIO(9), "GPIO[9]"), + PINCTRL_PIN(PAD_GPIO(10), "GPIO[10]"), + PINCTRL_PIN(PAD_GPIO(11), "GPIO[11]"), + PINCTRL_PIN(PAD_GPIO(12), "GPIO[12]"), + PINCTRL_PIN(PAD_GPIO(13), "GPIO[13]"), + PINCTRL_PIN(PAD_GPIO(14), "GPIO[14]"), + PINCTRL_PIN(PAD_GPIO(15), "GPIO[15]"), + PINCTRL_PIN(PAD_GPIO(16), "GPIO[16]"), + PINCTRL_PIN(PAD_GPIO(17), "GPIO[17]"), + PINCTRL_PIN(PAD_GPIO(18), "GPIO[18]"), + PINCTRL_PIN(PAD_GPIO(19), "GPIO[19]"), + PINCTRL_PIN(PAD_GPIO(20), "GPIO[20]"), + PINCTRL_PIN(PAD_GPIO(21), "GPIO[21]"), + PINCTRL_PIN(PAD_GPIO(22), "GPIO[22]"), + PINCTRL_PIN(PAD_GPIO(23), "GPIO[23]"), + PINCTRL_PIN(PAD_GPIO(24), "GPIO[24]"), + PINCTRL_PIN(PAD_GPIO(25), "GPIO[25]"), + PINCTRL_PIN(PAD_GPIO(26), "GPIO[26]"), + PINCTRL_PIN(PAD_GPIO(27), "GPIO[27]"), + PINCTRL_PIN(PAD_GPIO(28), "GPIO[28]"), + PINCTRL_PIN(PAD_GPIO(29), "GPIO[29]"), + PINCTRL_PIN(PAD_GPIO(30), "GPIO[30]"), + PINCTRL_PIN(PAD_GPIO(31), "GPIO[31]"), + PINCTRL_PIN(PAD_GPIO(32), "GPIO[32]"), + PINCTRL_PIN(PAD_GPIO(33), 
"GPIO[33]"), + PINCTRL_PIN(PAD_GPIO(34), "GPIO[34]"), + PINCTRL_PIN(PAD_GPIO(35), "GPIO[35]"), + PINCTRL_PIN(PAD_GPIO(36), "GPIO[36]"), + PINCTRL_PIN(PAD_GPIO(37), "GPIO[37]"), + PINCTRL_PIN(PAD_GPIO(38), "GPIO[38]"), + PINCTRL_PIN(PAD_GPIO(39), "GPIO[39]"), + PINCTRL_PIN(PAD_GPIO(40), "GPIO[40]"), + PINCTRL_PIN(PAD_GPIO(41), "GPIO[41]"), + PINCTRL_PIN(PAD_GPIO(42), "GPIO[42]"), + PINCTRL_PIN(PAD_GPIO(43), "GPIO[43]"), + PINCTRL_PIN(PAD_GPIO(44), "GPIO[44]"), + PINCTRL_PIN(PAD_GPIO(45), "GPIO[45]"), + PINCTRL_PIN(PAD_GPIO(46), "GPIO[46]"), + PINCTRL_PIN(PAD_GPIO(47), "GPIO[47]"), + PINCTRL_PIN(PAD_GPIO(48), "GPIO[48]"), + PINCTRL_PIN(PAD_GPIO(49), "GPIO[49]"), + PINCTRL_PIN(PAD_GPIO(50), "GPIO[50]"), + PINCTRL_PIN(PAD_GPIO(51), "GPIO[51]"), + PINCTRL_PIN(PAD_GPIO(52), "GPIO[52]"), + PINCTRL_PIN(PAD_GPIO(53), "GPIO[53]"), + PINCTRL_PIN(PAD_GPIO(54), "GPIO[54]"), + PINCTRL_PIN(PAD_GPIO(55), "GPIO[55]"), + PINCTRL_PIN(PAD_GPIO(56), "GPIO[56]"), + PINCTRL_PIN(PAD_GPIO(57), "GPIO[57]"), + PINCTRL_PIN(PAD_GPIO(58), "GPIO[58]"), + PINCTRL_PIN(PAD_GPIO(59), "GPIO[59]"), + PINCTRL_PIN(PAD_GPIO(60), "GPIO[60]"), + PINCTRL_PIN(PAD_GPIO(61), "GPIO[61]"), + PINCTRL_PIN(PAD_GPIO(62), "GPIO[62]"), + PINCTRL_PIN(PAD_GPIO(63), "GPIO[63]"), + PINCTRL_PIN(PAD_FUNC_SHARE(0), "FUNC_SHARE[0]"), + PINCTRL_PIN(PAD_FUNC_SHARE(1), "FUNC_SHARE[1]"), + PINCTRL_PIN(PAD_FUNC_SHARE(2), "FUNC_SHARE[2]"), + PINCTRL_PIN(PAD_FUNC_SHARE(3), "FUNC_SHARE[3]"), + PINCTRL_PIN(PAD_FUNC_SHARE(4), "FUNC_SHARE[4]"), + PINCTRL_PIN(PAD_FUNC_SHARE(5), "FUNC_SHARE[5]"), + PINCTRL_PIN(PAD_FUNC_SHARE(6), "FUNC_SHARE[6]"), + PINCTRL_PIN(PAD_FUNC_SHARE(7), "FUNC_SHARE[7]"), + PINCTRL_PIN(PAD_FUNC_SHARE(8), "FUNC_SHARE[8]"), + PINCTRL_PIN(PAD_FUNC_SHARE(9), "FUNC_SHARE[9]"), + PINCTRL_PIN(PAD_FUNC_SHARE(10), "FUNC_SHARE[10]"), + PINCTRL_PIN(PAD_FUNC_SHARE(11), "FUNC_SHARE[11]"), + PINCTRL_PIN(PAD_FUNC_SHARE(12), "FUNC_SHARE[12]"), + PINCTRL_PIN(PAD_FUNC_SHARE(13), "FUNC_SHARE[13]"), + PINCTRL_PIN(PAD_FUNC_SHARE(14), "FUNC_SHARE[14]"), + PINCTRL_PIN(PAD_FUNC_SHARE(15), "FUNC_SHARE[15]"), + PINCTRL_PIN(PAD_FUNC_SHARE(16), "FUNC_SHARE[16]"), + PINCTRL_PIN(PAD_FUNC_SHARE(17), "FUNC_SHARE[17]"), + PINCTRL_PIN(PAD_FUNC_SHARE(18), "FUNC_SHARE[18]"), + PINCTRL_PIN(PAD_FUNC_SHARE(19), "FUNC_SHARE[19]"), + PINCTRL_PIN(PAD_FUNC_SHARE(20), "FUNC_SHARE[20]"), + PINCTRL_PIN(PAD_FUNC_SHARE(21), "FUNC_SHARE[21]"), + PINCTRL_PIN(PAD_FUNC_SHARE(22), "FUNC_SHARE[22]"), + PINCTRL_PIN(PAD_FUNC_SHARE(23), "FUNC_SHARE[23]"), + PINCTRL_PIN(PAD_FUNC_SHARE(24), "FUNC_SHARE[24]"), + PINCTRL_PIN(PAD_FUNC_SHARE(25), "FUNC_SHARE[25]"), + PINCTRL_PIN(PAD_FUNC_SHARE(26), "FUNC_SHARE[26]"), + PINCTRL_PIN(PAD_FUNC_SHARE(27), "FUNC_SHARE[27]"), + PINCTRL_PIN(PAD_FUNC_SHARE(28), "FUNC_SHARE[28]"), + PINCTRL_PIN(PAD_FUNC_SHARE(29), "FUNC_SHARE[29]"), + PINCTRL_PIN(PAD_FUNC_SHARE(30), "FUNC_SHARE[30]"), + PINCTRL_PIN(PAD_FUNC_SHARE(31), "FUNC_SHARE[31]"), + PINCTRL_PIN(PAD_FUNC_SHARE(32), "FUNC_SHARE[32]"), + PINCTRL_PIN(PAD_FUNC_SHARE(33), "FUNC_SHARE[33]"), + PINCTRL_PIN(PAD_FUNC_SHARE(34), "FUNC_SHARE[34]"), + PINCTRL_PIN(PAD_FUNC_SHARE(35), "FUNC_SHARE[35]"), + PINCTRL_PIN(PAD_FUNC_SHARE(36), "FUNC_SHARE[36]"), + PINCTRL_PIN(PAD_FUNC_SHARE(37), "FUNC_SHARE[37]"), + PINCTRL_PIN(PAD_FUNC_SHARE(38), "FUNC_SHARE[38]"), + PINCTRL_PIN(PAD_FUNC_SHARE(39), "FUNC_SHARE[39]"), + PINCTRL_PIN(PAD_FUNC_SHARE(40), "FUNC_SHARE[40]"), + PINCTRL_PIN(PAD_FUNC_SHARE(41), "FUNC_SHARE[41]"), + PINCTRL_PIN(PAD_FUNC_SHARE(42), "FUNC_SHARE[42]"), + PINCTRL_PIN(PAD_FUNC_SHARE(43), "FUNC_SHARE[43]"), 
+ PINCTRL_PIN(PAD_FUNC_SHARE(44), "FUNC_SHARE[44]"), + PINCTRL_PIN(PAD_FUNC_SHARE(45), "FUNC_SHARE[45]"), + PINCTRL_PIN(PAD_FUNC_SHARE(46), "FUNC_SHARE[46]"), + PINCTRL_PIN(PAD_FUNC_SHARE(47), "FUNC_SHARE[47]"), + PINCTRL_PIN(PAD_FUNC_SHARE(48), "FUNC_SHARE[48]"), + PINCTRL_PIN(PAD_FUNC_SHARE(49), "FUNC_SHARE[49]"), + PINCTRL_PIN(PAD_FUNC_SHARE(50), "FUNC_SHARE[50]"), + PINCTRL_PIN(PAD_FUNC_SHARE(51), "FUNC_SHARE[51]"), + PINCTRL_PIN(PAD_FUNC_SHARE(52), "FUNC_SHARE[52]"), + PINCTRL_PIN(PAD_FUNC_SHARE(53), "FUNC_SHARE[53]"), + PINCTRL_PIN(PAD_FUNC_SHARE(54), "FUNC_SHARE[54]"), + PINCTRL_PIN(PAD_FUNC_SHARE(55), "FUNC_SHARE[55]"), + PINCTRL_PIN(PAD_FUNC_SHARE(56), "FUNC_SHARE[56]"), + PINCTRL_PIN(PAD_FUNC_SHARE(57), "FUNC_SHARE[57]"), + PINCTRL_PIN(PAD_FUNC_SHARE(58), "FUNC_SHARE[58]"), + PINCTRL_PIN(PAD_FUNC_SHARE(59), "FUNC_SHARE[59]"), + PINCTRL_PIN(PAD_FUNC_SHARE(60), "FUNC_SHARE[60]"), + PINCTRL_PIN(PAD_FUNC_SHARE(61), "FUNC_SHARE[61]"), + PINCTRL_PIN(PAD_FUNC_SHARE(62), "FUNC_SHARE[62]"), + PINCTRL_PIN(PAD_FUNC_SHARE(63), "FUNC_SHARE[63]"), + PINCTRL_PIN(PAD_FUNC_SHARE(64), "FUNC_SHARE[64]"), + PINCTRL_PIN(PAD_FUNC_SHARE(65), "FUNC_SHARE[65]"), + PINCTRL_PIN(PAD_FUNC_SHARE(66), "FUNC_SHARE[66]"), + PINCTRL_PIN(PAD_FUNC_SHARE(67), "FUNC_SHARE[67]"), + PINCTRL_PIN(PAD_FUNC_SHARE(68), "FUNC_SHARE[68]"), + PINCTRL_PIN(PAD_FUNC_SHARE(69), "FUNC_SHARE[69]"), + PINCTRL_PIN(PAD_FUNC_SHARE(70), "FUNC_SHARE[70]"), + PINCTRL_PIN(PAD_FUNC_SHARE(71), "FUNC_SHARE[71]"), + PINCTRL_PIN(PAD_FUNC_SHARE(72), "FUNC_SHARE[72]"), + PINCTRL_PIN(PAD_FUNC_SHARE(73), "FUNC_SHARE[73]"), + PINCTRL_PIN(PAD_FUNC_SHARE(74), "FUNC_SHARE[74]"), + PINCTRL_PIN(PAD_FUNC_SHARE(75), "FUNC_SHARE[75]"), + PINCTRL_PIN(PAD_FUNC_SHARE(76), "FUNC_SHARE[76]"), + PINCTRL_PIN(PAD_FUNC_SHARE(77), "FUNC_SHARE[77]"), + PINCTRL_PIN(PAD_FUNC_SHARE(78), "FUNC_SHARE[78]"), + PINCTRL_PIN(PAD_FUNC_SHARE(79), "FUNC_SHARE[79]"), + PINCTRL_PIN(PAD_FUNC_SHARE(80), "FUNC_SHARE[80]"), + PINCTRL_PIN(PAD_FUNC_SHARE(81), "FUNC_SHARE[81]"), + PINCTRL_PIN(PAD_FUNC_SHARE(82), "FUNC_SHARE[82]"), + PINCTRL_PIN(PAD_FUNC_SHARE(83), "FUNC_SHARE[83]"), + PINCTRL_PIN(PAD_FUNC_SHARE(84), "FUNC_SHARE[84]"), + PINCTRL_PIN(PAD_FUNC_SHARE(85), "FUNC_SHARE[85]"), + PINCTRL_PIN(PAD_FUNC_SHARE(86), "FUNC_SHARE[86]"), + PINCTRL_PIN(PAD_FUNC_SHARE(87), "FUNC_SHARE[87]"), + PINCTRL_PIN(PAD_FUNC_SHARE(88), "FUNC_SHARE[88]"), + PINCTRL_PIN(PAD_FUNC_SHARE(89), "FUNC_SHARE[89]"), + PINCTRL_PIN(PAD_FUNC_SHARE(90), "FUNC_SHARE[90]"), + PINCTRL_PIN(PAD_FUNC_SHARE(91), "FUNC_SHARE[91]"), + PINCTRL_PIN(PAD_FUNC_SHARE(92), "FUNC_SHARE[92]"), + PINCTRL_PIN(PAD_FUNC_SHARE(93), "FUNC_SHARE[93]"), + PINCTRL_PIN(PAD_FUNC_SHARE(94), "FUNC_SHARE[94]"), + PINCTRL_PIN(PAD_FUNC_SHARE(95), "FUNC_SHARE[95]"), + PINCTRL_PIN(PAD_FUNC_SHARE(96), "FUNC_SHARE[96]"), + PINCTRL_PIN(PAD_FUNC_SHARE(97), "FUNC_SHARE[97]"), + PINCTRL_PIN(PAD_FUNC_SHARE(98), "FUNC_SHARE[98]"), + PINCTRL_PIN(PAD_FUNC_SHARE(99), "FUNC_SHARE[99]"), + PINCTRL_PIN(PAD_FUNC_SHARE(100), "FUNC_SHARE[100]"), + PINCTRL_PIN(PAD_FUNC_SHARE(101), "FUNC_SHARE[101]"), + PINCTRL_PIN(PAD_FUNC_SHARE(102), "FUNC_SHARE[102]"), + PINCTRL_PIN(PAD_FUNC_SHARE(103), "FUNC_SHARE[103]"), + PINCTRL_PIN(PAD_FUNC_SHARE(104), "FUNC_SHARE[104]"), + PINCTRL_PIN(PAD_FUNC_SHARE(105), "FUNC_SHARE[105]"), + PINCTRL_PIN(PAD_FUNC_SHARE(106), "FUNC_SHARE[106]"), + PINCTRL_PIN(PAD_FUNC_SHARE(107), "FUNC_SHARE[107]"), + PINCTRL_PIN(PAD_FUNC_SHARE(108), "FUNC_SHARE[108]"), + PINCTRL_PIN(PAD_FUNC_SHARE(109), "FUNC_SHARE[109]"), + PINCTRL_PIN(PAD_FUNC_SHARE(110), 
"FUNC_SHARE[110]"), + PINCTRL_PIN(PAD_FUNC_SHARE(111), "FUNC_SHARE[111]"), + PINCTRL_PIN(PAD_FUNC_SHARE(112), "FUNC_SHARE[112]"), + PINCTRL_PIN(PAD_FUNC_SHARE(113), "FUNC_SHARE[113]"), + PINCTRL_PIN(PAD_FUNC_SHARE(114), "FUNC_SHARE[114]"), + PINCTRL_PIN(PAD_FUNC_SHARE(115), "FUNC_SHARE[115]"), + PINCTRL_PIN(PAD_FUNC_SHARE(116), "FUNC_SHARE[116]"), + PINCTRL_PIN(PAD_FUNC_SHARE(117), "FUNC_SHARE[117]"), + PINCTRL_PIN(PAD_FUNC_SHARE(118), "FUNC_SHARE[118]"), + PINCTRL_PIN(PAD_FUNC_SHARE(119), "FUNC_SHARE[119]"), + PINCTRL_PIN(PAD_FUNC_SHARE(120), "FUNC_SHARE[120]"), + PINCTRL_PIN(PAD_FUNC_SHARE(121), "FUNC_SHARE[121]"), + PINCTRL_PIN(PAD_FUNC_SHARE(122), "FUNC_SHARE[122]"), + PINCTRL_PIN(PAD_FUNC_SHARE(123), "FUNC_SHARE[123]"), + PINCTRL_PIN(PAD_FUNC_SHARE(124), "FUNC_SHARE[124]"), + PINCTRL_PIN(PAD_FUNC_SHARE(125), "FUNC_SHARE[125]"), + PINCTRL_PIN(PAD_FUNC_SHARE(126), "FUNC_SHARE[126]"), + PINCTRL_PIN(PAD_FUNC_SHARE(127), "FUNC_SHARE[127]"), + PINCTRL_PIN(PAD_FUNC_SHARE(128), "FUNC_SHARE[128]"), + PINCTRL_PIN(PAD_FUNC_SHARE(129), "FUNC_SHARE[129]"), + PINCTRL_PIN(PAD_FUNC_SHARE(130), "FUNC_SHARE[130]"), + PINCTRL_PIN(PAD_FUNC_SHARE(131), "FUNC_SHARE[131]"), + PINCTRL_PIN(PAD_FUNC_SHARE(132), "FUNC_SHARE[132]"), + PINCTRL_PIN(PAD_FUNC_SHARE(133), "FUNC_SHARE[133]"), + PINCTRL_PIN(PAD_FUNC_SHARE(134), "FUNC_SHARE[134]"), + PINCTRL_PIN(PAD_FUNC_SHARE(135), "FUNC_SHARE[135]"), + PINCTRL_PIN(PAD_FUNC_SHARE(136), "FUNC_SHARE[136]"), + PINCTRL_PIN(PAD_FUNC_SHARE(137), "FUNC_SHARE[137]"), + PINCTRL_PIN(PAD_FUNC_SHARE(138), "FUNC_SHARE[138]"), + PINCTRL_PIN(PAD_FUNC_SHARE(139), "FUNC_SHARE[139]"), + PINCTRL_PIN(PAD_FUNC_SHARE(140), "FUNC_SHARE[140]"), + PINCTRL_PIN(PAD_FUNC_SHARE(141), "FUNC_SHARE[141]"), +}; + +#ifdef CONFIG_DEBUG_FS +static void starfive_pin_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, + unsigned int pin) +{ + struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev); + unsigned int gpio = starfive_pin_to_gpio(sfp, pin); + void __iomem *reg; + u32 dout, doen; + + if (gpio >= NR_GPIOS) + return; + + reg = sfp->base + GPON_DOUT_CFG + 8 * gpio; + dout = readl_relaxed(reg + 0x000); + doen = readl_relaxed(reg + 0x004); + + seq_printf(s, "dout=%lu%s doen=%lu%s", + dout & GENMASK(7, 0), (dout & BIT(31)) ? "r" : "", + doen & GENMASK(7, 0), (doen & BIT(31)) ? 
"r" : ""); +} +#else +#define starfive_pin_dbg_show NULL +#endif + +static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **maps, + unsigned int *num_maps) +{ + struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = sfp->gc.parent; + struct device_node *child; + struct pinctrl_map *map; + const char **pgnames; + const char *grpname; + u32 *pinmux; + int ngroups; + int *pins; + int nmaps; + int ret; + + nmaps = 0; + ngroups = 0; + for_each_child_of_node(np, child) { + int npinmux = of_property_count_u32_elems(child, "pinmux"); + int npins = of_property_count_u32_elems(child, "pins"); + + if (npinmux > 0 && npins > 0) { + dev_err(dev, "invalid pinctrl group %pOFn.%pOFn: both pinmux and pins set\n", + np, child); + of_node_put(child); + return -EINVAL; + } + if (npinmux == 0 && npins == 0) { + dev_err(dev, "invalid pinctrl group %pOFn.%pOFn: neither pinmux nor pins set\n", + np, child); + of_node_put(child); + return -EINVAL; + } + + if (npinmux > 0) + nmaps += 2; + else + nmaps += 1; + ngroups += 1; + } + + pgnames = devm_kcalloc(dev, ngroups, sizeof(*pgnames), GFP_KERNEL); + if (!pgnames) + return -ENOMEM; + + map = kcalloc(nmaps, sizeof(*map), GFP_KERNEL); + if (!map) + return -ENOMEM; + + nmaps = 0; + ngroups = 0; + for_each_child_of_node(np, child) { + int npins; + int i; + + grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", np, child); + if (!grpname) { + ret = -ENOMEM; + goto put_child; + } + + pgnames[ngroups++] = grpname; + + if ((npins = of_property_count_u32_elems(child, "pinmux")) > 0) { + pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL); + if (!pins) { + ret = -ENOMEM; + goto put_child; + } + + pinmux = devm_kcalloc(dev, npins, sizeof(*pinmux), GFP_KERNEL); + if (!pinmux) { + ret = -ENOMEM; + goto put_child; + } + + ret = of_property_read_u32_array(child, "pinmux", pinmux, npins); + if (ret) + goto put_child; + + for (i = 0; i < npins; i++) { + unsigned int gpio = starfive_pinmux_to_gpio(pinmux[i]); + + pins[i] = starfive_gpio_to_pin(sfp, gpio); + } + + map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP; + map[nmaps].data.mux.function = np->name; + map[nmaps].data.mux.group = grpname; + nmaps += 1; + } else if ((npins = of_property_count_u32_elems(child, "pins")) > 0) { + pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL); + if (!pins) { + ret = -ENOMEM; + goto put_child; + } + + pinmux = NULL; + + for (i = 0; i < npins; i++) { + u32 v; + + ret = of_property_read_u32_index(child, "pins", i, &v); + if (ret) + goto put_child; + pins[i] = v; + } + } else { + ret = -EINVAL; + goto put_child; + } + + ret = pinctrl_generic_add_group(pctldev, grpname, pins, npins, pinmux); + if (ret < 0) { + dev_err(dev, "error adding group %s: %d\n", grpname, ret); + goto put_child; + } + + ret = pinconf_generic_parse_dt_config(child, pctldev, + &map[nmaps].data.configs.configs, + &map[nmaps].data.configs.num_configs); + if (ret) { + dev_err(dev, "error parsing pin config of group %s: %d\n", + grpname, ret); + goto put_child; + } + + /* don't create a map if there are no pinconf settings */ + if (map[nmaps].data.configs.num_configs == 0) + continue; + + map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP; + map[nmaps].data.configs.group_or_pin = grpname; + nmaps += 1; + } + + ret = pinmux_generic_add_function(pctldev, np->name, pgnames, ngroups, NULL); + if (ret < 0) { + dev_err(dev, "error adding function %s: %d\n", np->name, ret); + goto free_map; + } + + *maps = map; + *num_maps = nmaps; + return 0; + 
+put_child: + of_node_put(child); +free_map: + pinctrl_utils_free_map(pctldev, map, nmaps); + return ret; +} + +static const struct pinctrl_ops starfive_pinctrl_ops = { + .get_groups_count = pinctrl_generic_get_group_count, + .get_group_name = pinctrl_generic_get_group_name, + .get_group_pins = pinctrl_generic_get_group_pins, + .pin_dbg_show = starfive_pin_dbg_show, + .dt_node_to_map = starfive_dt_node_to_map, + .dt_free_map = pinctrl_utils_free_map, +}; + +static int starfive_set_mux(struct pinctrl_dev *pctldev, + unsigned int fsel, unsigned int gsel) +{ + struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev); + struct device *dev = sfp->gc.parent; + const struct group_desc *group; + const u32 *pinmux; + unsigned int i; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + pinmux = group->data; + for (i = 0; i < group->num_pins; i++) { + u32 v = pinmux[i]; + unsigned int gpio = starfive_pinmux_to_gpio(v); + u32 dout = starfive_pinmux_to_dout(v); + u32 doen = starfive_pinmux_to_doen(v); + u32 din = starfive_pinmux_to_din(v); + void __iomem *reg_dout; + void __iomem *reg_doen; + void __iomem *reg_din; + unsigned long flags; + + dev_dbg(dev, "GPIO%u: dout=0x%x doen=0x%x din=0x%x\n", + gpio, dout, doen, din); + + reg_dout = sfp->base + GPON_DOUT_CFG + 8 * gpio; + reg_doen = sfp->base + GPON_DOEN_CFG + 8 * gpio; + if (din != GPI_NONE) + reg_din = sfp->base + GPI_CFG_OFFSET + 4 * din; + else + reg_din = NULL; + + raw_spin_lock_irqsave(&sfp->lock, flags); + writel_relaxed(dout, reg_dout); + writel_relaxed(doen, reg_doen); + if (reg_din) + writel_relaxed(gpio + 2, reg_din); + raw_spin_unlock_irqrestore(&sfp->lock, flags); + } + + return 0; +} + +static const struct pinmux_ops starfive_pinmux_ops = { + .get_functions_count = pinmux_generic_get_function_count, + .get_function_name = pinmux_generic_get_function_name, + .get_function_groups = pinmux_generic_get_function_groups, + .set_mux = starfive_set_mux, + .strict = true, +}; + +static u16 starfive_padctl_get(struct starfive_pinctrl *sfp, + unsigned int pin) +{ + void __iomem *reg = sfp->padctl + 4 * (pin / 2); + int shift = 16 * (pin % 2); + + return readl_relaxed(reg) >> shift; +} + +static void starfive_padctl_rmw(struct starfive_pinctrl *sfp, + unsigned int pin, + u16 _mask, u16 _value) +{ + void __iomem *reg = sfp->padctl + 4 * (pin / 2); + int shift = 16 * (pin % 2); + u32 mask = (u32)_mask << shift; + u32 value = (u32)_value << shift; + unsigned long flags; + + dev_dbg(sfp->gc.parent, "padctl_rmw(%u, 0x%03x, 0x%03x)\n", pin, _mask, _value); + + raw_spin_lock_irqsave(&sfp->lock, flags); + value |= readl_relaxed(reg) & ~mask; + writel_relaxed(value, reg); + raw_spin_unlock_irqrestore(&sfp->lock, flags); +} + +#define PIN_CONFIG_STARFIVE_STRONG_PULL_UP (PIN_CONFIG_END + 1) + +static const struct pinconf_generic_params starfive_pinconf_custom_params[] = { + { "starfive,strong-pull-up", PIN_CONFIG_STARFIVE_STRONG_PULL_UP, 1 }, +}; + +#ifdef CONFIG_DEBUG_FS +static const struct pin_config_item starfive_pinconf_custom_conf_items[] = { + PCONFDUMP(PIN_CONFIG_STARFIVE_STRONG_PULL_UP, "input bias strong pull-up", NULL, false), +}; + +static_assert(ARRAY_SIZE(starfive_pinconf_custom_conf_items) == + ARRAY_SIZE(starfive_pinconf_custom_params)); +#else +#define starfive_pinconf_custom_conf_items NULL +#endif + +static int starfive_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev); + int param = 
pinconf_to_config_param(*config); + u16 value = starfive_padctl_get(sfp, pin); + bool enabled; + u32 arg; + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + enabled = value & PAD_BIAS_DISABLE; + arg = 0; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + enabled = value & PAD_BIAS_PULL_DOWN; + arg = 1; + break; + case PIN_CONFIG_BIAS_PULL_UP: + enabled = !(value & PAD_BIAS_MASK); + arg = 1; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + enabled = value & PAD_DRIVE_STRENGTH_MASK; + arg = starfive_drive_strength_to_max_mA(value & PAD_DRIVE_STRENGTH_MASK); + break; + case PIN_CONFIG_INPUT_ENABLE: + enabled = value & PAD_INPUT_ENABLE; + arg = enabled; + break; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + enabled = value & PAD_INPUT_SCHMITT_ENABLE; + arg = enabled; + break; + case PIN_CONFIG_SLEW_RATE: + enabled = value & PAD_SLEW_RATE_MASK; + arg = (value & PAD_SLEW_RATE_MASK) >> PAD_SLEW_RATE_POS; + break; + case PIN_CONFIG_STARFIVE_STRONG_PULL_UP: + enabled = value & PAD_BIAS_STRONG_PULL_UP; + arg = enabled; + break; + default: + return -ENOTSUPP; + } + + *config = pinconf_to_config_packed(param, arg); + return enabled ? 0 : -EINVAL; +} + +static int starfive_pinconf_group_get(struct pinctrl_dev *pctldev, + unsigned int gsel, unsigned long *config) +{ + const struct group_desc *group; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + return starfive_pinconf_get(pctldev, group->pins[0], config); +} + +static int starfive_pinconf_group_set(struct pinctrl_dev *pctldev, + unsigned int gsel, + unsigned long *configs, + unsigned int num_configs) +{ + struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev); + const struct group_desc *group; + u16 mask, value; + int i; + + group = pinctrl_generic_get_group(pctldev, gsel); + if (!group) + return -EINVAL; + + mask = 0; + value = 0; + for (i = 0; i < num_configs; i++) { + int param = pinconf_to_config_param(configs[i]); + u32 arg = pinconf_to_config_argument(configs[i]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + mask |= PAD_BIAS_MASK; + value = (value & ~PAD_BIAS_MASK) | PAD_BIAS_DISABLE; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + if (arg == 0) + return -ENOTSUPP; + mask |= PAD_BIAS_MASK; + value = (value & ~PAD_BIAS_MASK) | PAD_BIAS_PULL_DOWN; + break; + case PIN_CONFIG_BIAS_PULL_UP: + if (arg == 0) + return -ENOTSUPP; + mask |= PAD_BIAS_MASK; + value = value & ~PAD_BIAS_MASK; + break; + case PIN_CONFIG_DRIVE_STRENGTH: + mask |= PAD_DRIVE_STRENGTH_MASK; + value = (value & ~PAD_DRIVE_STRENGTH_MASK) | + starfive_drive_strength_from_max_mA(arg); + break; + case PIN_CONFIG_INPUT_ENABLE: + mask |= PAD_INPUT_ENABLE; + if (arg) + value |= PAD_INPUT_ENABLE; + else + value &= ~PAD_INPUT_ENABLE; + break; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + mask |= PAD_INPUT_SCHMITT_ENABLE; + if (arg) + value |= PAD_INPUT_SCHMITT_ENABLE; + else + value &= ~PAD_INPUT_SCHMITT_ENABLE; + break; + case PIN_CONFIG_SLEW_RATE: + mask |= PAD_SLEW_RATE_MASK; + value = (value & ~PAD_SLEW_RATE_MASK) | + ((arg << PAD_SLEW_RATE_POS) & PAD_SLEW_RATE_MASK); + break; + case PIN_CONFIG_STARFIVE_STRONG_PULL_UP: + if (arg) { + mask |= PAD_BIAS_MASK; + value = (value & ~PAD_BIAS_MASK) | + PAD_BIAS_STRONG_PULL_UP; + } else { + mask |= PAD_BIAS_STRONG_PULL_UP; + value = value & ~PAD_BIAS_STRONG_PULL_UP; + } + break; + default: + return -ENOTSUPP; + } + } + + for (i = 0; i < group->num_pins; i++) + starfive_padctl_rmw(sfp, group->pins[i], mask, value); + + return 0; +} + +#ifdef CONFIG_DEBUG_FS +static void starfive_pinconf_dbg_show(struct 
pinctrl_dev *pctldev, + struct seq_file *s, unsigned int pin) +{ + struct starfive_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev); + u16 value = starfive_padctl_get(sfp, pin); + + seq_printf(s, " (0x%03x)", value); +} +#else +#define starfive_pinconf_dbg_show NULL +#endif + +static const struct pinconf_ops starfive_pinconf_ops = { + .pin_config_get = starfive_pinconf_get, + .pin_config_group_get = starfive_pinconf_group_get, + .pin_config_group_set = starfive_pinconf_group_set, + .pin_config_dbg_show = starfive_pinconf_dbg_show, + .is_generic = true, +}; + +static struct pinctrl_desc starfive_desc = { + .name = DRIVER_NAME, + .pins = starfive_pins, + .npins = ARRAY_SIZE(starfive_pins), + .pctlops = &starfive_pinctrl_ops, + .pmxops = &starfive_pinmux_ops, + .confops = &starfive_pinconf_ops, + .owner = THIS_MODULE, + .num_custom_params = ARRAY_SIZE(starfive_pinconf_custom_params), + .custom_params = starfive_pinconf_custom_params, + .custom_conf_items = starfive_pinconf_custom_conf_items, +}; + +static int starfive_gpio_request(struct gpio_chip *gc, unsigned int gpio) +{ + return pinctrl_gpio_request(gc->base + gpio); +} + +static void starfive_gpio_free(struct gpio_chip *gc, unsigned int gpio) +{ + pinctrl_gpio_free(gc->base + gpio); +} + +static int starfive_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + void __iomem *doen = sfp->base + GPON_DOEN_CFG + 8 * gpio; + + if (readl_relaxed(doen) == GPO_ENABLE) + return GPIO_LINE_DIRECTION_OUT; + + return GPIO_LINE_DIRECTION_IN; +} + +static int starfive_gpio_direction_input(struct gpio_chip *gc, + unsigned int gpio) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + void __iomem *doen = sfp->base + GPON_DOEN_CFG + 8 * gpio; + unsigned long flags; + + /* enable input and schmitt trigger */ + starfive_padctl_rmw(sfp, starfive_gpio_to_pin(sfp, gpio), + PAD_INPUT_ENABLE | PAD_INPUT_SCHMITT_ENABLE, + PAD_INPUT_ENABLE | PAD_INPUT_SCHMITT_ENABLE); + + raw_spin_lock_irqsave(&sfp->lock, flags); + writel_relaxed(GPO_DISABLE, doen); + raw_spin_unlock_irqrestore(&sfp->lock, flags); + return 0; +} + +static int starfive_gpio_direction_output(struct gpio_chip *gc, + unsigned int gpio, int value) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + void __iomem *dout = sfp->base + GPON_DOUT_CFG + 8 * gpio; + void __iomem *doen = sfp->base + GPON_DOEN_CFG + 8 * gpio; + unsigned long flags; + + raw_spin_lock_irqsave(&sfp->lock, flags); + writel_relaxed(value, dout); + writel_relaxed(GPO_ENABLE, doen); + raw_spin_unlock_irqrestore(&sfp->lock, flags); + + /* disable input, schmitt trigger and bias */ + starfive_padctl_rmw(sfp, starfive_gpio_to_pin(sfp, gpio), + PAD_BIAS_MASK | PAD_INPUT_ENABLE | PAD_INPUT_SCHMITT_ENABLE, + PAD_BIAS_DISABLE); + + return 0; +} + +static int starfive_gpio_get(struct gpio_chip *gc, unsigned int gpio) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + void __iomem *din = sfp->base + GPIODIN + 4 * (gpio / 32); + + return !!(readl_relaxed(din) & BIT(gpio % 32)); +} + +static void starfive_gpio_set(struct gpio_chip *gc, unsigned int gpio, + int value) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + void __iomem *dout = sfp->base + GPON_DOUT_CFG + 8 * gpio; + unsigned long flags; + + raw_spin_lock_irqsave(&sfp->lock, flags); + writel_relaxed(value, dout); + raw_spin_unlock_irqrestore(&sfp->lock, flags); +} + 
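+ /* + * Translate the generic pinconf parameters gpiolib may pass here into + * pad control bits. E.g. PIN_CONFIG_BIAS_PULL_DOWN below clears the + * whole PAD_BIAS_MASK field and sets only PAD_BIAS_PULL_DOWN in the + * pin's 16-bit pad control value. + */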
+static int starfive_gpio_set_config(struct gpio_chip *gc, unsigned int gpio, + unsigned long config) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + u32 arg = pinconf_to_config_argument(config); + u16 value; + u16 mask; + + switch (pinconf_to_config_param(config)) { + case PIN_CONFIG_BIAS_DISABLE: + mask = PAD_BIAS_MASK; + value = PAD_BIAS_DISABLE; + break; + case PIN_CONFIG_BIAS_PULL_DOWN: + if (arg == 0) + return -ENOTSUPP; + mask = PAD_BIAS_MASK; + value = PAD_BIAS_PULL_DOWN; + break; + case PIN_CONFIG_BIAS_PULL_UP: + if (arg == 0) + return -ENOTSUPP; + mask = PAD_BIAS_MASK; + value = 0; + break; + case PIN_CONFIG_DRIVE_PUSH_PULL: + return 0; + case PIN_CONFIG_INPUT_ENABLE: + mask = PAD_INPUT_ENABLE; + value = arg ? PAD_INPUT_ENABLE : 0; + break; + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + mask = PAD_INPUT_SCHMITT_ENABLE; + value = arg ? PAD_INPUT_SCHMITT_ENABLE : 0; + break; + default: + return -ENOTSUPP; + } + + starfive_padctl_rmw(sfp, starfive_gpio_to_pin(sfp, gpio), mask, value); + return 0; +} + +static int starfive_gpio_add_pin_ranges(struct gpio_chip *gc) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + + sfp->gpios.name = sfp->gc.label; + sfp->gpios.base = sfp->gc.base; + /* + * sfp->gpios.pin_base depends on the chosen signal group + * and is set in starfive_probe() + */ + sfp->gpios.npins = NR_GPIOS; + sfp->gpios.gc = &sfp->gc; + pinctrl_add_gpio_range(sfp->pctl, &sfp->gpios); + return 0; +} + +static void starfive_irq_ack(struct irq_data *d) +{ + struct starfive_pinctrl *sfp = starfive_from_irq_data(d); + irq_hw_number_t gpio = irqd_to_hwirq(d); + void __iomem *ic = sfp->base + GPIOIC + 4 * (gpio / 32); + u32 mask = BIT(gpio % 32); + unsigned long flags; + + raw_spin_lock_irqsave(&sfp->lock, flags); + writel_relaxed(mask, ic); + raw_spin_unlock_irqrestore(&sfp->lock, flags); +} + +static void starfive_irq_mask(struct irq_data *d) +{ + struct starfive_pinctrl *sfp = starfive_from_irq_data(d); + irq_hw_number_t gpio = irqd_to_hwirq(d); + void __iomem *ie = sfp->base + GPIOIE + 4 * (gpio / 32); + u32 mask = BIT(gpio % 32); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&sfp->lock, flags); + value = readl_relaxed(ie) & ~mask; + writel_relaxed(value, ie); + raw_spin_unlock_irqrestore(&sfp->lock, flags); +} + +static void starfive_irq_mask_ack(struct irq_data *d) +{ + struct starfive_pinctrl *sfp = starfive_from_irq_data(d); + irq_hw_number_t gpio = irqd_to_hwirq(d); + void __iomem *ie = sfp->base + GPIOIE + 4 * (gpio / 32); + void __iomem *ic = sfp->base + GPIOIC + 4 * (gpio / 32); + u32 mask = BIT(gpio % 32); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&sfp->lock, flags); + value = readl_relaxed(ie) & ~mask; + writel_relaxed(value, ie); + writel_relaxed(mask, ic); + raw_spin_unlock_irqrestore(&sfp->lock, flags); +} + +static void starfive_irq_unmask(struct irq_data *d) +{ + struct starfive_pinctrl *sfp = starfive_from_irq_data(d); + irq_hw_number_t gpio = irqd_to_hwirq(d); + void __iomem *ie = sfp->base + GPIOIE + 4 * (gpio / 32); + u32 mask = BIT(gpio % 32); + unsigned long flags; + u32 value; + + raw_spin_lock_irqsave(&sfp->lock, flags); + value = readl_relaxed(ie) | mask; + writel_relaxed(value, ie); + raw_spin_unlock_irqrestore(&sfp->lock, flags); +} + +static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger) +{ + struct starfive_pinctrl *sfp = starfive_from_irq_data(d); + irq_hw_number_t gpio = irqd_to_hwirq(d); + void __iomem *base = sfp->base + 4 *
(gpio / 32); + u32 mask = BIT(gpio % 32); + u32 irq_type, edge_both, polarity; + unsigned long flags; + + switch (trigger) { + case IRQ_TYPE_EDGE_RISING: + irq_type = mask; /* 1: edge triggered */ + edge_both = 0; /* 0: single edge */ + polarity = mask; /* 1: rising edge */ + break; + case IRQ_TYPE_EDGE_FALLING: + irq_type = mask; /* 1: edge triggered */ + edge_both = 0; /* 0: single edge */ + polarity = 0; /* 0: falling edge */ + break; + case IRQ_TYPE_EDGE_BOTH: + irq_type = mask; /* 1: edge triggered */ + edge_both = mask; /* 1: both edges */ + polarity = 0; /* 0: ignored */ + break; + case IRQ_TYPE_LEVEL_HIGH: + irq_type = 0; /* 0: level triggered */ + edge_both = 0; /* 0: ignored */ + polarity = mask; /* 1: high level */ + break; + case IRQ_TYPE_LEVEL_LOW: + irq_type = 0; /* 0: level triggered */ + edge_both = 0; /* 0: ignored */ + polarity = 0; /* 0: low level */ + break; + default: + return -EINVAL; + } + + if (trigger & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + else + irq_set_handler_locked(d, handle_level_irq); + + raw_spin_lock_irqsave(&sfp->lock, flags); + irq_type |= readl_relaxed(base + GPIOIS) & ~mask; + writel_relaxed(irq_type, base + GPIOIS); + edge_both |= readl_relaxed(base + GPIOIBE) & ~mask; + writel_relaxed(edge_both, base + GPIOIBE); + polarity |= readl_relaxed(base + GPIOIEV) & ~mask; + writel_relaxed(polarity, base + GPIOIEV); + raw_spin_unlock_irqrestore(&sfp->lock, flags); + return 0; +} + +static struct irq_chip starfive_irq_chip = { + .irq_ack = starfive_irq_ack, + .irq_mask = starfive_irq_mask, + .irq_mask_ack = starfive_irq_mask_ack, + .irq_unmask = starfive_irq_unmask, + .irq_set_type = starfive_irq_set_type, + .flags = IRQCHIP_SET_TYPE_MASKED, +}; + +static void starfive_gpio_irq_handler(struct irq_desc *desc) +{ + struct starfive_pinctrl *sfp = starfive_from_irq_desc(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long mis; + unsigned int pin; + + chained_irq_enter(chip, desc); + + mis = readl_relaxed(sfp->base + GPIOMIS + 0); + for_each_set_bit(pin, &mis, 32) + generic_handle_domain_irq(sfp->gc.irq.domain, pin); + + mis = readl_relaxed(sfp->base + GPIOMIS + 4); + for_each_set_bit(pin, &mis, 32) + generic_handle_domain_irq(sfp->gc.irq.domain, pin + 32); + + chained_irq_exit(chip, desc); +} + +static int starfive_gpio_init_hw(struct gpio_chip *gc) +{ + struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc); + + /* mask all GPIO interrupts */ + writel(0, sfp->base + GPIOIE + 0); + writel(0, sfp->base + GPIOIE + 4); + /* clear edge interrupt flags */ + writel(~0U, sfp->base + GPIOIC + 0); + writel(~0U, sfp->base + GPIOIC + 4); + /* enable GPIO interrupts */ + writel(1, sfp->base + GPIOEN); + return 0; +} + +static void starfive_disable_clock(void *data) +{ + clk_disable_unprepare(data); +} + +static int starfive_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct starfive_pinctrl *sfp; + struct reset_control *rst; + struct clk *clk; + u32 value; + int ret; + + sfp = devm_kzalloc(dev, sizeof(*sfp), GFP_KERNEL); + if (!sfp) + return -ENOMEM; + + sfp->base = devm_platform_ioremap_resource_byname(pdev, "gpio"); + if (IS_ERR(sfp->base)) + return PTR_ERR(sfp->base); + + sfp->padctl = devm_platform_ioremap_resource_byname(pdev, "padctl"); + if (IS_ERR(sfp->padctl)) + return PTR_ERR(sfp->padctl); + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "could not get clock\n"); + + rst = devm_reset_control_get_exclusive(dev, NULL); 
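	/*
	 * The reset control above is requested exclusively: the driver gets
	 * sole control of the line, so the deassert further down takes effect
	 * directly instead of being reference-counted the way a shared reset
	 * control would be.
	 */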
+ if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "could not get reset\n"); + + ret = clk_prepare_enable(clk); + if (ret) + return dev_err_probe(dev, ret, "could not enable clock\n"); + + ret = devm_add_action_or_reset(dev, starfive_disable_clock, clk); + if (ret) + return ret; + + /* + * We don't want to assert reset and risk undoing pin muxing for the + * early boot serial console, but let's make sure the reset line is + * deasserted in case someone runs a really minimal bootloader. + */ + ret = reset_control_deassert(rst); + if (ret) + return dev_err_probe(dev, ret, "could not deassert reset\n"); + + platform_set_drvdata(pdev, sfp); + sfp->gc.parent = dev; + raw_spin_lock_init(&sfp->lock); + + ret = devm_pinctrl_register_and_init(dev, &starfive_desc, sfp, &sfp->pctl); + if (ret) + return dev_err_probe(dev, ret, "could not register pinctrl driver\n"); + + if (!of_property_read_u32(dev->of_node, "starfive,signal-group", &value)) { + if (value > 6) + return dev_err_probe(dev, -EINVAL, "invalid signal group %u\n", value); + writel(value, sfp->padctl + IO_PADSHARE_SEL); + } + + value = readl(sfp->padctl + IO_PADSHARE_SEL); + switch (value) { + case 0: + sfp->gpios.pin_base = PAD_INVALID_GPIO; + goto out_pinctrl_enable; + case 1: + sfp->gpios.pin_base = PAD_GPIO(0); + break; + case 2: + sfp->gpios.pin_base = PAD_FUNC_SHARE(72); + break; + case 3: + sfp->gpios.pin_base = PAD_FUNC_SHARE(70); + break; + case 4: case 5: case 6: + sfp->gpios.pin_base = PAD_FUNC_SHARE(0); + break; + default: + return dev_err_probe(dev, -EINVAL, "invalid signal group %u\n", value); + } + + sfp->gc.label = dev_name(dev); + sfp->gc.owner = THIS_MODULE; + sfp->gc.request = starfive_gpio_request; + sfp->gc.free = starfive_gpio_free; + sfp->gc.get_direction = starfive_gpio_get_direction; + sfp->gc.direction_input = starfive_gpio_direction_input; + sfp->gc.direction_output = starfive_gpio_direction_output; + sfp->gc.get = starfive_gpio_get; + sfp->gc.set = starfive_gpio_set; + sfp->gc.set_config = starfive_gpio_set_config; + sfp->gc.add_pin_ranges = starfive_gpio_add_pin_ranges; + sfp->gc.base = -1; + sfp->gc.ngpio = NR_GPIOS; + + starfive_irq_chip.parent_device = dev; + starfive_irq_chip.name = sfp->gc.label; + + sfp->gc.irq.chip = &starfive_irq_chip; + sfp->gc.irq.parent_handler = starfive_gpio_irq_handler; + sfp->gc.irq.num_parents = 1; + sfp->gc.irq.parents = devm_kcalloc(dev, sfp->gc.irq.num_parents, + sizeof(*sfp->gc.irq.parents), GFP_KERNEL); + if (!sfp->gc.irq.parents) + return -ENOMEM; + sfp->gc.irq.default_type = IRQ_TYPE_NONE; + sfp->gc.irq.handler = handle_bad_irq; + sfp->gc.irq.init_hw = starfive_gpio_init_hw; + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + sfp->gc.irq.parents[0] = ret; + + ret = devm_gpiochip_add_data(dev, &sfp->gc, sfp); + if (ret) + return dev_err_probe(dev, ret, "could not register gpiochip\n"); + +out_pinctrl_enable: + return pinctrl_enable(sfp->pctl); +} + +static const struct of_device_id starfive_of_match[] = { + { .compatible = "starfive,jh7100-pinctrl" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, starfive_of_match); + +static struct platform_driver starfive_pinctrl_driver = { + .probe = starfive_probe, + .driver = { + .name = DRIVER_NAME, + .of_match_table = starfive_of_match, + }, +}; +module_platform_driver(starfive_pinctrl_driver); + +MODULE_DESCRIPTION("Pinctrl driver for StarFive SoCs"); +MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/reset/ltc2952-poweroff.c 
b/drivers/power/reset/ltc2952-poweroff.c index fbb344353fe4..65d9528cc989 100644 --- a/drivers/power/reset/ltc2952-poweroff.c +++ b/drivers/power/reset/ltc2952-poweroff.c @@ -159,8 +159,8 @@ static void ltc2952_poweroff_kill(void) static void ltc2952_poweroff_default(struct ltc2952_poweroff *data) { - data->wde_interval = 300L * 1E6L; - data->trigger_delay = ktime_set(2, 500L*1E6L); + data->wde_interval = 300L * NSEC_PER_MSEC; + data->trigger_delay = ktime_set(2, 500L * NSEC_PER_MSEC); hrtimer_init(&data->timer_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_REL); data->timer_trigger.function = ltc2952_poweroff_timer_trigger; diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 34ec186a2e9a..b7eac5428083 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -581,12 +581,12 @@ static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq) if (!new_state.online && bq->state.online) { /* power removed */ /* disable ADC */ - ret = bq25890_field_write(bq, F_CONV_START, 0); + ret = bq25890_field_write(bq, F_CONV_RATE, 0); if (ret < 0) goto error; } else if (new_state.online && !bq->state.online) { /* power inserted */ /* enable ADC, to have control of charge current/voltage */ - ret = bq25890_field_write(bq, F_CONV_START, 1); + ret = bq25890_field_write(bq, F_CONV_RATE, 1); if (ret < 0) goto error; } diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index fc12a4f407f4..6093754cebd5 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -853,6 +853,10 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, return NULL; for (i = 0; i < POWER_SUPPLY_OCV_TEMP_MAX; i++) { + /* Out of capacity tables */ + if (!info->ocv_table[i]) + break; + temp_diff = abs(info->ocv_temp[i] - temp); if (temp_diff < best_temp_diff) { diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 11a10b575ace..18cf974ac776 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -42,12 +42,16 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/pm_opp.h> #include <linux/pwm.h> #include <linux/platform_device.h> #include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/reset.h> +#include <soc/tegra/common.h> + #define PWM_ENABLE (1 << 31) #define PWM_DUTY_WIDTH 8 #define PWM_DUTY_SHIFT 16 @@ -145,7 +149,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, required_clk_rate = (NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH; - err = clk_set_rate(pc->clk, required_clk_rate); + err = dev_pm_opp_set_rate(pc->dev, required_clk_rate); if (err < 0) return -EINVAL; @@ -181,8 +185,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * before writing the register. Otherwise, keep it enabled. */ if (!pwm_is_enabled(pwm)) { - err = clk_prepare_enable(pc->clk); - if (err < 0) + err = pm_runtime_resume_and_get(pc->dev); + if (err) return err; } else val |= PWM_ENABLE; @@ -193,7 +197,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * If the PWM is not enabled, turn the clock off again to save power. 
*/ if (!pwm_is_enabled(pwm)) - clk_disable_unprepare(pc->clk); + pm_runtime_put(pc->dev); return 0; } @@ -204,8 +208,8 @@ static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) int rc = 0; u32 val; - rc = clk_prepare_enable(pc->clk); - if (rc < 0) + rc = pm_runtime_resume_and_get(pc->dev); + if (rc) return rc; val = pwm_readl(pc, pwm->hwpwm); @@ -224,7 +228,7 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) val &= ~PWM_ENABLE; pwm_writel(pc, pwm->hwpwm, val); - clk_disable_unprepare(pc->clk); + pm_runtime_put_sync(pc->dev); } static const struct pwm_ops tegra_pwm_ops = { @@ -256,11 +260,20 @@ static int tegra_pwm_probe(struct platform_device *pdev) if (IS_ERR(pwm->clk)) return PTR_ERR(pwm->clk); + ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (ret) + return ret; + + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret) + return ret; + /* Set maximum frequency of the IP */ - ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency); + ret = dev_pm_opp_set_rate(pwm->dev, pwm->soc->max_frequency); if (ret < 0) { dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret); - return ret; + goto put_pm; } /* @@ -278,7 +291,7 @@ static int tegra_pwm_probe(struct platform_device *pdev) if (IS_ERR(pwm->rst)) { ret = PTR_ERR(pwm->rst); dev_err(&pdev->dev, "Reset control is not found: %d\n", ret); - return ret; + goto put_pm; } reset_control_deassert(pwm->rst); @@ -291,10 +304,16 @@ static int tegra_pwm_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); reset_control_assert(pwm->rst); - return ret; + goto put_pm; } + pm_runtime_put(&pdev->dev); + return 0; +put_pm: + pm_runtime_put_sync_suspend(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); + return ret; } static int tegra_pwm_remove(struct platform_device *pdev) @@ -305,20 +324,44 @@ static int tegra_pwm_remove(struct platform_device *pdev) reset_control_assert(pc->rst); + pm_runtime_force_suspend(&pdev->dev); + return 0; } -#ifdef CONFIG_PM_SLEEP -static int tegra_pwm_suspend(struct device *dev) +static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev) { - return pinctrl_pm_select_sleep_state(dev); + struct tegra_pwm_chip *pc = dev_get_drvdata(dev); + int err; + + clk_disable_unprepare(pc->clk); + + err = pinctrl_pm_select_sleep_state(dev); + if (err) { + clk_prepare_enable(pc->clk); + return err; + } + + return 0; } -static int tegra_pwm_resume(struct device *dev) +static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev) { - return pinctrl_pm_select_default_state(dev); + struct tegra_pwm_chip *pc = dev_get_drvdata(dev); + int err; + + err = pinctrl_pm_select_default_state(dev); + if (err) + return err; + + err = clk_prepare_enable(pc->clk); + if (err) { + pinctrl_pm_select_sleep_state(dev); + return err; + } + + return 0; } -#endif static const struct tegra_pwm_soc tegra20_pwm_soc = { .num_channels = 4, @@ -344,7 +387,10 @@ static const struct of_device_id tegra_pwm_of_match[] = { MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); static const struct dev_pm_ops tegra_pwm_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume) + SET_RUNTIME_PM_OPS(tegra_pwm_runtime_suspend, tegra_pwm_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) }; static struct platform_driver tegra_pwm_driver = { diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index d7894f178bd4..42f2fc0bc8a9 100644 --- 
a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -38,7 +38,7 @@ * elements entered into the array, during which, we're decaying all elements. * If, after decay, an element gets inserted again, its generation is set to 11b * to make sure it has higher numerical count than other, older elements and - * thus emulate an an LRU-like behavior when deleting elements to free up space + * thus emulate an LRU-like behavior when deleting elements to free up space * in the page. * * When an element reaches it's max count of action_threshold, we try to poison diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 85024eb1d2ea..6f8ba0ddc05f 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -224,6 +224,13 @@ config RESET_SOCFPGA This enables the reset driver for the SoCFPGA ARMv7 platforms. This driver gets initialized early during platform init calls. +config RESET_STARFIVE_JH7100 + bool "StarFive JH7100 Reset Driver" + depends on SOC_STARFIVE || COMPILE_TEST + default SOC_STARFIVE + help + This enables the reset controller driver for the StarFive JH7100 SoC. + config RESET_SUNXI bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI default ARCH_SUNXI diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 21d46d8869ff..bd0a97be18b5 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o obj-$(CONFIG_RESET_SCMI) += reset-scmi.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o +obj-$(CONFIG_RESET_STARFIVE_JH7100) += reset-starfive-jh7100.o obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c index e0704fd2b533..1e8315038850 100644 --- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -137,7 +137,12 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) dev_set_drvdata(dev, priv); pm_runtime_enable(&pdev->dev); - pm_runtime_resume_and_get(&pdev->dev); + error = pm_runtime_resume_and_get(&pdev->dev); + if (error < 0) { + pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed"); + } /* put pll and phy into reset state */ spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/reset/reset-starfive-jh7100.c b/drivers/reset/reset-starfive-jh7100.c new file mode 100644 index 000000000000..fc44b2fb3e03 --- /dev/null +++ b/drivers/reset/reset-starfive-jh7100.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Reset driver for the StarFive JH7100 SoC + * + * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk> + */ + +#include <linux/bitmap.h> +#include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/iopoll.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/spinlock.h> + +#include <dt-bindings/reset/starfive-jh7100.h> + +/* register offsets */ +#define JH7100_RESET_ASSERT0 0x00 +#define JH7100_RESET_ASSERT1 0x04 +#define JH7100_RESET_ASSERT2 0x08 +#define JH7100_RESET_ASSERT3 0x0c +#define JH7100_RESET_STATUS0 0x10 +#define JH7100_RESET_STATUS1 0x14 +#define JH7100_RESET_STATUS2 0x18 +#define JH7100_RESET_STATUS3 0x1c + +/* + * Writing a 1 to the n'th bit of the m'th ASSERT register 
asserts + * line 32m + n, and writing a 0 deasserts the same line. + * Most reset lines have their status inverted so a 0 bit in the STATUS + * register means the line is asserted and a 1 means it's deasserted. A few + * lines don't though, so store the expected value of the status registers when + * all lines are asserted. + */ +static const u64 jh7100_reset_asserted[2] = { + /* STATUS0 */ + BIT_ULL_MASK(JH7100_RST_U74) | + BIT_ULL_MASK(JH7100_RST_VP6_DRESET) | + BIT_ULL_MASK(JH7100_RST_VP6_BRESET) | + /* STATUS1 */ + BIT_ULL_MASK(JH7100_RST_HIFI4_DRESET) | + BIT_ULL_MASK(JH7100_RST_HIFI4_BRESET), + /* STATUS2 */ + BIT_ULL_MASK(JH7100_RST_E24) | + /* STATUS3 */ + 0, +}; + +struct jh7100_reset { + struct reset_controller_dev rcdev; + /* protect registers against concurrent read-modify-write */ + spinlock_t lock; + void __iomem *base; +}; + +static inline struct jh7100_reset * +jh7100_reset_from(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct jh7100_reset, rcdev); +} + +static int jh7100_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct jh7100_reset *data = jh7100_reset_from(rcdev); + unsigned long offset = BIT_ULL_WORD(id); + u64 mask = BIT_ULL_MASK(id); + void __iomem *reg_assert = data->base + JH7100_RESET_ASSERT0 + offset * sizeof(u64); + void __iomem *reg_status = data->base + JH7100_RESET_STATUS0 + offset * sizeof(u64); + u64 done = jh7100_reset_asserted[offset] & mask; + u64 value; + unsigned long flags; + int ret; + + if (!assert) + done ^= mask; + + spin_lock_irqsave(&data->lock, flags); + + value = readq(reg_assert); + if (assert) + value |= mask; + else + value &= ~mask; + writeq(value, reg_assert); + + /* if the associated clock is gated, deasserting might otherwise hang forever */ + ret = readq_poll_timeout_atomic(reg_status, value, (value & mask) == done, 0, 1000); + + spin_unlock_irqrestore(&data->lock, flags); + return ret; +} + +static int jh7100_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return jh7100_reset_update(rcdev, id, true); +} + +static int jh7100_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return jh7100_reset_update(rcdev, id, false); +} + +static int jh7100_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = jh7100_reset_assert(rcdev, id); + if (ret) + return ret; + + return jh7100_reset_deassert(rcdev, id); +} + +static int jh7100_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct jh7100_reset *data = jh7100_reset_from(rcdev); + unsigned long offset = BIT_ULL_WORD(id); + u64 mask = BIT_ULL_MASK(id); + void __iomem *reg_status = data->base + JH7100_RESET_STATUS0 + offset * sizeof(u64); + u64 value = readq(reg_status); + + return !((value ^ jh7100_reset_asserted[offset]) & mask); +} + +static const struct reset_control_ops jh7100_reset_ops = { + .assert = jh7100_reset_assert, + .deassert = jh7100_reset_deassert, + .reset = jh7100_reset_reset, + .status = jh7100_reset_status, +}; + +static int __init jh7100_reset_probe(struct platform_device *pdev) +{ + struct jh7100_reset *data; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + + data->rcdev.ops = &jh7100_reset_ops; + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = JH7100_RSTN_END; + data->rcdev.dev = &pdev->dev; + data->rcdev.of_node = 
pdev->dev.of_node; + spin_lock_init(&data->lock); + + return devm_reset_controller_register(&pdev->dev, &data->rcdev); +} + +static const struct of_device_id jh7100_reset_dt_ids[] = { + { .compatible = "starfive,jh7100-reset" }, + { /* sentinel */ } +}; + +static struct platform_driver jh7100_reset_driver = { + .driver = { + .name = "jh7100-reset", + .of_match_table = jh7100_reset_dt_ids, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(jh7100_reset_driver, jh7100_reset_probe); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 6043c832d09e..811e79c9f59c 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1824,10 +1824,11 @@ static struct attribute *paths_info_attrs[] = { &path_fcs_attribute.attr, NULL, }; +ATTRIBUTE_GROUPS(paths_info); static struct kobj_type path_attr_type = { .release = dasd_path_release, - .default_attrs = paths_info_attrs, + .default_groups = paths_info_groups, .sysfs_ops = &kobj_sysfs_ops, }; diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index b64feab62caa..e9943a86c361 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -139,7 +139,7 @@ int __init sclp_early_get_core_info(struct sclp_core_info *info) } sclp_fill_core_info(info, sccb); out: - memblock_phys_free((unsigned long)sccb, length); + memblock_free(sccb, length); return rc; } diff --git a/drivers/s390/char/sclp_sd.c b/drivers/s390/char/sclp_sd.c index 25c2d760f6e6..f9e164be7568 100644 --- a/drivers/s390/char/sclp_sd.c +++ b/drivers/s390/char/sclp_sd.c @@ -438,11 +438,12 @@ static struct attribute *sclp_sd_file_default_attrs[] = { &reload_attr.attr, NULL, }; +ATTRIBUTE_GROUPS(sclp_sd_file_default); static struct kobj_type sclp_sd_file_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = sclp_sd_file_release, - .default_attrs = sclp_sd_file_default_attrs, + .default_groups = sclp_sd_file_default_groups, }; /** diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 9e066281e2d0..4cebfaaa22b4 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -72,7 +72,7 @@ static void vmcp_response_alloc(struct vmcp_session *session) if (order > 2) page = cma_alloc(vmcp_cma, nr_pages, 0, false); if (page) { - session->response = (char *)page_to_phys(page); + session->response = (char *)page_to_virt(page); session->cma_alloc = 1; return; } @@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session) order = get_order(session->bufsize); nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT; if (session->cma_alloc) { - page = phys_to_page((unsigned long)session->response); + page = virt_to_page((unsigned long)session->response); cma_release(vmcp_cma, page, nr_pages); session->cma_alloc = 0; } else { diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 684348d82f08..962dfa25a310 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -91,11 +91,6 @@ static int chsc_subchannel_probe(struct subchannel *sch) sch->schid.ssid, sch->schid.sch_no, ret); dev_set_drvdata(&sch->dev, NULL); kfree(private); - } else { - if (dev_get_uevent_suppress(&sch->dev)) { - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - } } return ret; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index ce9e7517430f..fa8293335077 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -470,16 +470,6 @@ int css_register_subchannel(struct subchannel 
*sch) if (sch->st == SUBCHANNEL_TYPE_IO) sch->dev.type = &io_subchannel_type; - /* - * We don't want to generate uevents for I/O subchannels that don't - * have a working ccw device behind them since they will be - * unregistered before they can be used anyway, so we delay the add - * uevent until after device recognition was successful. - * Note that we suppress the uevent for all subchannel types; - * the subchannel driver can decide itself when it wants to inform - * userspace of its existence. - */ - dev_set_uevent_suppress(&sch->dev, 1); css_update_ssd_info(sch); /* make it known to the system */ ret = css_sch_device_register(sch); @@ -488,15 +478,6 @@ int css_register_subchannel(struct subchannel *sch) sch->schid.ssid, sch->schid.sch_no, ret); return ret; } - if (!sch->driver) { - /* - * No driver matched. Generate the uevent now so that - * a fitting driver module may be loaded based on the - * modalias. - */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - } return ret; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 07a17613fab5..cd938a26b76c 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -838,14 +838,6 @@ static void io_subchannel_register(struct ccw_device *cdev) adjust_init_count = 0; goto out; } - /* - * Now we know this subchannel will stay, we can throw - * our delayed uevent. - */ - if (dev_get_uevent_suppress(&sch->dev)) { - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - } /* make it known to the system */ ret = device_add(&cdev->dev); if (ret) { @@ -1036,15 +1028,9 @@ static int io_subchannel_probe(struct subchannel *sch) "0.%x.%04x (rc=%d)\n", sch->schid.ssid, sch->schid.sch_no, rc); /* - * The console subchannel already has an associated ccw_device. - * Throw the delayed uevent for the subchannel, register - * the ccw_device and exit. - */ - if (dev_get_uevent_suppress(&sch->dev)) { - /* should always be the case for the console */ - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - } + * The console subchannel already has an associated ccw_device. + * Register it and exit. 
+ */ cdev = sch_get_cdev(sch); rc = device_add(&cdev->dev); if (rc) { diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index 15bdae5981ca..8b463681a149 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -243,11 +243,6 @@ static int eadm_subchannel_probe(struct subchannel *sch) spin_lock_irq(&list_lock); list_add(&private->head, &eadm_list); spin_unlock_irq(&list_lock); - - if (dev_get_uevent_suppress(&sch->dev)) { - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - } out: return ret; } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 99c2212dc6a6..5ea6249d8180 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -236,12 +236,11 @@ struct qdio_irq { int nr_input_qs; int nr_output_qs; - struct ccw1 ccw; - struct ciw equeue; - struct ciw aqueue; + struct ccw1 *ccw; struct qdio_ssqd_desc ssqd_desc; void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); + qdio_handler_t (*error_handler); int perf_stat_enabled; @@ -338,7 +337,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, struct subchannel_id *schid, struct qdio_ssqd_desc *data); -int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data); +void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data); void qdio_shutdown_irq(struct qdio_irq *irq); void qdio_print_subchannel_info(struct qdio_irq *irq_ptr); void qdio_free_queues(struct qdio_irq *irq_ptr); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 45e810c6ea3b..9cde55730b65 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/kmemleak.h> #include <linux/delay.h> #include <linux/gfp.h> #include <linux/io.h> @@ -169,8 +170,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, int tmp_count = count, tmp_start = start; int nr = q->nr; - if (!count) - return 0; qperf_inc(q, sqbs); if (!q->is_input_q) @@ -499,6 +498,31 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start, } } +int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error) +{ + struct qdio_irq *irq = cdev->private->qdio_data; + unsigned int start; + struct qdio_q *q; + int count; + + if (!irq) + return -ENODEV; + + q = irq->input_qs[nr]; + start = q->first_to_check; + *error = 0; + + count = get_inbound_buffer_frontier(q, start, error); + if (count == 0) + return 0; + + *bufnr = start; + q->first_to_check = add_buf(start, count); + return count; +} +EXPORT_SYMBOL_GPL(qdio_inspect_input_queue); + static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) { unsigned char state = 0; @@ -578,6 +602,31 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start, } } +int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr, + unsigned int *bufnr, unsigned int *error) +{ + struct qdio_irq *irq = cdev->private->qdio_data; + unsigned int start; + struct qdio_q *q; + int count; + + if (!irq) + return -ENODEV; + + q = irq->output_qs[nr]; + start = q->first_to_check; + *error = 0; + + count = get_outbound_buffer_frontier(q, start, error); + if (count == 0) + return 0; + + *bufnr = start; + q->first_to_check = add_buf(start, count); + return count; +} 
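With the old qdio_inspect_queue() (removed further down) split into direction-specific entry points, callers now select the queue direction by function instead of a bool flag. A minimal sketch of polling the new output-queue helper (handle_tx_done() is a hypothetical completion callback, not part of the patch):

	unsigned int bufnr, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &bufnr, &error);
	if (completed > 0)
		handle_tx_done(bufnr, completed, error);	/* hypothetical */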
+EXPORT_SYMBOL_GPL(qdio_inspect_output_queue); + static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, unsigned long aob) { @@ -653,24 +702,18 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr, unsigned long intparm, int cstat, int dstat) { - struct qdio_q *q; + unsigned int first_to_check = 0; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); - if (irq_ptr->nr_input_qs) { - q = irq_ptr->input_qs[0]; - } else if (irq_ptr->nr_output_qs) { - q = irq_ptr->output_qs[0]; - } else { - dump_stack(); - goto no_handler; - } + /* zfcp wants this: */ + if (irq_ptr->nr_input_qs) + first_to_check = irq_ptr->input_qs[0]->first_to_check; - q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, - q->nr, q->first_to_check, 0, irq_ptr->int_parm); -no_handler: + irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0, + first_to_check, 0, irq_ptr->int_parm); qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); /* * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. @@ -874,6 +917,7 @@ int qdio_free(struct ccw_device *cdev) qdio_free_queues(irq_ptr); free_page((unsigned long) irq_ptr->qdr); free_page(irq_ptr->chsc_page); + kfree(irq_ptr->ccw); free_page((unsigned long) irq_ptr); return 0; } @@ -899,11 +943,17 @@ int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, no_output_qs > QDIO_MAX_QUEUES_PER_IRQ) return -EINVAL; - /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ - irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + irq_ptr = (void *) get_zeroed_page(GFP_KERNEL); if (!irq_ptr) return -ENOMEM; + irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA); + if (!irq_ptr->ccw) + goto err_ccw; + + /* kmemleak doesn't scan the page-allocated irq_ptr: */ + kmemleak_not_leak(irq_ptr->ccw); + irq_ptr->cdev = cdev; mutex_init(&irq_ptr->setup_mutex); if (qdio_allocate_dbf(irq_ptr)) @@ -941,6 +991,8 @@ err_qdr: free_page(irq_ptr->chsc_page); err_chsc: err_dbf: + kfree(irq_ptr->ccw); +err_ccw: free_page((unsigned long) irq_ptr); return rc; } @@ -972,6 +1024,7 @@ int qdio_establish(struct ccw_device *cdev, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct subchannel_id schid; + struct ciw *ciw; long timeout; int rc; @@ -985,8 +1038,11 @@ int qdio_establish(struct ccw_device *cdev, init_data->no_output_qs > irq_ptr->max_output_qs) return -EINVAL; - if ((init_data->no_input_qs && !init_data->input_handler) || - (init_data->no_output_qs && !init_data->output_handler)) + /* Needed as error_handler: */ + if (!init_data->input_handler) + return -EINVAL; + + if (init_data->no_output_qs && !init_data->output_handler) return -EINVAL; if (!init_data->input_sbal_addr_array || @@ -996,6 +1052,12 @@ int qdio_establish(struct ccw_device *cdev, if (!init_data->irq_poll) return -EINVAL; + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE); + if (!ciw) { + DBF_ERROR("%4x NO EQ", schid.sch_no); + return -EIO; + } + mutex_lock(&irq_ptr->setup_mutex); qdio_trace_init_data(irq_ptr, init_data); qdio_setup_irq(irq_ptr, init_data); @@ -1005,15 +1067,15 @@ int qdio_establish(struct ccw_device *cdev, goto err_thinint; /* establish q */ - irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; - irq_ptr->ccw.flags = CCW_FLAG_SLI; - irq_ptr->ccw.count = irq_ptr->equeue.count; - irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr); + irq_ptr->ccw->cmd_code = ciw->cmd; + irq_ptr->ccw->flags = CCW_FLAG_SLI; + irq_ptr->ccw->count = ciw->count; + irq_ptr->ccw->cda = (u32) 
virt_to_phys(irq_ptr->qdr); spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options_mask(cdev, 0); - rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); @@ -1065,6 +1127,7 @@ int qdio_activate(struct ccw_device *cdev) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct subchannel_id schid; + struct ciw *ciw; int rc; ccw_device_get_schid(cdev, &schid); @@ -1073,21 +1136,27 @@ int qdio_activate(struct ccw_device *cdev) if (!irq_ptr) return -ENODEV; + ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE); + if (!ciw) { + DBF_ERROR("%4x NO AQ", schid.sch_no); + return -EIO; + } + mutex_lock(&irq_ptr->setup_mutex); if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { rc = -EBUSY; goto out; } - irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; - irq_ptr->ccw.flags = CCW_FLAG_SLI; - irq_ptr->ccw.count = irq_ptr->aqueue.count; - irq_ptr->ccw.cda = 0; + irq_ptr->ccw->cmd_code = ciw->cmd; + irq_ptr->ccw->flags = CCW_FLAG_SLI; + irq_ptr->ccw->count = ciw->count; + irq_ptr->ccw->cda = 0; spin_lock_irq(get_ccwdev_lock(cdev)); ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); - rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, + rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE, 0, DOIO_DENY_PREFETCH); spin_unlock_irq(get_ccwdev_lock(cdev)); if (rc) { @@ -1144,6 +1213,35 @@ static int handle_inbound(struct qdio_q *q, int bufnr, int count) } /** + * qdio_add_bufs_to_input_queue - process buffers on an Input Queue + * @cdev: associated ccw_device for the qdio subchannel + * @q_nr: queue number + * @bufnr: buffer number + * @count: how many buffers to process + */ +int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsigned int q_nr, + unsigned int bufnr, unsigned int count) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) + return -EINVAL; + + if (!irq_ptr) + return -ENODEV; + + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count); + + if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) + return -EIO; + if (!count) + return 0; + + return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count); +} +EXPORT_SYMBOL_GPL(qdio_add_bufs_to_input_queue); + +/** * handle_outbound - process filled outbound buffers * @q: queue containing the buffers * @bufnr: first buffer to process @@ -1184,16 +1282,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int co } /** - * do_QDIO - process input or output buffers + * qdio_add_bufs_to_output_queue - process buffers on an Output Queue * @cdev: associated ccw_device for the qdio subchannel - * @callflags: input or output and special flags from the program * @q_nr: queue number * @bufnr: buffer number * @count: how many buffers to process - * @aob: asynchronous operation block (outbound only) + * @aob: asynchronous operation block */ -int do_QDIO(struct ccw_device *cdev, unsigned int callflags, - int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob) +int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsigned int q_nr, + unsigned int bufnr, unsigned int count, + struct qaob *aob) { struct qdio_irq *irq_ptr = cdev->private->qdio_data; @@ -1203,20 +1301,16 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, if (!irq_ptr) return -ENODEV; - DBF_DEV_EVENT(DBF_INFO, irq_ptr, - "do%02x b:%02x c:%02x", 
callflags, bufnr, count); + DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count); if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) return -EIO; if (!count) return 0; - if (callflags & QDIO_FLAG_SYNC_INPUT) - return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count); - else if (callflags & QDIO_FLAG_SYNC_OUTPUT) - return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob); - return -EINVAL; + + return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob); } -EXPORT_SYMBOL_GPL(do_QDIO); +EXPORT_SYMBOL_GPL(qdio_add_bufs_to_output_queue); /** * qdio_start_irq - enable interrupt processing for the device @@ -1263,40 +1357,6 @@ rescan: } EXPORT_SYMBOL(qdio_start_irq); -static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, - unsigned int *error) -{ - unsigned int start = q->first_to_check; - int count; - - *error = 0; - count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) : - get_outbound_buffer_frontier(q, start, error); - if (count == 0) - return 0; - - *bufnr = start; - - /* for the next time */ - q->first_to_check = add_buf(start, count); - - return count; -} - -int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input, - unsigned int *bufnr, unsigned int *error) -{ - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - struct qdio_q *q; - - if (!irq_ptr) - return -ENODEV; - q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; - - return __qdio_inspect_queue(q, bufnr, error); -} -EXPORT_SYMBOL_GPL(qdio_inspect_queue); - /** * qdio_stop_irq - disable interrupt processing for the device * @cdev: associated ccw_device for the qdio subchannel diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index efbb5e5eca05..714878e2acc4 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -351,19 +351,18 @@ static void setup_qib(struct qdio_irq *irq_ptr, sizeof(irq_ptr->qib.parm)); } -int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) +void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) { struct ccw_device *cdev = irq_ptr->cdev; - struct ciw *ciw; irq_ptr->qdioac1 = 0; - memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw)); memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc)); memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat)); irq_ptr->debugfs_dev = NULL; irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0; irq_ptr->state = QDIO_IRQ_STATE_INACTIVE; + irq_ptr->error_handler = init_data->input_handler; irq_ptr->int_parm = init_data->int_parm; irq_ptr->nr_input_qs = init_data->no_input_qs; @@ -386,23 +385,6 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data) irq_ptr->orig_handler = cdev->handler; cdev->handler = qdio_int_handler; spin_unlock_irq(get_ccwdev_lock(cdev)); - - /* get qdio commands */ - ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE); - if (!ciw) { - DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); - return -EINVAL; - } - irq_ptr->equeue = *ciw; - - ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE); - if (!ciw) { - DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); - return -EINVAL; - } - irq_ptr->aqueue = *ciw; - - return 0; } void qdio_shutdown_irq(struct qdio_irq *irq) diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 040742777095..ee182cfb467d 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -244,11 +244,6 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) if (ret) goto out_disable; 
- if (dev_get_uevent_suppress(&sch->dev)) { - dev_set_uevent_suppress(&sch->dev, 0); - kobject_uevent(&sch->dev.kobj, KOBJ_ADD); - } - VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", sch->schid.cssid, sch->schid.ssid, sch->schid.sch_no); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 03311a476366..e043ae236630 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -17,6 +17,9 @@ #define VFIO_AP_ROOT_NAME "vfio_ap" #define VFIO_AP_DEV_NAME "matrix" +#define AP_QUEUE_ASSIGNED "assigned" +#define AP_QUEUE_UNASSIGNED "unassigned" +#define AP_QUEUE_IN_USE "in use" MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018"); @@ -41,26 +44,95 @@ static struct ap_device_id ap_queue_ids[] = { MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids); +static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q) +{ + struct ap_matrix_mdev *matrix_mdev; + unsigned long apid = AP_QID_CARD(q->apqn); + unsigned long apqi = AP_QID_QUEUE(q->apqn); + + list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + if (test_bit_inv(apid, matrix_mdev->matrix.apm) && + test_bit_inv(apqi, matrix_mdev->matrix.aqm)) + return matrix_mdev; + } + + return NULL; +} + +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t nchars = 0; + struct vfio_ap_queue *q; + struct ap_matrix_mdev *matrix_mdev; + struct ap_device *apdev = to_ap_dev(dev); + + mutex_lock(&matrix_dev->lock); + q = dev_get_drvdata(&apdev->device); + matrix_mdev = vfio_ap_mdev_for_queue(q); + + if (matrix_mdev) { + if (matrix_mdev->kvm) + nchars = scnprintf(buf, PAGE_SIZE, "%s\n", + AP_QUEUE_IN_USE); + else + nchars = scnprintf(buf, PAGE_SIZE, "%s\n", + AP_QUEUE_ASSIGNED); + } else { + nchars = scnprintf(buf, PAGE_SIZE, "%s\n", + AP_QUEUE_UNASSIGNED); + } + + mutex_unlock(&matrix_dev->lock); + + return nchars; +} + +static DEVICE_ATTR_RO(status); + +static struct attribute *vfio_queue_attrs[] = { + &dev_attr_status.attr, + NULL, +}; + +static const struct attribute_group vfio_queue_attr_group = { + .attrs = vfio_queue_attrs, +}; + /** * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it * with the device as driver_data. * * @apdev: the AP device being probed * - * Return: returns 0 if the probe succeeded; otherwise, returns -ENOMEM if - * storage could not be allocated for a vfio_ap_queue object. + * Return: returns 0 if the probe succeeded; otherwise, returns an error if + * storage could not be allocated for a vfio_ap_queue object or the + * sysfs 'status' attribute could not be created for the queue device. 
*/ static int vfio_ap_queue_dev_probe(struct ap_device *apdev) { + int ret; struct vfio_ap_queue *q; q = kzalloc(sizeof(*q), GFP_KERNEL); if (!q) return -ENOMEM; + + mutex_lock(&matrix_dev->lock); dev_set_drvdata(&apdev->device, q); q->apqn = to_ap_queue(&apdev->device)->qid; q->saved_isc = VFIO_AP_ISC_INVALID; - return 0; + + ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group); + if (ret) { + dev_set_drvdata(&apdev->device, NULL); + kfree(q); + } + + mutex_unlock(&matrix_dev->lock); + + return ret; } /** @@ -75,6 +147,7 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev) struct vfio_ap_queue *q; mutex_lock(&matrix_dev->lock); + sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group); q = dev_get_drvdata(&apdev->device); vfio_ap_mdev_reset_queue(q, 1); dev_set_drvdata(&apdev->device, NULL); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 4c3dcc435e83..9811ab81f3c4 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -878,14 +878,13 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, /* * If a valid target domain is set and this domain is NOT a usage - * domain but a control only domain, use the default domain as target. + * domain but a control only domain, autoselect target domain. */ tdom = *domain; if (tdom < AP_DOMAINS && !ap_test_config_usage_domain(tdom) && - ap_test_config_ctrl_domain(tdom) && - ap_domain_index >= 0) - tdom = ap_domain_index; + ap_test_config_ctrl_domain(tdom)) + tdom = AUTOSEL_DOM; pref_zc = NULL; pref_zq = NULL; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 26c55f67289f..fe2c4c699d37 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -355,8 +355,8 @@ static int qeth_cq_init(struct qeth_card *card) qdio_reset_buffers(card->qdio.c_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); card->qdio.c_q->next_buf_to_init = 127; - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127, - NULL); + + rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127); if (rc) { QETH_CARD_TEXT_(card, 2, "1err%d", rc); goto out; @@ -2926,8 +2926,7 @@ static int qeth_init_qdio_queues(struct qeth_card *card) } card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs, - NULL); + rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs); if (rc) { QETH_CARD_TEXT_(card, 2, "1err%d", rc); return rc; @@ -3415,8 +3414,9 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card, return 0; } - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, - queue->next_buf_to_init, count, NULL); + rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, + queue->next_buf_to_init, + count); if (rc) { QETH_CARD_TEXT(card, 2, "qinberr"); } @@ -3588,8 +3588,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } QETH_TXQ_STAT_INC(queue, doorbell); - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no, - index, count, aob); + rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no, + index, count, aob); switch (rc) { case 0: @@ -3739,8 +3739,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, } qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); } - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, - cq->next_buf_to_init, count, NULL); + rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue, + 
cq->next_buf_to_init, count); if (rc) { dev_warn(&card->gdev->dev, "QDIO reported an error, rc=%i\n", rc); @@ -5850,10 +5850,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) /* Fetch completed RX buffers: */ if (!card->rx.b_count) { card->rx.qdio_err = 0; - card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card), - 0, true, - &card->rx.b_index, - &card->rx.qdio_err); + card->rx.b_count = + qdio_inspect_input_queue(CARD_DDEV(card), 0, + &card->rx.b_index, + &card->rx.qdio_err); if (card->rx.b_count <= 0) { card->rx.b_count = 0; break; @@ -5900,8 +5900,8 @@ static void qeth_cq_poll(struct qeth_card *card) unsigned int start, error; int completed; - completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start, - &error); + completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start, + &error); if (completed <= 0) return; @@ -6038,8 +6038,8 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget) return 0; } - completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, - &start, &error); + completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no, + &start, &error); if (completed <= 0) { /* Ensure we see TX completion for pending work: */ if (napi_complete_done(napi, 0) && diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 6a2720105138..f54f506b02d6 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -79,7 +79,7 @@ static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet) unsigned int start, error; int completed; - completed = qdio_inspect_queue(cdev, 0, false, &start, &error); + completed = qdio_inspect_output_queue(cdev, 0, &start, &error); if (completed > 0) { if (error) { zfcp_qdio_handler_error(qdio, "qdreqt1", error); @@ -154,7 +154,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, /* * put SBALs back to response queue */ - if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL)) + if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count)) zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2"); } @@ -169,7 +169,7 @@ static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet) tasklet_schedule(&qdio->request_tasklet); /* Check the Response Queue: */ - completed = qdio_inspect_queue(cdev, 0, true, &start, &error); + completed = qdio_inspect_input_queue(cdev, 0, &start, &error); if (completed < 0) return; if (completed > 0) @@ -326,8 +326,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) atomic_sub(sbal_number, &qdio->req_q_free); - retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, - q_req->sbal_first, sbal_number, NULL); + retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0, + q_req->sbal_first, sbal_number, + NULL); if (unlikely(retval)) { /* Failed to submit the IO, roll back our modifications. */ @@ -395,7 +396,10 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return; - /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ + /* + * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called + * during qdio_shutdown(). 
+ */ spin_lock_irq(&qdio->req_q_lock); atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); spin_unlock_irq(&qdio->req_q_lock); @@ -498,8 +502,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) sbale->addr = 0; } - if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q, - NULL)) + if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) goto failed_qdio; /* set index of first available SBALS / number of available SBALS */ diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index e8a30c4c5aec..a8562678c437 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -3,6 +3,7 @@ menu "SOC (System On Chip) specific Drivers" source "drivers/soc/actions/Kconfig" source "drivers/soc/amlogic/Kconfig" +source "drivers/soc/apple/Kconfig" source "drivers/soc/aspeed/Kconfig" source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index a05e9fbcd3e0..adb30c2d4fea 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -4,6 +4,7 @@ # obj-$(CONFIG_ARCH_ACTIONS) += actions/ +obj-$(CONFIG_ARCH_APPLE) += apple/ obj-y += aspeed/ obj-$(CONFIG_ARCH_AT91) += atmel/ obj-y += bcm/ diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig new file mode 100644 index 000000000000..9b8de31d6a8f --- /dev/null +++ b/drivers/soc/apple/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only + +if ARCH_APPLE || COMPILE_TEST + +menu "Apple SoC drivers" + +config APPLE_PMGR_PWRSTATE + bool "Apple SoC PMGR power state control" + depends on PM + select REGMAP + select MFD_SYSCON + select PM_GENERIC_DOMAINS + select RESET_CONTROLLER + default ARCH_APPLE + help + The PMGR block in Apple SoCs provides high-level power state + controls for SoC devices. This driver manages them through the + generic power domain framework, and also provides reset support. 
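As the help text notes, the option defaults on for ARCH_APPLE; exercising the driver elsewhere goes through COMPILE_TEST. A rough config fragment for such a build (sketch only; the select statements above pull in regmap, syscon, genpd and reset support automatically):

	CONFIG_COMPILE_TEST=y
	CONFIG_PM=y
	CONFIG_APPLE_PMGR_PWRSTATE=y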
+ +endmenu + +endif diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile new file mode 100644 index 000000000000..c114e84667e4 --- /dev/null +++ b/drivers/soc/apple/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o diff --git a/drivers/soc/apple/apple-pmgr-pwrstate.c b/drivers/soc/apple/apple-pmgr-pwrstate.c new file mode 100644 index 000000000000..e1122288409a --- /dev/null +++ b/drivers/soc/apple/apple-pmgr-pwrstate.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple SoC PMGR device power state driver + * + * Copyright The Asahi Linux Contributors + */ + +#include <linux/bitops.h> +#include <linux/bitfield.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/regmap.h> +#include <linux/mfd/syscon.h> +#include <linux/reset-controller.h> +#include <linux/module.h> + +#define APPLE_PMGR_RESET BIT(31) +#define APPLE_PMGR_AUTO_ENABLE BIT(28) +#define APPLE_PMGR_PS_AUTO GENMASK(27, 24) +#define APPLE_PMGR_PS_MIN GENMASK(19, 16) +#define APPLE_PMGR_PARENT_OFF BIT(11) +#define APPLE_PMGR_DEV_DISABLE BIT(10) +#define APPLE_PMGR_WAS_CLKGATED BIT(9) +#define APPLE_PMGR_WAS_PWRGATED BIT(8) +#define APPLE_PMGR_PS_ACTUAL GENMASK(7, 4) +#define APPLE_PMGR_PS_TARGET GENMASK(3, 0) + +#define APPLE_PMGR_FLAGS (APPLE_PMGR_WAS_CLKGATED | APPLE_PMGR_WAS_PWRGATED) + +#define APPLE_PMGR_PS_ACTIVE 0xf +#define APPLE_PMGR_PS_CLKGATE 0x4 +#define APPLE_PMGR_PS_PWRGATE 0x0 + +#define APPLE_PMGR_PS_SET_TIMEOUT 100 +#define APPLE_PMGR_RESET_TIME 1 + +struct apple_pmgr_ps { + struct device *dev; + struct generic_pm_domain genpd; + struct reset_controller_dev rcdev; + struct regmap *regmap; + u32 offset; + u32 min_state; +}; + +#define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd) +#define rcdev_to_apple_pmgr_ps(_rcdev) container_of(_rcdev, struct apple_pmgr_ps, rcdev) + +static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool auto_enable) +{ + int ret; + struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd); + u32 reg; + + ret = regmap_read(ps->regmap, ps->offset, ®); + if (ret < 0) + return ret; + + /* Resets are synchronous, and only work if the device is powered and clocked. */ + if (reg & APPLE_PMGR_RESET && pstate != APPLE_PMGR_PS_ACTIVE) + dev_err(ps->dev, "PS %s: powering off with RESET active\n", + genpd->name); + + reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET); + reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate); + + dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg); + + regmap_write(ps->regmap, ps->offset, reg); + + ret = regmap_read_poll_timeout_atomic( + ps->regmap, ps->offset, reg, + (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1, + APPLE_PMGR_PS_SET_TIMEOUT); + if (ret < 0) + dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n", + genpd->name, pstate, reg); + + if (auto_enable) { + /* Not all devices implement this; this is a no-op where not implemented. */ + reg &= ~APPLE_PMGR_FLAGS; + reg |= APPLE_PMGR_AUTO_ENABLE; + regmap_write(ps->regmap, ps->offset, reg); + } + + return ret; +} + +static bool apple_pmgr_ps_is_active(struct apple_pmgr_ps *ps) +{ + u32 reg = 0; + + regmap_read(ps->regmap, ps->offset, ®); + /* + * We consider domains as active if they are actually on, or if they have auto-PM + * enabled and the intended target is on. 
+ */ + return (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == APPLE_PMGR_PS_ACTIVE || + (FIELD_GET(APPLE_PMGR_PS_TARGET, reg) == APPLE_PMGR_PS_ACTIVE && + reg & APPLE_PMGR_AUTO_ENABLE)); +} + +static int apple_pmgr_ps_power_on(struct generic_pm_domain *genpd) +{ + return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_ACTIVE, true); +} + +static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd) +{ + return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_PWRGATE, false); +} + +static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev); + + mutex_lock(&ps->genpd.mlock); + + if (ps->genpd.status == GENPD_STATE_OFF) + dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset); + + dev_dbg(ps->dev, "PS 0x%x: assert reset\n", ps->offset); + /* Quiesce device before asserting reset */ + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, + APPLE_PMGR_DEV_DISABLE); + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, + APPLE_PMGR_RESET); + + mutex_unlock(&ps->genpd.mlock); + + return 0; +} + +static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev); + + mutex_lock(&ps->genpd.mlock); + + dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset); + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0); + regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 0); + + if (ps->genpd.status == GENPD_STATE_OFF) + dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset); + + mutex_unlock(&ps->genpd.mlock); + + return 0; +} + +static int apple_pmgr_reset_reset(struct reset_controller_dev *rcdev, unsigned long id) +{ + int ret; + + ret = apple_pmgr_reset_assert(rcdev, id); + if (ret) + return ret; + + usleep_range(APPLE_PMGR_RESET_TIME, 2 * APPLE_PMGR_RESET_TIME); + + return apple_pmgr_reset_deassert(rcdev, id); +} + +static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev); + u32 reg = 0; + + regmap_read(ps->regmap, ps->offset, ®); + + return !!(reg & APPLE_PMGR_RESET); +} + +const struct reset_control_ops apple_pmgr_reset_ops = { + .assert = apple_pmgr_reset_assert, + .deassert = apple_pmgr_reset_deassert, + .reset = apple_pmgr_reset_reset, + .status = apple_pmgr_reset_status, +}; + +static int apple_pmgr_reset_xlate(struct reset_controller_dev *rcdev, + const struct of_phandle_args *reset_spec) +{ + return 0; +} + +static int apple_pmgr_ps_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct apple_pmgr_ps *ps; + struct regmap *regmap; + struct of_phandle_iterator it; + int ret; + const char *name; + bool active; + + regmap = syscon_node_to_regmap(node->parent); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL); + if (!ps) + return -ENOMEM; + + ps->dev = dev; + ps->regmap = regmap; + + ret = of_property_read_string(node, "label", &name); + if (ret < 0) { + dev_err(dev, "missing label property\n"); + return ret; + } + + ret = of_property_read_u32(node, "reg", &ps->offset); + if (ret < 0) { + dev_err(dev, "missing reg property\n"); + return ret; + } + + ps->genpd.name = name; + ps->genpd.power_on = apple_pmgr_ps_power_on; + ps->genpd.power_off = 
apple_pmgr_ps_power_off; + + ret = of_property_read_u32(node, "apple,min-state", &ps->min_state); + if (ret == 0 && ps->min_state <= APPLE_PMGR_PS_ACTIVE) + regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN, + FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state)); + + active = apple_pmgr_ps_is_active(ps); + if (of_property_read_bool(node, "apple,always-on")) { + ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON; + if (!active) { + dev_warn(dev, "always-on domain %s is not on at boot\n", name); + /* Turn it on so pm_genpd_init does not fail */ + active = apple_pmgr_ps_power_on(&ps->genpd) == 0; + } + } + + /* Turn on auto-PM if the domain is already on */ + if (active) + regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_AUTO_ENABLE, + APPLE_PMGR_AUTO_ENABLE); + + ret = pm_genpd_init(&ps->genpd, NULL, !active); + if (ret < 0) { + dev_err(dev, "pm_genpd_init failed\n"); + return ret; + } + + ret = of_genpd_add_provider_simple(node, &ps->genpd); + if (ret < 0) { + dev_err(dev, "of_genpd_add_provider_simple failed\n"); + return ret; + } + + of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) { + struct of_phandle_args parent, child; + + parent.np = it.node; + parent.args_count = of_phandle_iterator_args(&it, parent.args, MAX_PHANDLE_ARGS); + child.np = node; + child.args_count = 0; + ret = of_genpd_add_subdomain(&parent, &child); + + if (ret == -EPROBE_DEFER) { + of_node_put(parent.np); + goto err_remove; + } else if (ret < 0) { + dev_err(dev, "failed to add to parent domain: %d (%s -> %s)\n", + ret, it.node->name, node->name); + of_node_put(parent.np); + goto err_remove; + } + } + + /* + * Do not participate in regular PM; parent power domains are handled via the + * genpd hierarchy. + */ + pm_genpd_remove_device(dev); + + ps->rcdev.owner = THIS_MODULE; + ps->rcdev.nr_resets = 1; + ps->rcdev.ops = &apple_pmgr_reset_ops; + ps->rcdev.of_node = dev->of_node; + ps->rcdev.of_reset_n_cells = 0; + ps->rcdev.of_xlate = apple_pmgr_reset_xlate; + + ret = devm_reset_controller_register(dev, &ps->rcdev); + if (ret < 0) + goto err_remove; + + return 0; +err_remove: + of_genpd_del_provider(node); + pm_genpd_remove(&ps->genpd); + return ret; +} + +static const struct of_device_id apple_pmgr_ps_of_match[] = { + { .compatible = "apple,pmgr-pwrstate" }, + {} +}; + +MODULE_DEVICE_TABLE(of, apple_pmgr_ps_of_match); + +static struct platform_driver apple_pmgr_ps_driver = { + .probe = apple_pmgr_ps_probe, + .driver = { + .name = "apple-pmgr-pwrstate", + .of_match_table = apple_pmgr_ps_of_match, + }, +}; + +MODULE_AUTHOR("Hector Martin <marcan@marcan.st>"); +MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs"); +MODULE_LICENSE("GPL v2"); + +module_platform_driver(apple_pmgr_ps_driver); diff --git a/drivers/soc/bcm/brcmstb/pm/pm-mips.c b/drivers/soc/bcm/brcmstb/pm/pm-mips.c index cdc3e387f049..4dfb5a85032b 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-mips.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-mips.c @@ -405,11 +405,14 @@ static int brcmstb_pm_init(void) i = ctrl.num_memc; if (i >= MAX_NUM_MEMC) { pr_warn("Too many MEMCs (max %d)\n", MAX_NUM_MEMC); + of_node_put(dn); break; } base = brcmstb_ioremap_node(dn, 0); - if (IS_ERR(base)) + if (IS_ERR(base)) { + of_node_put(dn); goto ddr_err; + } ctrl.memcs[i].ddr_phy_base = base; ctrl.num_memc++; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index b8d52d8d29db..3e59d479d001 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -377,7 +377,7 @@ static int imx_pgc_power_down(struct 
generic_pm_domain *genpd) } } - pm_runtime_put(domain->dev); + pm_runtime_put_sync_suspend(domain->dev); return 0; @@ -734,6 +734,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = { .map = IMX8MM_VPUH1_A53_DOMAIN, }, .pgc = BIT(IMX8MM_PGC_VPUH1), + .keep_clocks = true, }, [IMX8MM_POWER_DOMAIN_DISPMIX] = { @@ -840,6 +841,32 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = { .hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN, }, .pgc = BIT(IMX8MN_PGC_GPUMIX), + .keep_clocks = true, + }, + + [IMX8MN_POWER_DOMAIN_DISPMIX] = { + .genpd = { + .name = "dispmix", + }, + .bits = { + .pxx = IMX8MN_DISPMIX_SW_Pxx_REQ, + .map = IMX8MN_DISPMIX_A53_DOMAIN, + .hskreq = IMX8MN_DISPMIX_HSK_PWRDNREQN, + .hskack = IMX8MN_DISPMIX_HSK_PWRDNACKN, + }, + .pgc = BIT(IMX8MN_PGC_DISPMIX), + .keep_clocks = true, + }, + + [IMX8MN_POWER_DOMAIN_MIPI] = { + .genpd = { + .name = "mipi", + }, + .bits = { + .pxx = IMX8MN_MIPI_SW_Pxx_REQ, + .map = IMX8MN_MIPI_A53_DOMAIN, + }, + .pgc = BIT(IMX8MN_PGC_MIPI), }, }; diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index c2f076b56e24..511e74f0db8a 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -14,6 +14,7 @@ #include <linux/clk.h> #include <dt-bindings/power/imx8mm-power.h> +#include <dt-bindings/power/imx8mn-power.h> #define BLK_SFT_RSTN 0x0 #define BLK_CLK_EN 0x4 @@ -517,6 +518,77 @@ static const struct imx8m_blk_ctrl_data imx8mm_disp_blk_ctl_dev_data = { .num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data), }; + +static int imx8mn_disp_power_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl, + power_nb); + + if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF) + return NOTIFY_OK; + + /* Enable bus clock and deassert bus reset */ + regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(8)); + regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(8)); + + /* + * On power up we have no software backchannel to the GPC to + * wait for the ADB handshake to happen, so we just delay for a + * bit. On power down the GPC driver waits for the handshake. 
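For context, the imx8mn_disp_power_notifier() being added here is a genpd power notifier: the framework invokes it around domain transitions with GENPD_NOTIFY_* actions. A consumer-side sketch of registering such a notifier, assuming the device is already attached to its PM domain (names are hypothetical):

```c
#include <linux/notifier.h>
#include <linux/pm_domain.h>

/* Hypothetical consumer reacting to its domain's transitions. */
static int hyp_pd_notifier(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Quiesce the hardware before the domain is gated. */
		break;
	case GENPD_NOTIFY_ON:
		/* Domain is powered again; re-enable clocks/resets here. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hyp_pd_nb = {
	.notifier_call = hyp_pd_notifier,
};

/* In probe(), once the device is bound to its genpd: */
/*	ret = dev_pm_genpd_add_notifier(dev, &hyp_pd_nb); */
```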
+ */ + if (action == GENPD_NOTIFY_ON) + udelay(5); + + + return NOTIFY_OK; +} + +static const struct imx8m_blk_ctrl_domain_data imx8mn_disp_blk_ctl_domain_data[] = { + [IMX8MN_DISPBLK_PD_MIPI_DSI] = { + .name = "dispblk-mipi-dsi", + .clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", }, + .num_clks = 2, + .gpc_name = "mipi-dsi", + .rst_mask = BIT(0) | BIT(1), + .clk_mask = BIT(0) | BIT(1), + .mipi_phy_rst_mask = BIT(17), + }, + [IMX8MN_DISPBLK_PD_MIPI_CSI] = { + .name = "dispblk-mipi-csi", + .clk_names = (const char *[]){ "csi-aclk", "csi-pclk" }, + .num_clks = 2, + .gpc_name = "mipi-csi", + .rst_mask = BIT(2) | BIT(3), + .clk_mask = BIT(2) | BIT(3), + .mipi_phy_rst_mask = BIT(16), + }, + [IMX8MN_DISPBLK_PD_LCDIF] = { + .name = "dispblk-lcdif", + .clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", }, + .num_clks = 3, + .gpc_name = "lcdif", + .rst_mask = BIT(4) | BIT(5), + .clk_mask = BIT(4) | BIT(5), + }, + [IMX8MN_DISPBLK_PD_ISI] = { + .name = "dispblk-isi", + .clk_names = (const char *[]){ "disp_axi", "disp_apb", "disp_axi_root", + "disp_apb_root"}, + .num_clks = 4, + .gpc_name = "isi", + .rst_mask = BIT(6) | BIT(7), + .clk_mask = BIT(6) | BIT(7), + }, +}; + +static const struct imx8m_blk_ctrl_data imx8mn_disp_blk_ctl_dev_data = { + .max_reg = 0x84, + .power_notifier_fn = imx8mn_disp_power_notifier, + .domains = imx8mn_disp_blk_ctl_domain_data, + .num_domains = ARRAY_SIZE(imx8mn_disp_blk_ctl_domain_data), +}; + static const struct of_device_id imx8m_blk_ctrl_of_match[] = { { .compatible = "fsl,imx8mm-vpu-blk-ctrl", @@ -524,7 +596,10 @@ static const struct of_device_id imx8m_blk_ctrl_of_match[] = { }, { .compatible = "fsl,imx8mm-disp-blk-ctrl", .data = &imx8mm_disp_blk_ctl_dev_data - } ,{ + }, { + .compatible = "fsl,imx8mn-disp-blk-ctrl", + .data = &imx8mn_disp_blk_ctl_dev_data + }, { /* Sentinel */ } }; diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c index 1d818a8ba208..e9b854ed1bdf 100644 --- a/drivers/soc/qcom/cpr.c +++ b/drivers/soc/qcom/cpr.c @@ -1010,7 +1010,7 @@ static int cpr_interpolate(const struct corner *corner, int step_volt, return corner->uV; temp = f_diff * (uV_high - uV_low); - do_div(temp, f_high - f_low); + temp = div64_ul(temp, f_high - f_low); /* * max_volt_scale has units of uV/MHz while freq values diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 6bf2f1d1f2c5..ec52f29c8867 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -195,6 +195,28 @@ static const struct llcc_slice_config sm8250_data[] = { { LLCC_WRCACHE, 31, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, }; +static const struct llcc_slice_config sm8350_data[] = { + { LLCC_CPUSS, 1, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 1 }, + { LLCC_VIDSC0, 2, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_AUDIO, 6, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 0 }, + { LLCC_MDMHPGRW, 7, 1024, 3, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MODHW, 9, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CMPT, 10, 3072, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_GPUHTW, 11, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_GPU, 12, 1024, 1, 0, 0xfff, 0x0, 0, 0, 0, 1, 1, 0 }, + { LLCC_MMUHWT, 13, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, + { LLCC_DISP, 16, 3072, 2, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MDMPNG, 21, 1024, 0, 1, 0xf, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_AUDHW, 22, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CVP, 28, 512, 3, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_MODPE, 29, 256, 1, 1, 0xf, 0x0, 0, 0, 0, 0, 
1, 0 }, + { LLCC_APTCM, 30, 1024, 3, 1, 0x0, 0x1, 1, 0, 0, 0, 1, 0 }, + { LLCC_WRCACHE, 31, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, + { LLCC_CVPFW, 17, 512, 1, 0, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CPUSS1, 3, 1024, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 1, 0 }, + { LLCC_CPUHWT, 5, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 0, 0, 1 }, +}; + static const struct qcom_llcc_config sc7180_cfg = { .sct_data = sc7180_data, .size = ARRAY_SIZE(sc7180_data), @@ -228,6 +250,11 @@ static const struct qcom_llcc_config sm8250_cfg = { .size = ARRAY_SIZE(sm8250_data), }; +static const struct qcom_llcc_config sm8350_cfg = { + .sct_data = sm8350_data, + .size = ARRAY_SIZE(sm8350_data), +}; + static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; /** @@ -644,6 +671,7 @@ static const struct of_device_id qcom_llcc_of_match[] = { { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg }, { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg }, { .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg }, + { .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfg }, { } }; diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 34acf58bbb0d..cbe5e39fdaeb 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -352,7 +352,7 @@ static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev, return ret; } -static struct thermal_cooling_device_ops qmp_cooling_device_ops = { +static const struct thermal_cooling_device_ops qmp_cooling_device_ops = { .get_max_state = qmp_cdev_get_max_state, .get_cur_state = qmp_cdev_get_cur_state, .set_cur_state = qmp_cdev_set_cur_state, diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c index 131d24caabf8..d6bfd1bbdc2a 100644 --- a/drivers/soc/qcom/qcom_stats.c +++ b/drivers/soc/qcom/qcom_stats.c @@ -237,6 +237,15 @@ static const struct stats_config rpm_data = { .subsystem_stats_in_smem = false, }; +/* Older RPM firmwares have the stats at a fixed offset instead */ +static const struct stats_config rpm_data_dba0 = { + .stats_offset = 0xdba0, + .num_records = 2, + .appended_stats_avail = true, + .dynamic_offset = false, + .subsystem_stats_in_smem = false, +}; + static const struct stats_config rpmh_data = { .stats_offset = 0x48, .num_records = 3, @@ -246,6 +255,10 @@ static const struct stats_config rpmh_data = { }; static const struct of_device_id qcom_stats_table[] = { + { .compatible = "qcom,apq8084-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,msm8226-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,msm8916-rpm-stats", .data = &rpm_data_dba0 }, + { .compatible = "qcom,msm8974-rpm-stats", .data = &rpm_data_dba0 }, { .compatible = "qcom,rpm-stats", .data = &rpm_data }, { .compatible = "qcom,rpmh-stats", .data = &rpmh_data }, { } diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c index 1a03eaa38c46..c8c4c730b135 100644 --- a/drivers/soc/qcom/qmi_interface.c +++ b/drivers/soc/qcom/qmi_interface.c @@ -96,7 +96,7 @@ static void qmi_recv_del_server(struct qmi_handle *qmi, * @node: id of the dying node * * Signals the client that all previously registered services on this node are - * now gone and then calls the bye callback to allow the client client further + * now gone and then calls the bye callback to allow the client further * cleaning up resources associated with this remote. 
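The qcom_stats change above follows a common kernel pattern: a single probe routine, with per-compatible parameters carried in the of_device_id .data pointer (here selecting rpm_data versus the fixed-offset rpm_data_dba0). A sketch of how such a config is typically retrieved in probe; the struct and names below are illustrative stand-ins, not the driver's actual types:

```c
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative stand-in for the per-SoC stats_config. */
struct hyp_stats_config {
	u32 stats_offset;
	bool dynamic_offset;
};

static int hyp_stats_probe(struct platform_device *pdev)
{
	const struct hyp_stats_config *cfg;

	/* Resolves to the .data of whichever compatible matched. */
	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;

	dev_dbg(&pdev->dev, "stats at offset %#x\n", cfg->stats_offset);
	return 0;
}
```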
*/ static void qmi_recv_bye(struct qmi_handle *qmi, diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 3a12a482f6b2..01c2f50cb97e 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -691,7 +691,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg, * @drv: The controller. * @msg: The data to be written to the controller. * - * This should only be called for for sleep/wake state, never active-only + * This should only be called for sleep/wake state, never active-only * state. * * The caller must ensure that no other RPMH actions are happening and the diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 1118345d8824..58f1dc9b9cb7 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -63,73 +63,134 @@ struct rpmhpd_desc { static DEFINE_MUTEX(rpmhpd_lock); -/* SDM845 RPMH powerdomains */ +/* RPMH powerdomains */ + +static struct rpmhpd cx_ao; +static struct rpmhpd mx; +static struct rpmhpd mx_ao; +static struct rpmhpd cx = { + .pd = { .name = "cx", }, + .peer = &cx_ao, + .res_name = "cx.lvl", +}; + +static struct rpmhpd cx_ao = { + .pd = { .name = "cx_ao", }, + .active_only = true, + .peer = &cx, + .res_name = "cx.lvl", +}; -static struct rpmhpd sdm845_ebi = { +static struct rpmhpd cx_ao_w_mx_parent; +static struct rpmhpd cx_w_mx_parent = { + .pd = { .name = "cx", }, + .peer = &cx_ao_w_mx_parent, + .parent = &mx.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd cx_ao_w_mx_parent = { + .pd = { .name = "cx_ao", }, + .active_only = true, + .peer = &cx_w_mx_parent, + .parent = &mx_ao.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd ebi = { .pd = { .name = "ebi", }, .res_name = "ebi.lvl", }; -static struct rpmhpd sdm845_lmx = { - .pd = { .name = "lmx", }, - .res_name = "lmx.lvl", +static struct rpmhpd gfx = { + .pd = { .name = "gfx", }, + .res_name = "gfx.lvl", }; -static struct rpmhpd sdm845_lcx = { +static struct rpmhpd lcx = { .pd = { .name = "lcx", }, .res_name = "lcx.lvl", }; -static struct rpmhpd sdm845_gfx = { - .pd = { .name = "gfx", }, - .res_name = "gfx.lvl", +static struct rpmhpd lmx = { + .pd = { .name = "lmx", }, + .res_name = "lmx.lvl", }; -static struct rpmhpd sdm845_mss = { +static struct rpmhpd mmcx_ao; +static struct rpmhpd mmcx = { + .pd = { .name = "mmcx", }, + .peer = &mmcx_ao, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mmcx_ao = { + .pd = { .name = "mmcx_ao", }, + .active_only = true, + .peer = &mmcx, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mmcx_ao_w_cx_parent; +static struct rpmhpd mmcx_w_cx_parent = { + .pd = { .name = "mmcx", }, + .peer = &mmcx_ao_w_cx_parent, + .parent = &cx.pd, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mmcx_ao_w_cx_parent = { + .pd = { .name = "mmcx_ao", }, + .active_only = true, + .peer = &mmcx_w_cx_parent, + .parent = &cx_ao.pd, + .res_name = "mmcx.lvl", +}; + +static struct rpmhpd mss = { .pd = { .name = "mss", }, .res_name = "mss.lvl", }; -static struct rpmhpd sdm845_mx_ao; -static struct rpmhpd sdm845_mx = { +static struct rpmhpd mx_ao; +static struct rpmhpd mx = { .pd = { .name = "mx", }, - .peer = &sdm845_mx_ao, + .peer = &mx_ao, .res_name = "mx.lvl", }; -static struct rpmhpd sdm845_mx_ao = { +static struct rpmhpd mx_ao = { .pd = { .name = "mx_ao", }, .active_only = true, - .peer = &sdm845_mx, + .peer = &mx, .res_name = "mx.lvl", }; -static struct rpmhpd sdm845_cx_ao; -static struct rpmhpd sdm845_cx = { - .pd = { .name = "cx", }, - .peer = &sdm845_cx_ao, - .parent = &sdm845_mx.pd, - 
.res_name = "cx.lvl", +static struct rpmhpd mxc_ao; +static struct rpmhpd mxc = { + .pd = { .name = "mxc", }, + .peer = &mxc_ao, + .res_name = "mxc.lvl", }; -static struct rpmhpd sdm845_cx_ao = { - .pd = { .name = "cx_ao", }, +static struct rpmhpd mxc_ao = { + .pd = { .name = "mxc_ao", }, .active_only = true, - .peer = &sdm845_cx, - .parent = &sdm845_mx_ao.pd, - .res_name = "cx.lvl", + .peer = &mxc, + .res_name = "mxc.lvl", }; +/* SDM845 RPMH powerdomains */ static struct rpmhpd *sdm845_rpmhpds[] = { - [SDM845_EBI] = &sdm845_ebi, - [SDM845_MX] = &sdm845_mx, - [SDM845_MX_AO] = &sdm845_mx_ao, - [SDM845_CX] = &sdm845_cx, - [SDM845_CX_AO] = &sdm845_cx_ao, - [SDM845_LMX] = &sdm845_lmx, - [SDM845_LCX] = &sdm845_lcx, - [SDM845_GFX] = &sdm845_gfx, - [SDM845_MSS] = &sdm845_mss, + [SDM845_CX] = &cx_w_mx_parent, + [SDM845_CX_AO] = &cx_ao_w_mx_parent, + [SDM845_EBI] = &ebi, + [SDM845_GFX] = &gfx, + [SDM845_LCX] = &lcx, + [SDM845_LMX] = &lmx, + [SDM845_MSS] = &mss, + [SDM845_MX] = &mx, + [SDM845_MX_AO] = &mx_ao, }; static const struct rpmhpd_desc sdm845_desc = { @@ -139,9 +200,9 @@ static const struct rpmhpd_desc sdm845_desc = { /* SDX55 RPMH powerdomains */ static struct rpmhpd *sdx55_rpmhpds[] = { - [SDX55_MSS] = &sdm845_mss, - [SDX55_MX] = &sdm845_mx, - [SDX55_CX] = &sdm845_cx, + [SDX55_CX] = &cx_w_mx_parent, + [SDX55_MSS] = &mss, + [SDX55_MX] = &mx, }; static const struct rpmhpd_desc sdx55_desc = { @@ -151,12 +212,12 @@ static const struct rpmhpd_desc sdx55_desc = { /* SM6350 RPMH powerdomains */ static struct rpmhpd *sm6350_rpmhpds[] = { - [SM6350_CX] = &sdm845_cx, - [SM6350_GFX] = &sdm845_gfx, - [SM6350_LCX] = &sdm845_lcx, - [SM6350_LMX] = &sdm845_lmx, - [SM6350_MSS] = &sdm845_mss, - [SM6350_MX] = &sdm845_mx, + [SM6350_CX] = &cx_w_mx_parent, + [SM6350_GFX] = &gfx, + [SM6350_LCX] = &lcx, + [SM6350_LMX] = &lmx, + [SM6350_MSS] = &mss, + [SM6350_MX] = &mx, }; static const struct rpmhpd_desc sm6350_desc = { @@ -165,33 +226,18 @@ static const struct rpmhpd_desc sm6350_desc = { }; /* SM8150 RPMH powerdomains */ - -static struct rpmhpd sm8150_mmcx_ao; -static struct rpmhpd sm8150_mmcx = { - .pd = { .name = "mmcx", }, - .peer = &sm8150_mmcx_ao, - .res_name = "mmcx.lvl", -}; - -static struct rpmhpd sm8150_mmcx_ao = { - .pd = { .name = "mmcx_ao", }, - .active_only = true, - .peer = &sm8150_mmcx, - .res_name = "mmcx.lvl", -}; - static struct rpmhpd *sm8150_rpmhpds[] = { - [SM8150_MSS] = &sdm845_mss, - [SM8150_EBI] = &sdm845_ebi, - [SM8150_LMX] = &sdm845_lmx, - [SM8150_LCX] = &sdm845_lcx, - [SM8150_GFX] = &sdm845_gfx, - [SM8150_MX] = &sdm845_mx, - [SM8150_MX_AO] = &sdm845_mx_ao, - [SM8150_CX] = &sdm845_cx, - [SM8150_CX_AO] = &sdm845_cx_ao, - [SM8150_MMCX] = &sm8150_mmcx, - [SM8150_MMCX_AO] = &sm8150_mmcx_ao, + [SM8150_CX] = &cx_w_mx_parent, + [SM8150_CX_AO] = &cx_ao_w_mx_parent, + [SM8150_EBI] = &ebi, + [SM8150_GFX] = &gfx, + [SM8150_LCX] = &lcx, + [SM8150_LMX] = &lmx, + [SM8150_MMCX] = &mmcx, + [SM8150_MMCX_AO] = &mmcx_ao, + [SM8150_MSS] = &mss, + [SM8150_MX] = &mx, + [SM8150_MX_AO] = &mx_ao, }; static const struct rpmhpd_desc sm8150_desc = { @@ -199,17 +245,18 @@ static const struct rpmhpd_desc sm8150_desc = { .num_pds = ARRAY_SIZE(sm8150_rpmhpds), }; +/* SM8250 RPMH powerdomains */ static struct rpmhpd *sm8250_rpmhpds[] = { - [SM8250_CX] = &sdm845_cx, - [SM8250_CX_AO] = &sdm845_cx_ao, - [SM8250_EBI] = &sdm845_ebi, - [SM8250_GFX] = &sdm845_gfx, - [SM8250_LCX] = &sdm845_lcx, - [SM8250_LMX] = &sdm845_lmx, - [SM8250_MMCX] = &sm8150_mmcx, - [SM8250_MMCX_AO] = &sm8150_mmcx_ao, - [SM8250_MX] = &sdm845_mx, - 
[SM8250_MX_AO] = &sdm845_mx_ao, + [SM8250_CX] = &cx_w_mx_parent, + [SM8250_CX_AO] = &cx_ao_w_mx_parent, + [SM8250_EBI] = &ebi, + [SM8250_GFX] = &gfx, + [SM8250_LCX] = &lcx, + [SM8250_LMX] = &lmx, + [SM8250_MMCX] = &mmcx, + [SM8250_MMCX_AO] = &mmcx_ao, + [SM8250_MX] = &mx, + [SM8250_MX_AO] = &mx_ao, }; static const struct rpmhpd_desc sm8250_desc = { @@ -218,34 +265,20 @@ static const struct rpmhpd_desc sm8250_desc = { }; /* SM8350 Power domains */ -static struct rpmhpd sm8350_mxc_ao; -static struct rpmhpd sm8350_mxc = { - .pd = { .name = "mxc", }, - .peer = &sm8350_mxc_ao, - .res_name = "mxc.lvl", -}; - -static struct rpmhpd sm8350_mxc_ao = { - .pd = { .name = "mxc_ao", }, - .active_only = true, - .peer = &sm8350_mxc, - .res_name = "mxc.lvl", -}; - static struct rpmhpd *sm8350_rpmhpds[] = { - [SM8350_CX] = &sdm845_cx, - [SM8350_CX_AO] = &sdm845_cx_ao, - [SM8350_EBI] = &sdm845_ebi, - [SM8350_GFX] = &sdm845_gfx, - [SM8350_LCX] = &sdm845_lcx, - [SM8350_LMX] = &sdm845_lmx, - [SM8350_MMCX] = &sm8150_mmcx, - [SM8350_MMCX_AO] = &sm8150_mmcx_ao, - [SM8350_MX] = &sdm845_mx, - [SM8350_MX_AO] = &sdm845_mx_ao, - [SM8350_MXC] = &sm8350_mxc, - [SM8350_MXC_AO] = &sm8350_mxc_ao, - [SM8350_MSS] = &sdm845_mss, + [SM8350_CX] = &cx_w_mx_parent, + [SM8350_CX_AO] = &cx_ao_w_mx_parent, + [SM8350_EBI] = &ebi, + [SM8350_GFX] = &gfx, + [SM8350_LCX] = &lcx, + [SM8350_LMX] = &lmx, + [SM8350_MMCX] = &mmcx, + [SM8350_MMCX_AO] = &mmcx_ao, + [SM8350_MSS] = &mss, + [SM8350_MX] = &mx, + [SM8350_MX_AO] = &mx_ao, + [SM8350_MXC] = &mxc, + [SM8350_MXC_AO] = &mxc_ao, }; static const struct rpmhpd_desc sm8350_desc = { @@ -253,16 +286,38 @@ static const struct rpmhpd_desc sm8350_desc = { .num_pds = ARRAY_SIZE(sm8350_rpmhpds), }; +/* SM8450 RPMH powerdomains */ +static struct rpmhpd *sm8450_rpmhpds[] = { + [SM8450_CX] = &cx, + [SM8450_CX_AO] = &cx_ao, + [SM8450_EBI] = &ebi, + [SM8450_GFX] = &gfx, + [SM8450_LCX] = &lcx, + [SM8450_LMX] = &lmx, + [SM8450_MMCX] = &mmcx_w_cx_parent, + [SM8450_MMCX_AO] = &mmcx_ao_w_cx_parent, + [SM8450_MSS] = &mss, + [SM8450_MX] = &mx, + [SM8450_MX_AO] = &mx_ao, + [SM8450_MXC] = &mxc, + [SM8450_MXC_AO] = &mxc_ao, +}; + +static const struct rpmhpd_desc sm8450_desc = { + .rpmhpds = sm8450_rpmhpds, + .num_pds = ARRAY_SIZE(sm8450_rpmhpds), +}; + /* SC7180 RPMH powerdomains */ static struct rpmhpd *sc7180_rpmhpds[] = { - [SC7180_CX] = &sdm845_cx, - [SC7180_CX_AO] = &sdm845_cx_ao, - [SC7180_GFX] = &sdm845_gfx, - [SC7180_MX] = &sdm845_mx, - [SC7180_MX_AO] = &sdm845_mx_ao, - [SC7180_LMX] = &sdm845_lmx, - [SC7180_LCX] = &sdm845_lcx, - [SC7180_MSS] = &sdm845_mss, + [SC7180_CX] = &cx_w_mx_parent, + [SC7180_CX_AO] = &cx_ao_w_mx_parent, + [SC7180_GFX] = &gfx, + [SC7180_LCX] = &lcx, + [SC7180_LMX] = &lmx, + [SC7180_MSS] = &mss, + [SC7180_MX] = &mx, + [SC7180_MX_AO] = &mx_ao, }; static const struct rpmhpd_desc sc7180_desc = { @@ -272,15 +327,15 @@ static const struct rpmhpd_desc sc7180_desc = { /* SC7280 RPMH powerdomains */ static struct rpmhpd *sc7280_rpmhpds[] = { - [SC7280_CX] = &sdm845_cx, - [SC7280_CX_AO] = &sdm845_cx_ao, - [SC7280_EBI] = &sdm845_ebi, - [SC7280_GFX] = &sdm845_gfx, - [SC7280_MX] = &sdm845_mx, - [SC7280_MX_AO] = &sdm845_mx_ao, - [SC7280_LMX] = &sdm845_lmx, - [SC7280_LCX] = &sdm845_lcx, - [SC7280_MSS] = &sdm845_mss, + [SC7280_CX] = &cx, + [SC7280_CX_AO] = &cx_ao, + [SC7280_EBI] = &ebi, + [SC7280_GFX] = &gfx, + [SC7280_LCX] = &lcx, + [SC7280_LMX] = &lmx, + [SC7280_MSS] = &mss, + [SC7280_MX] = &mx, + [SC7280_MX_AO] = &mx_ao, }; static const struct rpmhpd_desc sc7280_desc = { @@ -290,17 +345,17 @@ 
static const struct rpmhpd_desc sc7280_desc = { /* SC8180x RPMH powerdomains */ static struct rpmhpd *sc8180x_rpmhpds[] = { - [SC8180X_CX] = &sdm845_cx, - [SC8180X_CX_AO] = &sdm845_cx_ao, - [SC8180X_EBI] = &sdm845_ebi, - [SC8180X_GFX] = &sdm845_gfx, - [SC8180X_LCX] = &sdm845_lcx, - [SC8180X_LMX] = &sdm845_lmx, - [SC8180X_MMCX] = &sm8150_mmcx, - [SC8180X_MMCX_AO] = &sm8150_mmcx_ao, - [SC8180X_MSS] = &sdm845_mss, - [SC8180X_MX] = &sdm845_mx, - [SC8180X_MX_AO] = &sdm845_mx_ao, + [SC8180X_CX] = &cx_w_mx_parent, + [SC8180X_CX_AO] = &cx_ao_w_mx_parent, + [SC8180X_EBI] = &ebi, + [SC8180X_GFX] = &gfx, + [SC8180X_LCX] = &lcx, + [SC8180X_LMX] = &lmx, + [SC8180X_MMCX] = &mmcx, + [SC8180X_MMCX_AO] = &mmcx_ao, + [SC8180X_MSS] = &mss, + [SC8180X_MX] = &mx, + [SC8180X_MX_AO] = &mx_ao, }; static const struct rpmhpd_desc sc8180x_desc = { @@ -318,6 +373,7 @@ static const struct of_device_id rpmhpd_match_table[] = { { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, { .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc }, + { .compatible = "qcom,sm8450-rpmhpd", .data = &sm8450_desc }, { } }; MODULE_DEVICE_TABLE(of, rpmhpd_match_table); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 4f69fb9b2e0e..0a8d8d24bfb7 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -102,7 +102,6 @@ struct rpmpd { const bool active_only; unsigned int corner; bool enabled; - const char *res_name; const int res_type; const int res_id; struct qcom_smd_rpm *rpm; @@ -396,6 +395,45 @@ static const struct rpmpd_desc sm6115_desc = { .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, }; +/* sm6125 RPM Power domains */ +DEFINE_RPMPD_PAIR(sm6125, vddcx, vddcx_ao, RWCX, LEVEL, 0); +DEFINE_RPMPD_VFL(sm6125, vddcx_vfl, RWCX, 0); + +DEFINE_RPMPD_PAIR(sm6125, vddmx, vddmx_ao, RWMX, LEVEL, 0); +DEFINE_RPMPD_VFL(sm6125, vddmx_vfl, RWMX, 0); + +static struct rpmpd *sm6125_rpmpds[] = { + [SM6125_VDDCX] = &sm6125_vddcx, + [SM6125_VDDCX_AO] = &sm6125_vddcx_ao, + [SM6125_VDDCX_VFL] = &sm6125_vddcx_vfl, + [SM6125_VDDMX] = &sm6125_vddmx, + [SM6125_VDDMX_AO] = &sm6125_vddmx_ao, + [SM6125_VDDMX_VFL] = &sm6125_vddmx_vfl, +}; + +static const struct rpmpd_desc sm6125_desc = { + .rpmpds = sm6125_rpmpds, + .num_pds = ARRAY_SIZE(sm6125_rpmpds), + .max_state = RPM_SMD_LEVEL_BINNING, +}; + +static struct rpmpd *qcm2290_rpmpds[] = { + [QCM2290_VDDCX] = &sm6115_vddcx, + [QCM2290_VDDCX_AO] = &sm6115_vddcx_ao, + [QCM2290_VDDCX_VFL] = &sm6115_vddcx_vfl, + [QCM2290_VDDMX] = &sm6115_vddmx, + [QCM2290_VDDMX_AO] = &sm6115_vddmx_ao, + [QCM2290_VDDMX_VFL] = &sm6115_vddmx_vfl, + [QCM2290_VDD_LPI_CX] = &sm6115_vdd_lpi_cx, + [QCM2290_VDD_LPI_MX] = &sm6115_vdd_lpi_mx, +}; + +static const struct rpmpd_desc qcm2290_desc = { + .rpmpds = qcm2290_rpmpds, + .num_pds = ARRAY_SIZE(qcm2290_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, +}; + static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, @@ -405,9 +443,11 @@ static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc }, { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc }, + { .compatible = "qcom,qcm2290-rpmpd", .data = &qcm2290_desc }, { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc }, { 
.compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc }, + { .compatible = "qcom,sm6125-rpmpd", .data = &sm6125_desc }, { } }; MODULE_DEVICE_TABLE(of, rpmpd_match_table); diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index c7e519bfdc8a..e2057d8f1eff 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -85,7 +85,7 @@ #define SMEM_GLOBAL_HOST 0xfffe /* Max number of processors/hosts in a system */ -#define SMEM_HOST_COUNT 14 +#define SMEM_HOST_COUNT 15 /** * struct smem_proc_comm - proc_comm communication struct (legacy) diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 9a0eb59405e8..6dc0f39c0ec3 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -313,8 +313,11 @@ static const struct soc_id soc_id[] = { { 421, "IPQ6000" }, { 422, "IPQ6010" }, { 425, "SC7180" }, + { 434, "SM6350" }, { 453, "IPQ6005" }, { 455, "QRB5165" }, + { 457, "SM8450" }, + { 459, "SM7225" }, }; static const char *socinfo_machine(struct device *dev, unsigned int id) diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index ce16ef5c939c..2cbd03db2cc7 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -235,6 +235,13 @@ config ARCH_R8A77961 This enables support for the Renesas R-Car M3-W+ SoC. This includes different gradings like R-Car M3e and M3e-2G. +config ARCH_R8A779F0 + bool "ARM64 Platform support for R-Car S4-8" + select ARCH_RCAR_GEN3 + select SYSC_R8A779F0 + help + This enables support for the Renesas R-Car S4-8 SoC. + config ARCH_R8A77980 bool "ARM64 Platform support for R-Car V3H" select ARCH_RCAR_GEN3 @@ -297,6 +304,9 @@ config RST_RCAR config SYSC_RCAR bool "System Controller support for R-Car" if COMPILE_TEST +config SYSC_RCAR_GEN4 + bool "System Controller support for R-Car Gen4" if COMPILE_TEST + config SYSC_R8A77995 bool "System Controller support for R-Car D3" if COMPILE_TEST select SYSC_RCAR @@ -337,6 +347,10 @@ config SYSC_R8A77961 bool "System Controller support for R-Car M3-W+" if COMPILE_TEST select SYSC_RCAR +config SYSC_R8A779F0 + bool "System Controller support for R-Car S4-8" if COMPILE_TEST + select SYSC_RCAR_GEN4 + config SYSC_R8A7792 bool "System Controller support for R-Car V2H" if COMPILE_TEST select SYSC_RCAR @@ -351,6 +365,7 @@ config SYSC_R8A77970 config SYSC_R8A779A0 bool "System Controller support for R-Car V3U" if COMPILE_TEST + select SYSC_RCAR_GEN4 config SYSC_RMOBILE bool "System Controller support for R-Mobile" if COMPILE_TEST diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile index 9b29bed2a597..deeb41f84f01 100644 --- a/drivers/soc/renesas/Makefile +++ b/drivers/soc/renesas/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o +obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o ifdef CONFIG_SMP obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o endif @@ -32,4 +33,5 @@ endif # Family obj-$(CONFIG_RST_RCAR) += rcar-rst.o obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o +obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o obj-$(CONFIG_SYSC_RMOBILE) += rmobile-sysc.o diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c index 7410b9fa9846..fdfc857df334 100644 --- a/drivers/soc/renesas/r8a779a0-sysc.c +++ b/drivers/soc/renesas/r8a779a0-sysc.c @@ -21,35 +21,9 @@ #include <dt-bindings/power/r8a779a0-sysc.h> -/* - * Power Domain flags - */ -#define PD_CPU BIT(0) /* 
Area contains main CPU core */ -#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */ -#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */ - -#define PD_CPU_NOCR PD_CPU | PD_NO_CR /* CPU area lacks CR */ -#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */ - -/* - * Description of a Power Area - */ -struct r8a779a0_sysc_area { - const char *name; - u8 pdr; /* PDRn */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ -}; - -/* - * SoC-specific Power Area Description - */ -struct r8a779a0_sysc_info { - const struct r8a779a0_sysc_area *areas; - unsigned int num_areas; -}; +#include "rcar-gen4-sysc.h" -static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = { +static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = { { "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, { "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU }, { "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU }, @@ -96,355 +70,7 @@ static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = { { "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 }, }; -static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = { +const struct rcar_gen4_sysc_info r8a779a0_sysc_info __initconst = { .areas = r8a779a0_areas, .num_areas = ARRAY_SIZE(r8a779a0_areas), }; - -/* SYSC Common */ -#define SYSCSR 0x000 /* SYSC Status Register */ -#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */ -#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */ -#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */ -#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */ -#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */ - -/* Power Domain Registers */ -#define PDRSR(n) (0x1000 + ((n) * 0x40)) -#define PDRONCR(n) (0x1004 + ((n) * 0x40)) -#define PDROFFCR(n) (0x1008 + ((n) * 0x40)) -#define PDRESR(n) (0x100C + ((n) * 0x40)) - -/* PWRON/PWROFF */ -#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */ - -/* PDRESR */ -#define PDRESR_ERR BIT(0) - -/* PDRSR */ -#define PDRSR_OFF BIT(0) /* Power-OFF state */ -#define PDRSR_ON BIT(4) /* Power-ON state */ -#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */ -#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */ - -#define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */ - -#define SYSCSR_TIMEOUT 10000 -#define SYSCSR_DELAY_US 10 - -#define PDRESR_RETRIES 1000 -#define PDRESR_DELAY_US 10 - -#define SYSCISR_TIMEOUT 10000 -#define SYSCISR_DELAY_US 10 - -#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32) - -static void __iomem *r8a779a0_sysc_base; -static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */ - -static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on) -{ - unsigned int reg_offs; - u32 val; - int ret; - - if (on) - reg_offs = PDRONCR(pdr); - else - reg_offs = PDROFFCR(pdr); - - /* Wait until SYSC is ready to accept a power request */ - ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val, - (val & SYSCSR_BUSY) == SYSCSR_BUSY, - SYSCSR_DELAY_US, SYSCSR_TIMEOUT); - if (ret < 0) - return -EAGAIN; - - /* Submit power shutoff or power resume request */ - iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs); - - return 0; -} - -static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask) -{ - u32 val; - int ret; - - iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx)); - - ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx), - val, !(val & 
isr_mask), - SYSCISR_DELAY_US, SYSCISR_TIMEOUT); - if (ret < 0) { - pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__); - return -EIO; - } - - return 0; -} - -static int r8a779a0_sysc_power(u8 pdr, bool on) -{ - unsigned int isr_mask; - unsigned int reg_idx, bit_idx; - unsigned int status; - unsigned long flags; - int ret = 0; - u32 val; - int k; - - spin_lock_irqsave(&r8a779a0_sysc_lock, flags); - - reg_idx = pdr / NUM_DOMAINS_EACH_REG; - bit_idx = pdr % NUM_DOMAINS_EACH_REG; - - isr_mask = BIT(bit_idx); - - /* - * The interrupt source needs to be enabled, but masked, to prevent the - * CPU from receiving it. - */ - iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask, - r8a779a0_sysc_base + SYSCIER(reg_idx)); - iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask, - r8a779a0_sysc_base + SYSCIMR(reg_idx)); - - ret = clear_irq_flags(reg_idx, isr_mask); - if (ret) - goto out; - - /* Submit power shutoff or resume request until it was accepted */ - for (k = 0; k < PDRESR_RETRIES; k++) { - ret = r8a779a0_sysc_pwr_on_off(pdr, on); - if (ret) - goto out; - - status = ioread32(r8a779a0_sysc_base + PDRESR(pdr)); - if (!(status & PDRESR_ERR)) - break; - - udelay(PDRESR_DELAY_US); - } - - if (k == PDRESR_RETRIES) { - ret = -EIO; - goto out; - } - - /* Wait until the power shutoff or resume request has completed * */ - ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx), - val, (val & isr_mask), - SYSCISR_DELAY_US, SYSCISR_TIMEOUT); - if (ret < 0) { - ret = -EIO; - goto out; - } - - /* Clear interrupt flags */ - ret = clear_irq_flags(reg_idx, isr_mask); - if (ret) - goto out; - - out: - spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags); - - pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off", - pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret); - return ret; -} - -static bool r8a779a0_sysc_power_is_off(u8 pdr) -{ - unsigned int st; - - st = ioread32(r8a779a0_sysc_base + PDRSR(pdr)); - - if (st & PDRSR_OFF) - return true; - - return false; -} - -struct r8a779a0_sysc_pd { - struct generic_pm_domain genpd; - u8 pdr; - unsigned int flags; - char name[]; -}; - -static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d) -{ - return container_of(d, struct r8a779a0_sysc_pd, genpd); -} - -static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd) -{ - struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd); - - pr_debug("%s: %s\n", __func__, genpd->name); - return r8a779a0_sysc_power(pd->pdr, false); -} - -static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd) -{ - struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd); - - pr_debug("%s: %s\n", __func__, genpd->name); - return r8a779a0_sysc_power(pd->pdr, true); -} - -static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd) -{ - struct generic_pm_domain *genpd = &pd->genpd; - const char *name = pd->genpd.name; - int error; - - if (pd->flags & PD_CPU) { - /* - * This domain contains a CPU core and therefore it should - * only be turned off if the CPU is not in use. - */ - pr_debug("PM domain %s contains %s\n", name, "CPU"); - genpd->flags |= GENPD_FLAG_ALWAYS_ON; - } else if (pd->flags & PD_SCU) { - /* - * This domain contains an SCU and cache-controller, and - * therefore it should only be turned off if the CPU cores are - * not in use. - */ - pr_debug("PM domain %s contains %s\n", name, "SCU"); - genpd->flags |= GENPD_FLAG_ALWAYS_ON; - } else if (pd->flags & PD_NO_CR) { - /* - * This domain cannot be turned off. 
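The to_r8a779a0_pd() helper being removed here (and reintroduced below as to_rcar_gen4_pd()) is the standard container_of trick: the generic_pm_domain handed to the power callbacks is embedded in the driver's own structure, so the outer object can be recovered without any lookup table. A minimal illustration with hypothetical names:

```c
#include <linux/kernel.h>
#include <linux/pm_domain.h>

/* The genpd is embedded, so callbacks can recover the outer object. */
struct hyp_pd {
	struct generic_pm_domain genpd;
	u8 pdr;
};

static int hyp_pd_power_on(struct generic_pm_domain *genpd)
{
	struct hyp_pd *pd = container_of(genpd, struct hyp_pd, genpd);

	/* pd->pdr now names the hardware power area to act on. */
	return 0;
}
```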
- */ - genpd->flags |= GENPD_FLAG_ALWAYS_ON; - } - - if (!(pd->flags & (PD_CPU | PD_SCU))) { - /* Enable Clock Domain for I/O devices */ - genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; - genpd->attach_dev = cpg_mssr_attach_dev; - genpd->detach_dev = cpg_mssr_detach_dev; - } - - genpd->power_off = r8a779a0_sysc_pd_power_off; - genpd->power_on = r8a779a0_sysc_pd_power_on; - - if (pd->flags & (PD_CPU | PD_NO_CR)) { - /* Skip CPUs (handled by SMP code) and areas without control */ - pr_debug("%s: Not touching %s\n", __func__, genpd->name); - goto finalize; - } - - if (!r8a779a0_sysc_power_is_off(pd->pdr)) { - pr_debug("%s: %s is already powered\n", __func__, genpd->name); - goto finalize; - } - - r8a779a0_sysc_power(pd->pdr, true); - -finalize: - error = pm_genpd_init(genpd, &simple_qos_governor, false); - if (error) - pr_err("Failed to init PM domain %s: %d\n", name, error); - - return error; -} - -static const struct of_device_id r8a779a0_sysc_matches[] __initconst = { - { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info }, - { /* sentinel */ } -}; - -struct r8a779a0_pm_domains { - struct genpd_onecell_data onecell_data; - struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1]; -}; - -static struct genpd_onecell_data *r8a779a0_sysc_onecell_data; - -static int __init r8a779a0_sysc_pd_init(void) -{ - const struct r8a779a0_sysc_info *info; - const struct of_device_id *match; - struct r8a779a0_pm_domains *domains; - struct device_node *np; - void __iomem *base; - unsigned int i; - int error; - - np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match); - if (!np) - return -ENODEV; - - info = match->data; - - base = of_iomap(np, 0); - if (!base) { - pr_warn("%pOF: Cannot map regs\n", np); - error = -ENOMEM; - goto out_put; - } - - r8a779a0_sysc_base = base; - - domains = kzalloc(sizeof(*domains), GFP_KERNEL); - if (!domains) { - error = -ENOMEM; - goto out_put; - } - - domains->onecell_data.domains = domains->domains; - domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains); - r8a779a0_sysc_onecell_data = &domains->onecell_data; - - for (i = 0; i < info->num_areas; i++) { - const struct r8a779a0_sysc_area *area = &info->areas[i]; - struct r8a779a0_sysc_pd *pd; - size_t n; - - if (!area->name) { - /* Skip NULLified area */ - continue; - } - - n = strlen(area->name) + 1; - pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); - if (!pd) { - error = -ENOMEM; - goto out_put; - } - - memcpy(pd->name, area->name, n); - pd->genpd.name = pd->name; - pd->pdr = area->pdr; - pd->flags = area->flags; - - error = r8a779a0_sysc_pd_setup(pd); - if (error) - goto out_put; - - domains->domains[area->pdr] = &pd->genpd; - - if (area->parent < 0) - continue; - - error = pm_genpd_add_subdomain(domains->domains[area->parent], - &pd->genpd); - if (error) { - pr_warn("Failed to add PM subdomain %s to parent %u\n", - area->name, area->parent); - goto out_put; - } - } - - error = of_genpd_add_provider_onecell(np, &domains->onecell_data); - -out_put: - of_node_put(np); - return error; -} -early_initcall(r8a779a0_sysc_pd_init); diff --git a/drivers/soc/renesas/r8a779f0-sysc.c b/drivers/soc/renesas/r8a779f0-sysc.c new file mode 100644 index 000000000000..5602aa6bd7ed --- /dev/null +++ b/drivers/soc/renesas/r8a779f0-sysc.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas R-Car S4-8 System Controller + * + * Copyright (C) 2021 Renesas Electronics Corp. 
+ */ + +#include <linux/bits.h> +#include <linux/clk/renesas.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/of_address.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include <dt-bindings/power/r8a779f0-sysc.h> + +#include "rcar-gen4-sysc.h" + +static struct rcar_gen4_sysc_area r8a779f0_areas[] __initdata = { + { "always-on", R8A779F0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, + { "a3e0", R8A779F0_PD_A3E0, R8A779F0_PD_ALWAYS_ON, PD_SCU }, + { "a3e1", R8A779F0_PD_A3E1, R8A779F0_PD_ALWAYS_ON, PD_SCU }, + { "a2e0d0", R8A779F0_PD_A2E0D0, R8A779F0_PD_A3E0, PD_SCU }, + { "a2e0d1", R8A779F0_PD_A2E0D1, R8A779F0_PD_A3E0, PD_SCU }, + { "a2e1d0", R8A779F0_PD_A2E1D0, R8A779F0_PD_A3E1, PD_SCU }, + { "a2e1d1", R8A779F0_PD_A2E1D1, R8A779F0_PD_A3E1, PD_SCU }, + { "a1e0d0c0", R8A779F0_PD_A1E0D0C0, R8A779F0_PD_A2E0D0, PD_CPU_NOCR }, + { "a1e0d0c1", R8A779F0_PD_A1E0D0C1, R8A779F0_PD_A2E0D0, PD_CPU_NOCR }, + { "a1e0d1c0", R8A779F0_PD_A1E0D1C0, R8A779F0_PD_A2E0D1, PD_CPU_NOCR }, + { "a1e0d1c1", R8A779F0_PD_A1E0D1C1, R8A779F0_PD_A2E0D1, PD_CPU_NOCR }, + { "a1e1d0c0", R8A779F0_PD_A1E1D0C0, R8A779F0_PD_A2E1D0, PD_CPU_NOCR }, + { "a1e1d0c1", R8A779F0_PD_A1E1D0C1, R8A779F0_PD_A2E1D0, PD_CPU_NOCR }, + { "a1e1d1c0", R8A779F0_PD_A1E1D1C0, R8A779F0_PD_A2E1D1, PD_CPU_NOCR }, + { "a1e1d1c1", R8A779F0_PD_A1E1D1C1, R8A779F0_PD_A2E1D1, PD_CPU_NOCR }, +}; + +const struct rcar_gen4_sysc_info r8a779f0_sysc_info __initconst = { + .areas = r8a779f0_areas, + .num_areas = ARRAY_SIZE(r8a779f0_areas), +}; diff --git a/drivers/soc/renesas/rcar-gen4-sysc.c b/drivers/soc/renesas/rcar-gen4-sysc.c new file mode 100644 index 000000000000..831162a57f9a --- /dev/null +++ b/drivers/soc/renesas/rcar-gen4-sysc.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * R-Car Gen4 SYSC Power management support + * + * Copyright (C) 2021 Renesas Electronics Corp. 
+ */ + +#include <linux/bits.h> +#include <linux/clk/renesas.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/of_address.h> +#include <linux/pm_domain.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "rcar-gen4-sysc.h" + +/* SYSC Common */ +#define SYSCSR 0x000 /* SYSC Status Register */ +#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */ +#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */ +#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */ +#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */ +#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */ + +/* Power Domain Registers */ +#define PDRSR(n) (0x1000 + ((n) * 0x40)) +#define PDRONCR(n) (0x1004 + ((n) * 0x40)) +#define PDROFFCR(n) (0x1008 + ((n) * 0x40)) +#define PDRESR(n) (0x100C + ((n) * 0x40)) + +/* PWRON/PWROFF */ +#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */ + +/* PDRESR */ +#define PDRESR_ERR BIT(0) + +/* PDRSR */ +#define PDRSR_OFF BIT(0) /* Power-OFF state */ +#define PDRSR_ON BIT(4) /* Power-ON state */ +#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */ +#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */ + +#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set means not busy */ + +#define SYSCSR_TIMEOUT 10000 +#define SYSCSR_DELAY_US 10 + +#define PDRESR_RETRIES 1000 +#define PDRESR_DELAY_US 10 + +#define SYSCISR_TIMEOUT 10000 +#define SYSCISR_DELAY_US 10 + +#define RCAR_GEN4_PD_ALWAYS_ON 64 +#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32) + +static void __iomem *rcar_gen4_sysc_base; +static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */ + +static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on) +{ + unsigned int reg_offs; + u32 val; + int ret; + + if (on) + reg_offs = PDRONCR(pdr); + else + reg_offs = PDROFFCR(pdr); + + /* Wait until SYSC is ready to accept a power request */ + ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val, + (val & SYSCSR_BUSY) == SYSCSR_BUSY, + SYSCSR_DELAY_US, SYSCSR_TIMEOUT); + if (ret < 0) + return -EAGAIN; + + /* Submit power shutoff or power resume request */ + iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs); + + return 0; +} + +static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask) +{ + u32 val; + int ret; + + iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx)); + + ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx), + val, !(val & isr_mask), + SYSCISR_DELAY_US, SYSCISR_TIMEOUT); + if (ret < 0) { + pr_err("%s: cannot clear IRQ flags in SYSCISCR\n", __func__); + return -EIO; + } + + return 0; +} + +static int rcar_gen4_sysc_power(u8 pdr, bool on) +{ + unsigned int isr_mask; + unsigned int reg_idx, bit_idx; + unsigned int status; + unsigned long flags; + int ret = 0; + u32 val; + int k; + + spin_lock_irqsave(&rcar_gen4_sysc_lock, flags); + + reg_idx = pdr / NUM_DOMAINS_EACH_REG; + bit_idx = pdr % NUM_DOMAINS_EACH_REG; + + isr_mask = BIT(bit_idx); + + /* + * The interrupt source needs to be enabled, but masked, to prevent the + * CPU from receiving it. 
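clear_irq_flags() above follows the write-one-to-clear convention: write the mask to the status/clear register, then poll until the bit actually drops. Reduced to its essentials (an illustrative helper, not the driver's exact code):

```c
#include <linux/io.h>
#include <linux/iopoll.h>

/* Ack a write-1-to-clear status bit, then wait for it to drop. */
static int hyp_ack_status(void __iomem *iscr, u32 mask)
{
	u32 val;

	writel(mask, iscr);
	/* Re-read every 10 us, give up after 10 ms. */
	return readl_poll_timeout_atomic(iscr, val, !(val & mask), 10, 10000);
}
```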
+ */ + iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask, + rcar_gen4_sysc_base + SYSCIER(reg_idx)); + iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask, + rcar_gen4_sysc_base + SYSCIMR(reg_idx)); + + ret = clear_irq_flags(reg_idx, isr_mask); + if (ret) + goto out; + + /* Submit power shutoff or resume request until it is accepted */ + for (k = 0; k < PDRESR_RETRIES; k++) { + ret = rcar_gen4_sysc_pwr_on_off(pdr, on); + if (ret) + goto out; + + status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr)); + if (!(status & PDRESR_ERR)) + break; + + udelay(PDRESR_DELAY_US); + } + + if (k == PDRESR_RETRIES) { + ret = -EIO; + goto out; + } + + /* Wait until the power shutoff or resume request has completed */ + ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx), + val, (val & isr_mask), + SYSCISR_DELAY_US, SYSCISR_TIMEOUT); + if (ret < 0) { + ret = -EIO; + goto out; + } + + /* Clear interrupt flags */ + ret = clear_irq_flags(reg_idx, isr_mask); + if (ret) + goto out; + + out: + spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags); + + pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off", + pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret); + return ret; +} + +static bool rcar_gen4_sysc_power_is_off(u8 pdr) +{ + unsigned int st; + + st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr)); + + if (st & PDRSR_OFF) + return true; + + return false; +} + +struct rcar_gen4_sysc_pd { + struct generic_pm_domain genpd; + u8 pdr; + unsigned int flags; + char name[]; +}; + +static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d) +{ + return container_of(d, struct rcar_gen4_sysc_pd, genpd); +} + +static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd) +{ + struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd); + + pr_debug("%s: %s\n", __func__, genpd->name); + return rcar_gen4_sysc_power(pd->pdr, false); +} + +static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd) +{ + struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd); + + pr_debug("%s: %s\n", __func__, genpd->name); + return rcar_gen4_sysc_power(pd->pdr, true); +} + +static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd) +{ + struct generic_pm_domain *genpd = &pd->genpd; + const char *name = pd->genpd.name; + int error; + + if (pd->flags & PD_CPU) { + /* + * This domain contains a CPU core and therefore it should + * only be turned off if the CPU is not in use. + */ + pr_debug("PM domain %s contains %s\n", name, "CPU"); + genpd->flags |= GENPD_FLAG_ALWAYS_ON; + } else if (pd->flags & PD_SCU) { + /* + * This domain contains an SCU and cache-controller, and + * therefore it should only be turned off if the CPU cores are + * not in use. + */ + pr_debug("PM domain %s contains %s\n", name, "SCU"); + genpd->flags |= GENPD_FLAG_ALWAYS_ON; + } else if (pd->flags & PD_NO_CR) { + /* + * This domain cannot be turned off. 
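rcar_gen4_sysc_pd_setup() maps the PD_* area flags onto genpd behaviour: CPU, SCU and no-CR areas all collapse to GENPD_FLAG_ALWAYS_ON, and every domain is then handed to pm_genpd_init(). A condensed sketch of that registration step, assuming the same genpd API (helper name is illustrative):

```c
#include <linux/pm_domain.h>

/* Register one domain, forcing it on when the hardware must stay up. */
static int hyp_register_domain(struct generic_pm_domain *genpd, bool always_on)
{
	if (always_on)
		genpd->flags |= GENPD_FLAG_ALWAYS_ON;

	/* Last argument is the initial "is_off" state; start powered here. */
	return pm_genpd_init(genpd, &simple_qos_governor, false);
}
```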
+ */ + genpd->flags |= GENPD_FLAG_ALWAYS_ON; + } + + if (!(pd->flags & (PD_CPU | PD_SCU))) { + /* Enable Clock Domain for I/O devices */ + genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP; + genpd->attach_dev = cpg_mssr_attach_dev; + genpd->detach_dev = cpg_mssr_detach_dev; + } + + genpd->power_off = rcar_gen4_sysc_pd_power_off; + genpd->power_on = rcar_gen4_sysc_pd_power_on; + + if (pd->flags & (PD_CPU | PD_NO_CR)) { + /* Skip CPUs (handled by SMP code) and areas without control */ + pr_debug("%s: Not touching %s\n", __func__, genpd->name); + goto finalize; + } + + if (!rcar_gen4_sysc_power_is_off(pd->pdr)) { + pr_debug("%s: %s is already powered\n", __func__, genpd->name); + goto finalize; + } + + rcar_gen4_sysc_power(pd->pdr, true); + +finalize: + error = pm_genpd_init(genpd, &simple_qos_governor, false); + if (error) + pr_err("Failed to init PM domain %s: %d\n", name, error); + + return error; +} + +static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = { +#ifdef CONFIG_SYSC_R8A779A0 + { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info }, +#endif +#ifdef CONFIG_SYSC_R8A779F0 + { .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info }, +#endif + { /* sentinel */ } +}; + +struct rcar_gen4_pm_domains { + struct genpd_onecell_data onecell_data; + struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1]; +}; + +static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data; + +static int __init rcar_gen4_sysc_pd_init(void) +{ + const struct rcar_gen4_sysc_info *info; + const struct of_device_id *match; + struct rcar_gen4_pm_domains *domains; + struct device_node *np; + void __iomem *base; + unsigned int i; + int error; + + np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match); + if (!np) + return -ENODEV; + + info = match->data; + + base = of_iomap(np, 0); + if (!base) { + pr_warn("%pOF: Cannot map regs\n", np); + error = -ENOMEM; + goto out_put; + } + + rcar_gen4_sysc_base = base; + + domains = kzalloc(sizeof(*domains), GFP_KERNEL); + if (!domains) { + error = -ENOMEM; + goto out_put; + } + + domains->onecell_data.domains = domains->domains; + domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains); + rcar_gen4_sysc_onecell_data = &domains->onecell_data; + + for (i = 0; i < info->num_areas; i++) { + const struct rcar_gen4_sysc_area *area = &info->areas[i]; + struct rcar_gen4_sysc_pd *pd; + size_t n; + + if (!area->name) { + /* Skip NULLified area */ + continue; + } + + n = strlen(area->name) + 1; + pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); + if (!pd) { + error = -ENOMEM; + goto out_put; + } + + memcpy(pd->name, area->name, n); + pd->genpd.name = pd->name; + pd->pdr = area->pdr; + pd->flags = area->flags; + + error = rcar_gen4_sysc_pd_setup(pd); + if (error) + goto out_put; + + domains->domains[area->pdr] = &pd->genpd; + + if (area->parent < 0) + continue; + + error = pm_genpd_add_subdomain(domains->domains[area->parent], + &pd->genpd); + if (error) { + pr_warn("Failed to add PM subdomain %s to parent %u\n", + area->name, area->parent); + goto out_put; + } + } + + error = of_genpd_add_provider_onecell(np, &domains->onecell_data); + +out_put: + of_node_put(np); + return error; +} +early_initcall(rcar_gen4_sysc_pd_init); diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h new file mode 100644 index 000000000000..0e0bd102b1f9 --- /dev/null +++ b/drivers/soc/renesas/rcar-gen4-sysc.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * R-Car Gen4 System 
Controller + * + * Copyright (C) 2021 Renesas Electronics Corp. + */ +#ifndef __SOC_RENESAS_RCAR_GEN4_SYSC_H__ +#define __SOC_RENESAS_RCAR_GEN4_SYSC_H__ + +#include <linux/types.h> + +/* + * Power Domain flags + */ +#define PD_CPU BIT(0) /* Area contains main CPU core */ +#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */ +#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */ + +#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */ +#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */ + +/* + * Description of a Power Area + */ +struct rcar_gen4_sysc_area { + const char *name; + u8 pdr; /* PDRn */ + int parent; /* -1 if none */ + unsigned int flags; /* See PD_* */ +}; + +/* + * SoC-specific Power Area Description + */ +struct rcar_gen4_sysc_info { + const struct rcar_gen4_sysc_area *areas; + unsigned int num_areas; +}; + +extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info; +extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info; + +#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */ diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c index 8a1e402ea799..4d293eb2d8f3 100644 --- a/drivers/soc/renesas/rcar-rst.c +++ b/drivers/soc/renesas/rcar-rst.c @@ -13,15 +13,43 @@ #define WDTRSTCR_RESET 0xA55A0002 #define WDTRSTCR 0x0054 +#define CR7BAR 0x0070 +#define CR7BAREN BIT(4) +#define CR7BAR_MASK 0xFFFC0000 + +static void __iomem *rcar_rst_base; +static u32 saved_mode __initdata; +static int (*rcar_rst_set_rproc_boot_addr_func)(u64 boot_addr); + static int rcar_rst_enable_wdt_reset(void __iomem *base) { iowrite32(WDTRSTCR_RESET, base + WDTRSTCR); return 0; } + +/* + * Most R-Car Gen3 SoCs have an ARM Realtime Core. + * The firmware boot address has to be written to CR7BAR before + * the realtime core is started. + * The boot address must be aligned to a 256 KiB boundary. 
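Given CR7BAR_MASK above, the validity check in rcar_rst_set_gen3_rproc_boot_addr() is simply "no bits outside the window may be set": the low 18 bits must be clear (256 KiB alignment) and the address must fit in the 32-bit register. A small illustration of the same check (hypothetical helper, mirroring the mask from the patch):

```c
#include <linux/kernel.h>
#include <linux/sizes.h>

#define HYP_CR7BAR_MASK 0xFFFC0000U	/* mirrors CR7BAR_MASK above */

/* Valid iff 256 KiB aligned and representable in the 32-bit BAR. */
static bool hyp_boot_addr_valid(u64 addr)
{
	return (addr & ~(u64)HYP_CR7BAR_MASK) == 0;
}

/* Equivalent spelling: IS_ALIGNED(addr, SZ_256K) && addr < SZ_4G */
```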
+ */ +static int rcar_rst_set_gen3_rproc_boot_addr(u64 boot_addr) +{ + if (boot_addr & ~(u64)CR7BAR_MASK) { + pr_err("Invalid boot address got %llx\n", boot_addr); + return -EINVAL; + } + + iowrite32(boot_addr, rcar_rst_base + CR7BAR); + iowrite32(boot_addr | CR7BAREN, rcar_rst_base + CR7BAR); + + return 0; +} + struct rst_config { unsigned int modemr; /* Mode Monitoring Register Offset */ int (*configure)(void __iomem *base); /* Platform specific config */ + int (*set_rproc_boot_addr)(u64 boot_addr); }; static const struct rst_config rcar_rst_gen1 __initconst = { @@ -35,9 +63,10 @@ static const struct rst_config rcar_rst_gen2 __initconst = { static const struct rst_config rcar_rst_gen3 __initconst = { .modemr = 0x60, + .set_rproc_boot_addr = rcar_rst_set_gen3_rproc_boot_addr, }; -static const struct rst_config rcar_rst_r8a779a0 __initconst = { +static const struct rst_config rcar_rst_gen4 __initconst = { .modemr = 0x00, /* MODEMR0 and it has CPG related bits */ }; @@ -71,14 +100,12 @@ static const struct of_device_id rcar_rst_matches[] __initconst = { { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 }, { .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 }, { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 }, - /* R-Car V3U */ - { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 }, + /* R-Car Gen4 */ + { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 }, + { .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 }, { /* sentinel */ } }; -static void __iomem *rcar_rst_base __initdata; -static u32 saved_mode __initdata; - static int __init rcar_rst_init(void) { const struct of_device_id *match; @@ -100,6 +127,8 @@ static int __init rcar_rst_init(void) rcar_rst_base = base; cfg = match->data; + rcar_rst_set_rproc_boot_addr_func = cfg->set_rproc_boot_addr; + saved_mode = ioread32(base + cfg->modemr); if (cfg->configure) { error = cfg->configure(base); @@ -130,3 +159,12 @@ int __init rcar_rst_read_mode_pins(u32 *mode) *mode = saved_mode; return 0; } + +int rcar_rst_set_rproc_boot_addr(u64 boot_addr) +{ + if (!rcar_rst_set_rproc_boot_addr_func) + return -EIO; + + return rcar_rst_set_rproc_boot_addr_func(boot_addr); +} +EXPORT_SYMBOL_GPL(rcar_rst_set_rproc_boot_addr); diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 7961b0be1850..62540ffc581a 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -33,6 +33,10 @@ static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = { .reg = 0xfff00044, /* PRR (Product Register) */ }; +static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = { + .name = "R-Car Gen4", +}; + static const struct renesas_family fam_rmobile __initconst __maybe_unused = { .name = "R-Mobile", .reg = 0xe600101c, /* CCCR (Common Chip Code Register) */ @@ -214,6 +218,11 @@ static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = { .id = 0x59, }; +static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = { + .family = &fam_rcar_gen4, + .id = 0x5a, +}; + static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = { .family = &fam_shmobile, .id = 0x37, @@ -319,6 +328,9 @@ static const struct of_device_id renesas_socs[] __initconst = { #ifdef CONFIG_ARCH_R8A779A0 { .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u }, #endif +#ifdef CONFIG_ARCH_R8A779F0 + { .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 }, +#endif #if 
defined(CONFIG_ARCH_R9A07G044) { .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l }, #endif @@ -328,94 +340,92 @@ static const struct of_device_id renesas_socs[] __initconst = { { /* sentinel */ } }; +struct renesas_id { + unsigned int offset; + u32 mask; +}; + +static const struct renesas_id id_bsid __initconst = { + .offset = 0, + .mask = 0xff0000, + /* + * TODO: Upper 4 bits of BSID are for chip version, but the format is + * not known at this time so we don't know how to specify eshi and eslo + */ +}; + +static const struct renesas_id id_rzg2l __initconst = { + .offset = 0xa04, + .mask = 0xfffffff, +}; + +static const struct renesas_id id_prr __initconst = { + .offset = 0, + .mask = 0xff00, +}; + +static const struct of_device_id renesas_ids[] __initconst = { + { .compatible = "renesas,bsid", .data = &id_bsid }, + { .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l }, + { .compatible = "renesas,prr", .data = &id_prr }, + { /* sentinel */ } +}; + static int __init renesas_soc_init(void) { struct soc_device_attribute *soc_dev_attr; + unsigned int product, eshi = 0, eslo; const struct renesas_family *family; const struct of_device_id *match; const struct renesas_soc *soc; + const struct renesas_id *id; void __iomem *chipid = NULL; struct soc_device *soc_dev; struct device_node *np; - unsigned int product, eshi = 0, eslo; + const char *soc_id; match = of_match_node(renesas_socs, of_root); if (!match) return -ENODEV; + soc_id = strchr(match->compatible, ',') + 1; soc = match->data; family = soc->family; - np = of_find_compatible_node(NULL, NULL, "renesas,bsid"); + np = of_find_matching_node_and_match(NULL, renesas_ids, &match); if (np) { + id = match->data; chipid = of_iomap(np, 0); of_node_put(np); - - if (chipid) { - product = readl(chipid); - iounmap(chipid); - - if (soc->id && ((product >> 16) & 0xff) != soc->id) { - pr_warn("SoC mismatch (product = 0x%x)\n", - product); - return -ENODEV; - } - } - - /* - * TODO: Upper 4 bits of BSID are for chip version, but the - * format is not known at this time so we don't know how to - * specify eshi and eslo - */ - - goto done; + } else if (soc->id && family->reg) { + /* Try hardcoded CCCR/PRR fallback */ + id = &id_prr; + chipid = ioremap(family->reg, 4); } - np = of_find_compatible_node(NULL, NULL, "renesas,r9a07g044-sysc"); - if (np) { - chipid = of_iomap(np, 0); - of_node_put(np); + if (chipid) { + product = readl(chipid + id->offset); + iounmap(chipid); - if (chipid) { - product = readl(chipid + 0x0a04); - iounmap(chipid); + if (id == &id_prr) { + /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ + if ((product & 0x7fff) == 0x5210) + product ^= 0x11; + /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ + if ((product & 0x7fff) == 0x5211) + product ^= 0x12; - if (soc->id && (product & 0xfffffff) != soc->id) { - pr_warn("SoC mismatch (product = 0x%x)\n", - product); - return -ENODEV; - } + eshi = ((product >> 4) & 0x0f) + 1; + eslo = product & 0xf; } - goto done; - } - - /* Try PRR first, then hardcoded fallback */ - np = of_find_compatible_node(NULL, NULL, "renesas,prr"); - if (np) { - chipid = of_iomap(np, 0); - of_node_put(np); - } else if (soc->id && family->reg) { - chipid = ioremap(family->reg, 4); - } - if (chipid) { - product = readl(chipid); - iounmap(chipid); - /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */ - if ((product & 0x7fff) == 0x5210) - product ^= 0x11; - /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */ - if ((product & 0x7fff) == 0x5211) - product ^= 0x12; - if (soc->id && ((product >> 8) 
& 0xff) != soc->id) { + if (soc->id && + ((product & id->mask) >> __ffs(id->mask)) != soc->id) { pr_warn("SoC mismatch (product = 0x%x)\n", product); return -ENODEV; } - eshi = ((product >> 4) & 0x0f) + 1; - eslo = product & 0xf; } -done: soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; @@ -425,8 +435,7 @@ done: of_node_put(np); soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL); - soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1, - GFP_KERNEL); + soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL); if (eshi) soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", eshi, eslo); diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index e2cedef1e8d1..a9f8b224322e 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -23,6 +23,20 @@ config EXYNOS_CHIPID Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage. This driver can also be built as module (exynos_chipid). +config EXYNOS_USI + tristate "Exynos USI (Universal Serial Interface) driver" + default ARCH_EXYNOS && ARM64 + depends on ARCH_EXYNOS || COMPILE_TEST + select MFD_SYSCON + help + Enable support for USI block. USI (Universal Serial Interface) is an + IP-core found in modern Samsung Exynos SoCs, like Exynos850 and + ExynosAutoV9. USI block can be configured to provide one of the + following serial protocols: UART, SPI or High Speed I2C. + + This driver allows one to configure USI for the desired protocol, which + is usually done in the USI node in the Device Tree. + config EXYNOS_PMU bool "Exynos PMU controller driver" if COMPILE_TEST depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST) diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile index 2ae4bea804cf..9f59d1905ab0 100644 --- a/drivers/soc/samsung/Makefile +++ b/drivers/soc/samsung/Makefile @@ -4,6 +4,8 @@ obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o obj-$(CONFIG_EXYNOS_CHIPID) += exynos_chipid.o exynos_chipid-y += exynos-chipid.o exynos-asv.o +obj-$(CONFIG_EXYNOS_USI) += exynos-usi.o + obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \ diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index a28053ec7e6a..2746d05936d3 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -42,6 +42,7 @@ static const struct exynos_soc_id { unsigned int id; } soc_ids[] = { /* List ordered by SoC name */ + /* Compatible with: samsung,exynos4210-chipid */ { "EXYNOS3250", 0xE3472000 }, { "EXYNOS4210", 0x43200000 }, /* EVT0 revision */ { "EXYNOS4210", 0x43210000 }, @@ -55,6 +56,8 @@ static const struct exynos_soc_id { { "EXYNOS5440", 0xE5440000 }, { "EXYNOS5800", 0xE5422000 }, { "EXYNOS7420", 0xE7420000 }, + /* Compatible with: samsung,exynos850-chipid */ + { "EXYNOS7885", 0xE7885000 }, { "EXYNOS850", 0xE3830000 }, { "EXYNOSAUTOV9", 0xAAA80000 }, }; diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index a18c93a4646c..732c86ce2be8 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -94,6 +94,8 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = { .compatible = "samsung,exynos5433-pmu", }, { .compatible = "samsung,exynos7-pmu", + }, { + .compatible = "samsung,exynos850-pmu", }, { /*sentinel*/ }, }; diff --git a/drivers/soc/samsung/exynos-usi.c b/drivers/soc/samsung/exynos-usi.c new file mode 100644 index 000000000000..114352695ac2
--- /dev/null +++ b/drivers/soc/samsung/exynos-usi.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021 Linaro Ltd. + * Author: Sam Protsenko <semen.protsenko@linaro.org> + * + * Samsung Exynos USI driver (Universal Serial Interface). + */ + +#include <linux/clk.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/soc/samsung,exynos-usi.h> + +/* USIv2: System Register: SW_CONF register bits */ +#define USI_V2_SW_CONF_NONE 0x0 +#define USI_V2_SW_CONF_UART BIT(0) +#define USI_V2_SW_CONF_SPI BIT(1) +#define USI_V2_SW_CONF_I2C BIT(2) +#define USI_V2_SW_CONF_MASK (USI_V2_SW_CONF_UART | USI_V2_SW_CONF_SPI | \ + USI_V2_SW_CONF_I2C) + +/* USIv2: USI register offsets */ +#define USI_CON 0x04 +#define USI_OPTION 0x08 + +/* USIv2: USI register bits */ +#define USI_CON_RESET BIT(0) +#define USI_OPTION_CLKREQ_ON BIT(1) +#define USI_OPTION_CLKSTOP_ON BIT(2) + +enum exynos_usi_ver { + USI_VER2 = 2, +}; + +struct exynos_usi_variant { + enum exynos_usi_ver ver; /* USI IP-core version */ + unsigned int sw_conf_mask; /* SW_CONF mask for all protocols */ + size_t min_mode; /* first index in exynos_usi_modes[] */ + size_t max_mode; /* last index in exynos_usi_modes[] */ + size_t num_clks; /* number of clocks to assert */ + const char * const *clk_names; /* clock names to assert */ +}; + +struct exynos_usi { + struct device *dev; + void __iomem *regs; /* USI register map */ + struct clk_bulk_data *clks; /* USI clocks */ + + size_t mode; /* current USI SW_CONF mode index */ + bool clkreq_on; /* always provide clock to IP */ + + /* System Register */ + struct regmap *sysreg; /* System Register map */ + unsigned int sw_conf; /* SW_CONF register offset in sysreg */ + + const struct exynos_usi_variant *data; +}; + +struct exynos_usi_mode { + const char *name; /* mode name */ + unsigned int val; /* mode register value */ +}; + +static const struct exynos_usi_mode exynos_usi_modes[] = { + [USI_V2_NONE] = { .name = "none", .val = USI_V2_SW_CONF_NONE }, + [USI_V2_UART] = { .name = "uart", .val = USI_V2_SW_CONF_UART }, + [USI_V2_SPI] = { .name = "spi", .val = USI_V2_SW_CONF_SPI }, + [USI_V2_I2C] = { .name = "i2c", .val = USI_V2_SW_CONF_I2C }, +}; + +static const char * const exynos850_usi_clk_names[] = { "pclk", "ipclk" }; +static const struct exynos_usi_variant exynos850_usi_data = { + .ver = USI_VER2, + .sw_conf_mask = USI_V2_SW_CONF_MASK, + .min_mode = USI_V2_NONE, + .max_mode = USI_V2_I2C, + .num_clks = ARRAY_SIZE(exynos850_usi_clk_names), + .clk_names = exynos850_usi_clk_names, +}; + +static const struct of_device_id exynos_usi_dt_match[] = { + { + .compatible = "samsung,exynos850-usi", + .data = &exynos850_usi_data, + }, + { } /* sentinel */ +}; +MODULE_DEVICE_TABLE(of, exynos_usi_dt_match); + +/** + * exynos_usi_set_sw_conf - Set USI block configuration mode + * @usi: USI driver object + * @mode: Mode index + * + * Select underlying serial protocol (UART/SPI/I2C) in USI IP-core. + * + * Return: 0 on success, or negative error code on failure. 
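Since the mode values are one-hot within USI_V2_SW_CONF_MASK, decoding a raw SW_CONF value back to a protocol is a plain switch. A small sketch (not part of the driver) reusing the defines above:

/* Sketch only: map a raw SW_CONF value back to a protocol name,
 * mirroring the exynos_usi_modes[] table.
 */
static const char *usi_v2_conf_to_name(unsigned int sw_conf)
{
	switch (sw_conf & USI_V2_SW_CONF_MASK) {
	case USI_V2_SW_CONF_UART:
		return "uart";
	case USI_V2_SW_CONF_SPI:
		return "spi";
	case USI_V2_SW_CONF_I2C:
		return "i2c";
	default:
		return "none";
	}
}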
+ */ +static int exynos_usi_set_sw_conf(struct exynos_usi *usi, size_t mode) +{ + unsigned int val; + int ret; + + if (mode < usi->data->min_mode || mode > usi->data->max_mode) + return -EINVAL; + + val = exynos_usi_modes[mode].val; + ret = regmap_update_bits(usi->sysreg, usi->sw_conf, + usi->data->sw_conf_mask, val); + if (ret) + return ret; + + usi->mode = mode; + dev_dbg(usi->dev, "protocol: %s\n", exynos_usi_modes[usi->mode].name); + + return 0; +} + +/** + * exynos_usi_enable - Initialize USI block + * @usi: USI driver object + * + * USI IP-core start state is "reset" (on startup and after CPU resume). This + * routine enables the USI block by clearing the reset flag. It also configures + * HWACG behavior (needed e.g. for UART Rx). It should be performed before + * underlying protocol becomes functional. + * + * Return: 0 on success, or negative error code on failure. + */ +static int exynos_usi_enable(const struct exynos_usi *usi) +{ + u32 val; + int ret; + + ret = clk_bulk_prepare_enable(usi->data->num_clks, usi->clks); + if (ret) + return ret; + + /* Enable USI block */ + val = readl(usi->regs + USI_CON); + val &= ~USI_CON_RESET; + writel(val, usi->regs + USI_CON); + udelay(1); + + /* Continuously provide the clock to USI IP w/o gating */ + if (usi->clkreq_on) { + val = readl(usi->regs + USI_OPTION); + val &= ~USI_OPTION_CLKSTOP_ON; + val |= USI_OPTION_CLKREQ_ON; + writel(val, usi->regs + USI_OPTION); + } + + clk_bulk_disable_unprepare(usi->data->num_clks, usi->clks); + + return ret; +} + +static int exynos_usi_configure(struct exynos_usi *usi) +{ + int ret; + + ret = exynos_usi_set_sw_conf(usi, usi->mode); + if (ret) + return ret; + + if (usi->data->ver == USI_VER2) + return exynos_usi_enable(usi); + + return 0; +} + +static int exynos_usi_parse_dt(struct device_node *np, struct exynos_usi *usi) +{ + int ret; + u32 mode; + + ret = of_property_read_u32(np, "samsung,mode", &mode); + if (ret) + return ret; + if (mode < usi->data->min_mode || mode > usi->data->max_mode) + return -EINVAL; + usi->mode = mode; + + usi->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg"); + if (IS_ERR(usi->sysreg)) + return PTR_ERR(usi->sysreg); + + ret = of_property_read_u32_index(np, "samsung,sysreg", 1, + &usi->sw_conf); + if (ret) + return ret; + + usi->clkreq_on = of_property_read_bool(np, "samsung,clkreq-on"); + + return 0; +} + +static int exynos_usi_get_clocks(struct exynos_usi *usi) +{ + const size_t num = usi->data->num_clks; + struct device *dev = usi->dev; + size_t i; + + if (num == 0) + return 0; + + usi->clks = devm_kcalloc(dev, num, sizeof(*usi->clks), GFP_KERNEL); + if (!usi->clks) + return -ENOMEM; + + for (i = 0; i < num; ++i) + usi->clks[i].id = usi->data->clk_names[i]; + + return devm_clk_bulk_get(dev, num, usi->clks); +} + +static int exynos_usi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct exynos_usi *usi; + int ret; + + usi = devm_kzalloc(dev, sizeof(*usi), GFP_KERNEL); + if (!usi) + return -ENOMEM; + + usi->dev = dev; + platform_set_drvdata(pdev, usi); + + usi->data = of_device_get_match_data(dev); + if (!usi->data) + return -EINVAL; + + ret = exynos_usi_parse_dt(np, usi); + if (ret) + return ret; + + ret = exynos_usi_get_clocks(usi); + if (ret) + return ret; + + if (usi->data->ver == USI_VER2) { + usi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(usi->regs)) + return PTR_ERR(usi->regs); + } + + ret = exynos_usi_configure(usi); + if (ret) + return ret; + + /* Make it possible to 
embed protocol nodes into USI np */ + return of_platform_populate(np, NULL, NULL, dev); +} + +static int __maybe_unused exynos_usi_resume_noirq(struct device *dev) +{ + struct exynos_usi *usi = dev_get_drvdata(dev); + + return exynos_usi_configure(usi); +} + +static const struct dev_pm_ops exynos_usi_pm = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, exynos_usi_resume_noirq) +}; + +static struct platform_driver exynos_usi_driver = { + .driver = { + .name = "exynos-usi", + .pm = &exynos_usi_pm, + .of_match_table = exynos_usi_dt_match, + }, + .probe = exynos_usi_probe, +}; +module_platform_driver(exynos_usi_driver); + +MODULE_DESCRIPTION("Samsung USI driver"); +MODULE_AUTHOR("Sam Protsenko <semen.protsenko@linaro.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c index cd33e99249c3..32c346b72635 100644 --- a/drivers/soc/tegra/common.c +++ b/drivers/soc/tegra/common.c @@ -10,6 +10,7 @@ #include <linux/export.h> #include <linux/of.h> #include <linux/pm_opp.h> +#include <linux/pm_runtime.h> #include <soc/tegra/common.h> #include <soc/tegra/fuse.h> @@ -43,6 +44,7 @@ static int tegra_core_dev_init_opp_state(struct device *dev) { unsigned long rate; struct clk *clk; + bool rpm_enabled; int err; clk = devm_clk_get(dev, NULL); @@ -57,8 +59,31 @@ static int tegra_core_dev_init_opp_state(struct device *dev) return -EINVAL; } + /* + * Runtime PM of the device must be enabled in order to set up + * GENPD's performance properly because GENPD core checks whether + * device is suspended and this check doesn't work while RPM is + * disabled. This makes sure the OPP vote below gets cached in + * GENPD for the device. Instead, the vote is done the next time + * the device gets runtime resumed. + */ + rpm_enabled = pm_runtime_enabled(dev); + if (!rpm_enabled) + pm_runtime_enable(dev); + + /* should never happen in practice */ + if (!pm_runtime_enabled(dev)) { + dev_WARN(dev, "failed to enable runtime PM\n"); + pm_runtime_disable(dev); + return -EINVAL; + } + /* first dummy rate-setting initializes voltage vote */ err = dev_pm_opp_set_rate(dev, rate); + + if (!rpm_enabled) + pm_runtime_disable(dev); + if (err) { dev_err(dev, "failed to initialize OPP clock: %d\n", err); return err; @@ -111,9 +136,7 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev, */ err = devm_pm_opp_of_add_table(dev); if (err) { - if (err == -ENODEV) - dev_err_once(dev, "OPP table not found, please update device-tree\n"); - else + if (err != -ENODEV) dev_err(dev, "failed to add OPP table: %d\n", err); return err; diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index e714ed3b61bc..913103ee5432 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -14,6 +14,7 @@ #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/slab.h> #include <linux/sys_soc.h> @@ -181,6 +182,12 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = { }, }; +static void tegra_fuse_restore(void *base) +{ + fuse->clk = NULL; + fuse->base = base; +} + static int tegra_fuse_probe(struct platform_device *pdev) { void __iomem *base = fuse->base; @@ -188,13 +195,16 @@ static int tegra_fuse_probe(struct platform_device *pdev) struct resource *res; int err; + err = devm_add_action(&pdev->dev, tegra_fuse_restore, base); + if (err) + return err; + /* take over the memory region from the early initialization */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 
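tegra_fuse_probe() just registered tegra_fuse_restore() as a devm action before taking over the early mapping, replacing hand-rolled unwind labels. The generic shape of that pattern, with hypothetical names, looks like this:

#include <linux/device.h>

/* Hypothetical illustration: the action is registered first, so any
 * later failure in probe (or an eventual driver unbind) unwinds
 * through example_restore() automatically, in reverse registration
 * order.
 */
static void example_restore(void *data)
{
	/* put back whatever state was taken over from early init */
}

static int example_take_over(struct device *dev, void *early_state)
{
	int err;

	err = devm_add_action_or_reset(dev, example_restore, early_state);
	if (err)
		return err;

	/* now it is safe to modify the state */
	return 0;
}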
fuse->phys = res->start; fuse->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(fuse->base)) { err = PTR_ERR(fuse->base); return err; } @@ -204,19 +214,20 @@ dev_err(&pdev->dev, "failed to get FUSE clock: %ld", PTR_ERR(fuse->clk)); - fuse->base = base; return PTR_ERR(fuse->clk); } platform_set_drvdata(pdev, fuse); fuse->dev = &pdev->dev; - pm_runtime_enable(&pdev->dev); + err = devm_pm_runtime_enable(&pdev->dev); + if (err) + return err; if (fuse->soc->probe) { err = fuse->soc->probe(fuse); if (err < 0) - goto restore; + return err; } memset(&nvmem, 0, sizeof(nvmem)); @@ -240,19 +251,37 @@ err = PTR_ERR(fuse->nvmem); dev_err(&pdev->dev, "failed to register NVMEM device: %d\n", err); - goto restore; + return err; + } + + fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse"); + if (IS_ERR(fuse->rst)) { + err = PTR_ERR(fuse->rst); + dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n", + fuse->rst); + return err; + } + + /* + * The FUSE clock is enabled at boot time, hence this resume/suspend + * cycle disables the clock in addition to performing the h/w reset. + */ + err = pm_runtime_resume_and_get(&pdev->dev); + if (err) + return err; + + err = reset_control_reset(fuse->rst); + pm_runtime_put(&pdev->dev); + + if (err < 0) { + dev_err(&pdev->dev, "failed to reset FUSE: %d\n", err); + return err; } /* release the early I/O memory mapping */ iounmap(base); return 0; - -restore: - fuse->clk = NULL; - fuse->base = base; - pm_runtime_disable(&pdev->dev); - return err; } static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev) diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c index 8ec9fc5e5e4b..12503f563e36 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra20.c +++ b/drivers/soc/tegra/fuse/fuse-tegra20.c @@ -94,9 +94,28 @@ static bool dma_filter(struct dma_chan *chan, void *filter_param) return of_device_is_compatible(np, "nvidia,tegra20-apbdma"); } +static void tegra20_fuse_release_channel(void *data) +{ + struct tegra_fuse *fuse = data; + + dma_release_channel(fuse->apbdma.chan); + fuse->apbdma.chan = NULL; +} + +static void tegra20_fuse_free_coherent(void *data) +{ + struct tegra_fuse *fuse = data; + + dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt, + fuse->apbdma.phys); + fuse->apbdma.virt = NULL; + fuse->apbdma.phys = 0x0; +} + static int tegra20_fuse_probe(struct tegra_fuse *fuse) { dma_cap_mask_t mask; + int err; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -105,13 +124,21 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse) if (!fuse->apbdma.chan) return -EPROBE_DEFER; + err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel, + fuse); + if (err) + return err; + fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32), &fuse->apbdma.phys, GFP_KERNEL); - if (!fuse->apbdma.virt) { - dma_release_channel(fuse->apbdma.chan); + if (!fuse->apbdma.virt) return -ENOMEM; - } + + err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent, + fuse); + if (err) + return err; fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index ecff0c08e959..2bb1f9d6a6e6 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -43,6 +43,7 @@ struct tegra_fuse { void __iomem *base; phys_addr_t phys;
struct clk *clk; + struct reset_control *rst; u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset); u32 (*read)(struct tegra_fuse *fuse, unsigned int offset); diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 575d6d5b4294..5aceacbd8ce0 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1064,10 +1064,8 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid) return tegra_powergate_remove_clamping(id); } -static int tegra_pmc_restart_notify(struct notifier_block *this, - unsigned long action, void *data) +static void tegra_pmc_program_reboot_reason(const char *cmd) { - const char *cmd = data; u32 value; value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0); @@ -1085,6 +1083,25 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, } tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0); +} + +static int tegra_pmc_reboot_notify(struct notifier_block *this, + unsigned long action, void *data) +{ + if (action == SYS_RESTART) + tegra_pmc_program_reboot_reason(data); + + return NOTIFY_DONE; +} + +static struct notifier_block tegra_pmc_reboot_notifier = { + .notifier_call = tegra_pmc_reboot_notify, +}; + +static int tegra_pmc_restart_notify(struct notifier_block *this, + unsigned long action, void *data) +{ + u32 value; /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */ value = tegra_pmc_readl(pmc, PMC_CNTRL); @@ -1353,7 +1370,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np) if (!genpd) return -ENOMEM; - genpd->name = np->name; + genpd->name = "core"; genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state; genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state; @@ -2890,6 +2907,14 @@ static int tegra_pmc_probe(struct platform_device *pdev) goto cleanup_sysfs; } + err = devm_register_reboot_notifier(&pdev->dev, + &tegra_pmc_reboot_notifier); + if (err) { + dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", + err); + goto cleanup_debugfs; + } + err = register_restart_handler(&tegra_pmc_restart_handler); if (err) { dev_err(&pdev->dev, "unable to register restart handler, %d\n", @@ -2963,7 +2988,7 @@ static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume); static const char * const tegra20_powergates[] = { [TEGRA_POWERGATE_CPU] = "cpu", - [TEGRA_POWERGATE_3D] = "3d", + [TEGRA_POWERGATE_3D] = "td", [TEGRA_POWERGATE_VENC] = "venc", [TEGRA_POWERGATE_VDEC] = "vdec", [TEGRA_POWERGATE_PCIE] = "pcie", @@ -3071,7 +3096,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { static const char * const tegra30_powergates[] = { [TEGRA_POWERGATE_CPU] = "cpu0", - [TEGRA_POWERGATE_3D] = "3d0", + [TEGRA_POWERGATE_3D] = "td", [TEGRA_POWERGATE_VENC] = "venc", [TEGRA_POWERGATE_VDEC] = "vdec", [TEGRA_POWERGATE_PCIE] = "pcie", @@ -3083,7 +3108,7 @@ static const char * const tegra30_powergates[] = { [TEGRA_POWERGATE_CPU2] = "cpu2", [TEGRA_POWERGATE_CPU3] = "cpu3", [TEGRA_POWERGATE_CELP] = "celp", - [TEGRA_POWERGATE_3D1] = "3d1", + [TEGRA_POWERGATE_3D1] = "td2", }; static const u8 tegra30_cpu_powergates[] = { @@ -3132,7 +3157,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { static const char * const tegra114_powergates[] = { [TEGRA_POWERGATE_CPU] = "crail", - [TEGRA_POWERGATE_3D] = "3d", + [TEGRA_POWERGATE_3D] = "td", [TEGRA_POWERGATE_VENC] = "venc", [TEGRA_POWERGATE_VDEC] = "vdec", [TEGRA_POWERGATE_MPE] = "mpe", diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c index 
b8ce9fd0650d..6a2f90ab9d3e 100644 --- a/drivers/soc/tegra/regulators-tegra20.c +++ b/drivers/soc/tegra/regulators-tegra20.c @@ -16,7 +16,9 @@ #include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include <linux/suspend.h> +#include <soc/tegra/fuse.h> #include <soc/tegra/pmc.h> struct tegra_regulator_coupler { @@ -25,9 +27,12 @@ struct tegra_regulator_coupler { struct regulator_dev *cpu_rdev; struct regulator_dev *rtc_rdev; struct notifier_block reboot_notifier; + struct notifier_block suspend_notifier; int core_min_uV, cpu_min_uV; bool sys_reboot_mode_req; bool sys_reboot_mode; + bool sys_suspend_mode_req; + bool sys_suspend_mode; }; static inline struct tegra_regulator_coupler * @@ -105,6 +110,28 @@ static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev, return 150000; } +static int tegra20_cpu_nominal_uV(void) +{ + switch (tegra_sku_info.soc_speedo_id) { + case 0: + return 1100000; + case 1: + return 1025000; + default: + return 1125000; + } +} + +static int tegra20_core_nominal_uV(void) +{ + switch (tegra_sku_info.soc_speedo_id) { + default: + return 1225000; + case 2: + return 1300000; + } +} + static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra, struct regulator_dev *core_rdev, struct regulator_dev *rtc_rdev, @@ -144,6 +171,11 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra, if (err) return err; + /* prepare voltage level for suspend */ + if (tegra->sys_suspend_mode) + core_min_uV = clamp(tegra20_core_nominal_uV(), + core_min_uV, core_max_uV); + core_uV = regulator_get_voltage_rdev(core_rdev); if (core_uV < 0) return core_uV; @@ -279,6 +311,11 @@ static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra, if (tegra->sys_reboot_mode) cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV); + /* prepare voltage level for suspend */ + if (tegra->sys_suspend_mode) + cpu_min_uV = clamp(tegra20_cpu_nominal_uV(), + cpu_min_uV, cpu_max_uV); + if (cpu_min_uV > cpu_uV) { err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev, cpu_uV, cpu_min_uV); @@ -320,6 +357,7 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler, } tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req); + tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req); if (rdev == cpu_rdev) return tegra20_cpu_voltage_update(tegra, cpu_rdev, @@ -334,6 +372,63 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler, return -EPERM; } +static int tegra20_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra, + bool sys_suspend_mode) +{ + int err; + + if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev) + return 0; + + /* + * All power domains are enabled early during resume from suspend + * by GENPD core. Domains like VENC may require a higher voltage + * when enabled during resume from suspend. This also prepares + * hardware for resuming from LP0. 
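The clamp() used above means the nominal level is only honored within the regulator's currently allowed range, so an over-constrained machine degrades gracefully instead of failing the suspend. A minimal sketch of the semantics, with made-up numbers:

#include <linux/minmax.h>

/* Illustration only: with constraints [950000, 1300000] uV a nominal
 * vote of 1225000 uV passes through unchanged; if the machine capped
 * max_uV at 1100000, the vote would be limited to 1100000 rather than
 * rejected.
 */
static int example_suspend_vote(int nominal_uV, int min_uV, int max_uV)
{
	return clamp(nominal_uV, min_uV, max_uV);
}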
+ */ + + WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode); + + err = regulator_sync_voltage_rdev(tegra->cpu_rdev); + if (err) + return err; + + err = regulator_sync_voltage_rdev(tegra->core_rdev); + if (err) + return err; + + return 0; +} + +static int tegra20_regulator_suspend(struct notifier_block *notifier, + unsigned long mode, void *arg) +{ + struct tegra_regulator_coupler *tegra; + int ret = 0; + + tegra = container_of(notifier, struct tegra_regulator_coupler, + suspend_notifier); + + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + case PM_SUSPEND_PREPARE: + ret = tegra20_regulator_prepare_suspend(tegra, true); + break; + + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + ret = tegra20_regulator_prepare_suspend(tegra, false); + break; + } + + if (ret) + pr_err("failed to prepare regulators: %d\n", ret); + + return notifier_from_errno(ret); +} + static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra, bool sys_reboot_mode) { @@ -444,6 +539,7 @@ static struct tegra_regulator_coupler tegra20_coupler = { .balance_voltage = tegra20_regulator_balance_voltage, }, .reboot_notifier.notifier_call = tegra20_regulator_reboot, + .suspend_notifier.notifier_call = tegra20_regulator_suspend, }; static int __init tegra_regulator_coupler_init(void) @@ -456,6 +552,9 @@ static int __init tegra_regulator_coupler_init(void) err = register_reboot_notifier(&tegra20_coupler.reboot_notifier); WARN_ON(err); + err = register_pm_notifier(&tegra20_coupler.suspend_notifier); + WARN_ON(err); + return regulator_coupler_register(&tegra20_coupler.coupler); } arch_initcall(tegra_regulator_coupler_init); diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c index e74bbc9c7859..8fd43c689134 100644 --- a/drivers/soc/tegra/regulators-tegra30.c +++ b/drivers/soc/tegra/regulators-tegra30.c @@ -16,6 +16,7 @@ #include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include <linux/suspend.h> #include <soc/tegra/fuse.h> #include <soc/tegra/pmc.h> @@ -25,9 +26,12 @@ struct tegra_regulator_coupler { struct regulator_dev *core_rdev; struct regulator_dev *cpu_rdev; struct notifier_block reboot_notifier; + struct notifier_block suspend_notifier; int core_min_uV, cpu_min_uV; bool sys_reboot_mode_req; bool sys_reboot_mode; + bool sys_suspend_mode_req; + bool sys_suspend_mode; }; static inline struct tegra_regulator_coupler * @@ -113,6 +117,52 @@ static int tegra30_core_cpu_limit(int cpu_uV) return -EINVAL; } +static int tegra30_cpu_nominal_uV(void) +{ + switch (tegra_sku_info.cpu_speedo_id) { + case 10 ... 11: + return 850000; + + case 9: + return 912000; + + case 1 ... 3: + case 7 ... 8: + return 1050000; + + default: + return 1125000; + + case 4 ... 6: + case 12 ... 
13: + return 1237000; + } +} + +static int tegra30_core_nominal_uV(void) +{ + switch (tegra_sku_info.soc_speedo_id) { + case 0: + return 1200000; + + case 1: + if (tegra_sku_info.cpu_speedo_id != 7 && + tegra_sku_info.cpu_speedo_id != 8) + return 1200000; + + fallthrough; + + case 2: + if (tegra_sku_info.cpu_speedo_id != 13) + return 1300000; + + return 1350000; + + default: + return 1250000; + } +} + static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra, struct regulator_dev *cpu_rdev, struct regulator_dev *core_rdev) @@ -168,6 +218,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra, if (err) return err; + /* prepare voltage level for suspend */ + if (tegra->sys_suspend_mode) + core_min_uV = clamp(tegra30_core_nominal_uV(), + core_min_uV, core_max_uV); + core_uV = regulator_get_voltage_rdev(core_rdev); if (core_uV < 0) return core_uV; @@ -223,6 +278,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra, if (tegra->sys_reboot_mode) cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV); + /* prepare voltage level for suspend */ + if (tegra->sys_suspend_mode) + cpu_min_uV = clamp(tegra30_cpu_nominal_uV(), + cpu_min_uV, cpu_max_uV); + if (core_min_limited_uV > core_uV) { pr_err("core voltage constraint violated: %d %d %d\n", core_uV, core_min_limited_uV, cpu_uV); @@ -292,10 +352,68 @@ static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler, } tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req); + tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req); return tegra30_voltage_update(tegra, cpu_rdev, core_rdev); } +static int tegra30_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra, + bool sys_suspend_mode) +{ + int err; + + if (!tegra->core_rdev || !tegra->cpu_rdev) + return 0; + + /* + * All power domains are enabled early during resume from suspend + * by GENPD core. Domains like VENC may require a higher voltage + * when enabled during resume from suspend. This also prepares + * hardware for resuming from LP0. 
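Both couplers hook the same three prepare and three post events. The generic shape of such a PM notifier (names below are illustrative, not from the patch) is:

#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_notify(struct notifier_block *nb,
			     unsigned long mode, void *arg)
{
	int ret = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_RESTORE_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = 0;	/* raise votes to nominal levels here */
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		ret = 0;	/* drop back to the active votes here */
		break;
	}

	/* a non-zero errno here vetoes the suspend transition */
	return notifier_from_errno(ret);
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notify,
};
/* paired with register_pm_notifier(&example_pm_nb) at init time */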
+ */ + + WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode); + + err = regulator_sync_voltage_rdev(tegra->cpu_rdev); + if (err) + return err; + + err = regulator_sync_voltage_rdev(tegra->core_rdev); + if (err) + return err; + + return 0; +} + +static int tegra30_regulator_suspend(struct notifier_block *notifier, + unsigned long mode, void *arg) +{ + struct tegra_regulator_coupler *tegra; + int ret = 0; + + tegra = container_of(notifier, struct tegra_regulator_coupler, + suspend_notifier); + + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_RESTORE_PREPARE: + case PM_SUSPEND_PREPARE: + ret = tegra30_regulator_prepare_suspend(tegra, true); + break; + + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + ret = tegra30_regulator_prepare_suspend(tegra, false); + break; + } + + if (ret) + pr_err("failed to prepare regulators: %d\n", ret); + + return notifier_from_errno(ret); +} + static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra, bool sys_reboot_mode) { @@ -395,6 +513,7 @@ static struct tegra_regulator_coupler tegra30_coupler = { .balance_voltage = tegra30_regulator_balance_voltage, }, .reboot_notifier.notifier_call = tegra30_regulator_reboot, + .suspend_notifier.notifier_call = tegra30_regulator_suspend, }; static int __init tegra_regulator_coupler_init(void) @@ -407,6 +526,9 @@ static int __init tegra_regulator_coupler_init(void) err = register_reboot_notifier(&tegra30_coupler.reboot_notifier); WARN_ON(err); + err = register_pm_notifier(&tegra30_coupler.suspend_notifier); + WARN_ON(err); + return regulator_coupler_register(&tegra30_coupler.coupler); } arch_initcall(tegra_regulator_coupler_init); diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index fd91129de6e5..b6b2150aca4e 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -40,7 +40,8 @@ static const struct k3_soc_id { { 0xBB5A, "AM65X" }, { 0xBB64, "J721E" }, { 0xBB6D, "J7200" }, - { 0xBB38, "AM64X" } + { 0xBB38, "AM64X" }, + { 0xBB75, "J721S2"}, }; static int diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index 591d14ebcb11..700d8eecd8c4 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -646,31 +646,31 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node) } dma->reg_global = pktdma_get_regs(dma, node, 0, &size); - if (!dma->reg_global) - return -ENODEV; + if (IS_ERR(dma->reg_global)) + return PTR_ERR(dma->reg_global); if (size < sizeof(struct reg_global)) { dev_err(kdev->dev, "bad size %pa for global regs\n", &size); return -ENODEV; } dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size); - if (!dma->reg_tx_chan) - return -ENODEV; + if (IS_ERR(dma->reg_tx_chan)) + return PTR_ERR(dma->reg_tx_chan); max_tx_chan = size / sizeof(struct reg_chan); dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size); - if (!dma->reg_rx_chan) - return -ENODEV; + if (IS_ERR(dma->reg_rx_chan)) + return PTR_ERR(dma->reg_rx_chan); max_rx_chan = size / sizeof(struct reg_chan); dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size); - if (!dma->reg_tx_sched) - return -ENODEV; + if (IS_ERR(dma->reg_tx_sched)) + return PTR_ERR(dma->reg_tx_sched); max_tx_sched = size / sizeof(struct reg_tx_sched); dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size); - if (!dma->reg_rx_flow) - return -ENODEV; + if (IS_ERR(dma->reg_rx_flow)) + return PTR_ERR(dma->reg_rx_flow); max_rx_flow = size / sizeof(struct reg_rx_flow); dma->rx_priority = DMA_PRIO_DEFAULT; diff --git 
a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index 49da387d7749..b36779309e49 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -129,7 +129,7 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node) clks_np = of_get_child_by_name(cfg_node, "clocks"); if (!clks_np) { - dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np); + dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node); return -ENODEV; } diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c index 226d343f0a6a..fcce2433bd6d 100644 --- a/drivers/soc/xilinx/zynqmp_pm_domains.c +++ b/drivers/soc/xilinx/zynqmp_pm_domains.c @@ -20,8 +20,6 @@ #include <linux/firmware/xlnx-zynqmp.h> #define ZYNQMP_NUM_DOMAINS (100) -/* Flag stating if PM nodes mapped to the PM domain has been requested */ -#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0) static int min_capability; @@ -29,14 +27,17 @@ static int min_capability; * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain * @gpd: Generic power domain * @node_id: PM node ID corresponding to device inside PM domain - * @flags: ZynqMP PM domain flags + * @requested: The PM node mapped to the PM domain has been requested */ struct zynqmp_pm_domain { struct generic_pm_domain gpd; u32 node_id; - u8 flags; + bool requested; }; +#define to_zynqmp_pm_domain(pm_domain) \ + container_of(pm_domain, struct zynqmp_pm_domain, gpd) + /** * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source * path @@ -71,21 +72,23 @@ static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used) */ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain) { + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); int ret; - struct zynqmp_pm_domain *pd; - pd = container_of(domain, struct zynqmp_pm_domain, gpd); ret = zynqmp_pm_set_requirement(pd->node_id, ZYNQMP_PM_CAPABILITY_ACCESS, ZYNQMP_PM_MAX_QOS, ZYNQMP_PM_REQUEST_ACK_BLOCKING); if (ret) { - pr_err("%s() %s set requirement for node %d failed: %d\n", - __func__, domain->name, pd->node_id, ret); + dev_err(&domain->dev, + "failed to set requirement to 0x%x for PM node id %d: %d\n", + ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret); return ret; } - pr_debug("%s() Powered on %s domain\n", __func__, domain->name); + dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n", + ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id); + return 0; } @@ -100,18 +103,16 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain) */ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain) { + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); int ret; struct pm_domain_data *pdd, *tmp; - struct zynqmp_pm_domain *pd; u32 capabilities = min_capability; bool may_wakeup; - pd = container_of(domain, struct zynqmp_pm_domain, gpd); - /* If domain is already released there is nothing to be done */ - if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) { - pr_debug("%s() %s domain is already released\n", - __func__, domain->name); + if (!pd->requested) { + dev_dbg(&domain->dev, "PM node id %d is already released\n", + pd->node_id); return 0; } @@ -128,17 +129,16 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain) ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0, ZYNQMP_PM_REQUEST_ACK_NO); - /** - * If powering down of any node inside this domain fails, - * report and return the error - */ if (ret) { - pr_err("%s() %s set requirement for node %d failed: %d\n", - __func__, domain->name, pd->node_id, ret); 
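The new to_zynqmp_pm_domain() macro is the standard container_of() wrapper. For readers less familiar with the idiom, a self-contained sketch with hypothetical type names:

#include <linux/kernel.h>
#include <linux/pm_domain.h>

struct example_pm_domain {
	struct generic_pm_domain gpd;	/* embedded base object */
	int private_state;
};

/* Recover the wrapper from a pointer to the embedded member; this is
 * exactly what to_zynqmp_pm_domain() expands to for its own types.
 */
#define to_example_pm_domain(domain) \
	container_of(domain, struct example_pm_domain, gpd)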
+ dev_err(&domain->dev, + "failed to set requirement to 0x%x for PM node id %d: %d\n", + capabilities, pd->node_id, ret); return ret; } - pr_debug("%s() Powered off %s domain\n", __func__, domain->name); + dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n", + capabilities, pd->node_id); + return 0; } @@ -152,10 +152,14 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain) static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain, struct device *dev) { + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); + struct device_link *link; int ret; - struct zynqmp_pm_domain *pd; - pd = container_of(domain, struct zynqmp_pm_domain, gpd); + link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY); + if (!link) + dev_dbg(&domain->dev, "failed to create device link for %s\n", + dev_name(dev)); /* If this is not the first device to attach there is nothing to do */ if (domain->device_count) @@ -163,17 +167,17 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain, ret = zynqmp_pm_request_node(pd->node_id, 0, 0, ZYNQMP_PM_REQUEST_ACK_BLOCKING); - /* If requesting a node fails print and return the error */ if (ret) { - pr_err("%s() %s request failed for node %d: %d\n", - __func__, domain->name, pd->node_id, ret); + dev_err(&domain->dev, "%s request failed for node %d: %d\n", + domain->name, pd->node_id, ret); return ret; } - pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED; + pd->requested = true; + + dev_dbg(&domain->dev, "%s requested PM node id %d\n", + dev_name(dev), pd->node_id); - pr_debug("%s() %s attached to %s domain\n", __func__, - dev_name(dev), domain->name); return 0; } @@ -185,27 +189,24 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain, static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain, struct device *dev) { + struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain); int ret; - struct zynqmp_pm_domain *pd; - - pd = container_of(domain, struct zynqmp_pm_domain, gpd); /* If this is not the last device to detach there is nothing to do */ if (domain->device_count) return; ret = zynqmp_pm_release_node(pd->node_id); - /* If releasing a node fails print the error and return */ if (ret) { - pr_err("%s() %s release failed for node %d: %d\n", - __func__, domain->name, pd->node_id, ret); + dev_err(&domain->dev, "failed to release PM node id %d: %d\n", + pd->node_id, ret); return; } - pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED; + pd->requested = false; - pr_debug("%s() %s detached from %s domain\n", __func__, - dev_name(dev), domain->name); + dev_dbg(&domain->dev, "%s released PM node id %d\n", + dev_name(dev), pd->node_id); } static struct generic_pm_domain *zynqmp_gpd_xlate @@ -215,7 +216,7 @@ static struct generic_pm_domain *zynqmp_gpd_xlate unsigned int i, idx = genpdspec->args[0]; struct zynqmp_pm_domain *pd; - pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd); + pd = to_zynqmp_pm_domain(genpd_data->domains[0]); if (genpdspec->args_count != 1) return ERR_PTR(-EINVAL); @@ -299,9 +300,19 @@ static int zynqmp_gpd_remove(struct platform_device *pdev) return 0; } +static void zynqmp_gpd_sync_state(struct device *dev) +{ + int ret; + + ret = zynqmp_pm_init_finalize(); + if (ret) + dev_warn(dev, "failed to release power management to firmware\n"); +} + static struct platform_driver zynqmp_power_domain_driver = { .driver = { .name = "zynqmp_power_controller", + .sync_state = zynqmp_gpd_sync_state, }, .probe = zynqmp_gpd_probe, .remove = zynqmp_gpd_remove, diff --git 
a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index c556623dae02..f8c301984d4f 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -178,7 +178,6 @@ static int zynqmp_pm_probe(struct platform_device *pdev) u32 pm_api_version; struct mbox_client *client; - zynqmp_pm_init_finalize(); zynqmp_pm_get_api_version(&pm_api_version); /* Check PM API version number */ diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index 83796a4ead34..fe82f3575df4 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -156,7 +156,9 @@ static int rpcif_spi_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD; ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; - rpcif_hw_init(rpc, false); + error = rpcif_hw_init(rpc, false); + if (error) + return error; error = spi_register_controller(ctlr); if (error) { diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index e8204e155484..2a03739a0c60 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -18,12 +18,15 @@ #include <linux/kthread.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/reset.h> #include <linux/spi/spi.h> +#include <soc/tegra/common.h> + #define SLINK_COMMAND 0x000 #define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) #define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) @@ -680,7 +683,7 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi, bits_per_word = t->bits_per_word; speed = t->speed_hz; if (speed != tspi->cur_speed) { - clk_set_rate(tspi->clk, speed * 4); + dev_pm_opp_set_rate(tspi->dev, speed * 4); tspi->cur_speed = speed; } @@ -1066,6 +1069,10 @@ static int tegra_slink_probe(struct platform_device *pdev) goto exit_free_master; } + ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); + if (ret) + goto exit_free_master; + tspi->max_buf_size = SLINK_FIFO_DEPTH << 2; tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN; diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c index ed4c1250b303..859f60a70904 100644 --- a/drivers/staging/media/tegra-vde/vde.c +++ b/drivers/staging/media/tegra-vde/vde.c @@ -20,6 +20,7 @@ #include <linux/slab.h> #include <linux/uaccess.h> +#include <soc/tegra/common.h> #include <soc/tegra/pmc.h> #include "uapi.h" @@ -920,13 +921,17 @@ static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev) struct tegra_vde *vde = dev_get_drvdata(dev); int err; - err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC); - if (err) { - dev_err(dev, "Failed to power down HW: %d\n", err); - return err; + if (!dev->pm_domain) { + err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC); + if (err) { + dev_err(dev, "Failed to power down HW: %d\n", err); + return err; + } } clk_disable_unprepare(vde->clk); + reset_control_release(vde->rst); + reset_control_release(vde->rst_mc); return 0; } @@ -936,14 +941,45 @@ static __maybe_unused int tegra_vde_runtime_resume(struct device *dev) struct tegra_vde *vde = dev_get_drvdata(dev); int err; - err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC, - vde->clk, vde->rst); + err = reset_control_acquire(vde->rst_mc); if (err) { - dev_err(dev, "Failed to power up HW : %d\n", err); + dev_err(dev, "Failed to acquire mc reset: %d\n", err); return err; } + err = reset_control_acquire(vde->rst); + if (err) { + dev_err(dev, "Failed 
to acquire reset: %d\n", err); + goto release_mc_reset; + } + + if (!dev->pm_domain) { + err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC, + vde->clk, vde->rst); + if (err) { + dev_err(dev, "Failed to power up HW: %d\n", err); + goto release_reset; + } + } else { + /* + * tegra_powergate_sequence_power_up() leaves clocks enabled, + * while GENPD does not. + */ + err = clk_prepare_enable(vde->clk); + if (err) { + dev_err(dev, "Failed to enable clock: %d\n", err); + goto release_reset; + } + } + return 0; + +release_reset: + reset_control_release(vde->rst); +release_mc_reset: + reset_control_release(vde->rst_mc); + + return err; } static int tegra_vde_probe(struct platform_device *pdev) @@ -1001,14 +1037,14 @@ static int tegra_vde_probe(struct platform_device *pdev) return err; } - vde->rst = devm_reset_control_get(dev, NULL); + vde->rst = devm_reset_control_get_exclusive_released(dev, NULL); if (IS_ERR(vde->rst)) { err = PTR_ERR(vde->rst); dev_err(dev, "Could not get VDE reset %d\n", err); return err; } - vde->rst_mc = devm_reset_control_get_optional(dev, "mc"); + vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc"); if (IS_ERR(vde->rst_mc)) { err = PTR_ERR(vde->rst_mc); dev_err(dev, "Could not get MC reset %d\n", err); @@ -1026,6 +1062,12 @@ static int tegra_vde_probe(struct platform_device *pdev) return err; } + err = devm_tegra_core_dev_init_opp_table_common(dev); + if (err) { + dev_err(dev, "Could not initialize OPP table: %d\n", err); + return err; + } + vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0); if (!vde->iram_pool) { dev_err(dev, "Could not get IRAM pool\n"); @@ -1133,8 +1175,7 @@ static void tegra_vde_shutdown(struct platform_device *pdev) * On some devices bootloader isn't ready to a power-gated VDE on * a warm-reboot, machine will hang in that case. */ - if (pm_runtime_status_suspended(&pdev->dev)) - tegra_vde_runtime_resume(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); } static __maybe_unused int tegra_vde_pm_suspend(struct device *dev) diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile index 66b8a17f14c4..a6eff388d300 100644 --- a/drivers/tee/optee/Makefile +++ b/drivers/tee/optee/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_OPTEE) += optee.o optee-objs += core.o optee-objs += call.o +optee-objs += notif.o optee-objs += rpc.o optee-objs += supp.o optee-objs += device.o diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 2a66a5203d2f..1ca320885fad 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -157,6 +157,7 @@ void optee_remove_common(struct optee *optee) /* Unregister OP-TEE specific client devices on TEE bus */ optee_unregister_devices(); + optee_notif_uninit(optee); /* * The two devices have to be unregistered before we can free the * other resources.
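The resume path above now has to serve two power models at once. Condensed to its central decision (a sketch using the same calls, not a drop-in replacement):

#include <linux/clk.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>

/* When a GENPD is attached, the domain core has already powered the
 * partition, so only the clock is left to manage; the legacy helper
 * otherwise powers the partition and leaves the clock enabled itself.
 */
static int example_vdec_power_up(struct device *dev, struct clk *clk,
				 struct reset_control *rst)
{
	if (dev->pm_domain)
		return clk_prepare_enable(clk);

	return tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
						 clk, rst);
}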
@@ -165,7 +166,6 @@ void optee_remove_common(struct optee *optee) tee_device_unregister(optee->teedev); tee_shm_pool_free(optee->pool); - optee_wait_queue_exit(&optee->wait_queue); optee_supp_uninit(&optee->supp); mutex_destroy(&optee->call_queue.mutex); } diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index d8c8683863aa..20a1b1a3d965 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -855,9 +855,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) mutex_init(&optee->ffa.mutex); mutex_init(&optee->call_queue.mutex); INIT_LIST_HEAD(&optee->call_queue.waiters); - optee_wait_queue_init(&optee->wait_queue); optee_supp_init(&optee->supp); ffa_dev_set_drvdata(ffa_dev, optee); + rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE); + if (rc) { + optee_ffa_remove(ffa_dev); + return rc; + } rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); if (rc) { diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c new file mode 100644 index 000000000000..a28fa03dcd0e --- /dev/null +++ b/drivers/tee/optee/notif.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2021, Linaro Limited + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/arm-smccc.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/tee_drv.h> +#include "optee_private.h" + +struct notif_entry { + struct list_head link; + struct completion c; + u_int key; +}; + +static bool have_key(struct optee *optee, u_int key) +{ + struct notif_entry *entry; + + list_for_each_entry(entry, &optee->notif.db, link) + if (entry->key == key) + return true; + + return false; +} + +int optee_notif_wait(struct optee *optee, u_int key) +{ + unsigned long flags; + struct notif_entry *entry; + int rc = 0; + + if (key > optee->notif.max_key) + return -EINVAL; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + init_completion(&entry->c); + entry->key = key; + + spin_lock_irqsave(&optee->notif.lock, flags); + + /* + * If the bit is already set it means that the key has already + * been posted and we must not wait. + */ + if (test_bit(key, optee->notif.bitmap)) { + clear_bit(key, optee->notif.bitmap); + goto out; + } + + /* + * Check if someone is already waiting for this key. If there is + * it's a programming error. + */ + if (have_key(optee, key)) { + rc = -EBUSY; + goto out; + } + + list_add_tail(&entry->link, &optee->notif.db); + + /* + * Unlock temporarily and wait for completion. 
+ */ + spin_unlock_irqrestore(&optee->notif.lock, flags); + wait_for_completion(&entry->c); + spin_lock_irqsave(&optee->notif.lock, flags); + + list_del(&entry->link); +out: + spin_unlock_irqrestore(&optee->notif.lock, flags); + + kfree(entry); + + return rc; +} + +int optee_notif_send(struct optee *optee, u_int key) +{ + unsigned long flags; + struct notif_entry *entry; + + if (key > optee->notif.max_key) + return -EINVAL; + + spin_lock_irqsave(&optee->notif.lock, flags); + + list_for_each_entry(entry, &optee->notif.db, link) + if (entry->key == key) { + complete(&entry->c); + goto out; + } + + /* Only set the bit in case there was nobody waiting */ + set_bit(key, optee->notif.bitmap); +out: + spin_unlock_irqrestore(&optee->notif.lock, flags); + + return 0; +} + +int optee_notif_init(struct optee *optee, u_int max_key) +{ + spin_lock_init(&optee->notif.lock); + INIT_LIST_HEAD(&optee->notif.db); + optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL); + if (!optee->notif.bitmap) + return -ENOMEM; + + optee->notif.max_key = max_key; + + return 0; +} + +void optee_notif_uninit(struct optee *optee) +{ + kfree(optee->notif.bitmap); +} diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h index 2422e185d400..70e9cc2ee96b 100644 --- a/drivers/tee/optee/optee_msg.h +++ b/drivers/tee/optee/optee_msg.h @@ -318,6 +318,13 @@ struct optee_msg_arg { * [in] param[0].u.rmem.shm_ref holds shared memory reference * [in] param[0].u.rmem.offs 0 * [in] param[0].u.rmem.size 0 + * + * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing + * of a driver. + * + * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that normal world + * is from now on unable to process asynchronous notifications. Typically + * used when the driver is shut down. */ #define OPTEE_MSG_CMD_OPEN_SESSION 0 #define OPTEE_MSG_CMD_INVOKE_COMMAND 1 @@ -325,6 +332,8 @@ struct optee_msg_arg { #define OPTEE_MSG_CMD_CANCEL 3 #define OPTEE_MSG_CMD_REGISTER_SHM 4 #define OPTEE_MSG_CMD_UNREGISTER_SHM 5 +#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6 +#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7 #define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 #endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 6660e05298db..46f74ab07c7e 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -28,6 +28,13 @@ #define TEEC_ORIGIN_COMMS 0x00000002 +/* + * This value should be larger than the number of threads in secure world to + * meet the need from secure world. The number of threads in secure world + * is usually not even close to 255, so we should be safe for now.
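One property of optee_notif_wait() and optee_notif_send() above is worth spelling out: they are order-independent for a given key. A sketch (not part of the patch) of why a send-before-wait does not deadlock:

/* If the send arrives first, no waiter is found on the list, so the
 * key is parked in the bitmap; the later wait sees the bit, clears it
 * and returns without sleeping. If the wait arrives first, the send
 * finds it on the list and completes it. Either way the waiter wakes
 * exactly once.
 */
static int example_rendezvous(struct optee *optee, u_int key)
{
	int rc;

	rc = optee_notif_send(optee, key);	/* no waiter yet: sets bit */
	if (rc)
		return rc;

	return optee_notif_wait(optee, key);	/* consumes bit, no sleep */
}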
+ */ +#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255 + typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, @@ -44,10 +51,13 @@ struct optee_call_queue { struct list_head waiters; }; -struct optee_wait_queue { - /* Serializes access to this struct */ - struct mutex mu; +struct optee_notif { + u_int max_key; + struct tee_context *ctx; + /* Serializes access to the elements below in this struct */ + spinlock_t lock; struct list_head db; + u_long *bitmap; }; /** @@ -79,6 +89,7 @@ struct optee_smc { optee_invoke_fn *invoke_fn; void *memremaped_shm; u32 sec_caps; + unsigned int notif_irq; }; /** @@ -129,8 +140,7 @@ struct optee_ops { * @smc: specific to SMC ABI * @ffa: specific to FF-A ABI * @call_queue: queue of threads waiting to call @invoke_fn - * @wait_queue: queue of threads from secure world waiting for a - * secure world sync object + * @notif: notification synchronization struct * @supp: supplicant synchronization struct for RPC to supplicant * @pool: shared memory pool * @rpc_arg_count: If > 0 number of RPC parameters to make room for @@ -147,7 +157,7 @@ struct optee { struct optee_ffa ffa; }; struct optee_call_queue call_queue; - struct optee_wait_queue wait_queue; + struct optee_notif notif; struct optee_supp supp; struct tee_shm_pool *pool; unsigned int rpc_arg_count; @@ -185,8 +195,10 @@ struct optee_call_ctx { size_t num_entries; }; -void optee_wait_queue_init(struct optee_wait_queue *wq); -void optee_wait_queue_exit(struct optee_wait_queue *wq); +int optee_notif_init(struct optee *optee, u_int max_key); +void optee_notif_uninit(struct optee *optee); +int optee_notif_wait(struct optee *optee, u_int key); +int optee_notif_send(struct optee *optee, u_int key); u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct tee_param *param); diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h index b8275140cef8..f3f06e0994a7 100644 --- a/drivers/tee/optee/optee_rpc_cmd.h +++ b/drivers/tee/optee/optee_rpc_cmd.h @@ -28,24 +28,27 @@ #define OPTEE_RPC_CMD_GET_TIME 3 /* - * Wait queue primitive, helper for secure world to implement a wait queue. + * Notification from/to secure world. * - * If secure world needs to wait for a secure world mutex it issues a sleep - * request instead of spinning in secure world. Conversely is a wakeup - * request issued when a secure world mutex with a thread waiting thread is - * unlocked. + * If secure world needs to wait for something, for instance a mutex, it + * does a notification wait request instead of spinning in secure world. + * Conversely, a synchronous notification can be sent when a secure + * world mutex with a waiting thread is unlocked. + * - * Waiting on a key + * This interface can also be used to wait for an asynchronous notification + * which instead is sent via a non-secure interrupt.
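That non-secure interrupt is serviced outside this hunk, in the SMC ABI code. As a sketch of how the pieces are meant to compose, with a hypothetical helper get_async_notif_value() standing in for the real SMC wrapper:

#include <linux/interrupt.h>

/* hypothetical: wraps OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, see below */
static u32 get_async_notif_value(struct optee *optee, bool *value_valid,
				 bool *value_pending);

static irqreturn_t example_notif_irq_handler(int irq, void *dev_id)
{
	struct optee *optee = dev_id;
	bool value_valid, value_pending;
	u32 value;

	do {
		value = get_async_notif_value(optee, &value_valid,
					      &value_pending);
		if (!value_valid)
			break;

		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF) {
			/* schedule the yielding bottom-half call here */
		} else {
			optee_notif_send(optee, value);
		}
	} while (value_pending);

	return IRQ_HANDLED;
}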
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
index b8275140cef8..f3f06e0994a7 100644
--- a/drivers/tee/optee/optee_rpc_cmd.h
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -28,24 +28,27 @@
 #define OPTEE_RPC_CMD_GET_TIME		3
 
 /*
- * Wait queue primitive, helper for secure world to implement a wait queue.
+ * Notification from/to secure world.
  *
- * If secure world needs to wait for a secure world mutex it issues a sleep
- * request instead of spinning in secure world. Conversely is a wakeup
- * request issued when a secure world mutex with a thread waiting thread is
- * unlocked.
+ * If secure world needs to wait for something, for instance a mutex, it
+ * does a notification wait request instead of spinning in secure world.
+ * Conversely, a synchronous notification can be sent when a secure world
+ * mutex with a waiting thread is unlocked.
  *
- * Waiting on a key
- * [in] value[0].a	    OPTEE_RPC_WAIT_QUEUE_SLEEP
- * [in] value[0].b	    Wait key
+ * This interface can also be used to wait for an asynchronous notification
+ * which instead is sent via a non-secure interrupt.
  *
- * Waking up a key
- * [in] value[0].a	    OPTEE_RPC_WAIT_QUEUE_WAKEUP
- * [in] value[0].b	    Wakeup key
+ * Waiting on notification
+ * [in] value[0].a	    OPTEE_RPC_NOTIFICATION_WAIT
+ * [in] value[0].b	    notification value
+ *
+ * Sending a synchronous notification
+ * [in] value[0].a	    OPTEE_RPC_NOTIFICATION_SEND
+ * [in] value[0].b	    notification value
  */
-#define OPTEE_RPC_CMD_WAIT_QUEUE	4
-#define OPTEE_RPC_WAIT_QUEUE_SLEEP	0
-#define OPTEE_RPC_WAIT_QUEUE_WAKEUP	1
+#define OPTEE_RPC_CMD_NOTIFICATION	4
+#define OPTEE_RPC_NOTIFICATION_WAIT	0
+#define OPTEE_RPC_NOTIFICATION_SEND	1
 
 /*
  * Suspend execution
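Under this layout a single value parameter selects the operation in value[0].a and carries the notification value in value[0].b. A minimal stand-alone dispatcher in that shape; the driver's handler in rpc.c further down does the same with real tee_param structures, and the stub names here are invented:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define OPTEE_RPC_NOTIFICATION_WAIT	0
#define OPTEE_RPC_NOTIFICATION_SEND	1

/* One value parameter, laid out as documented above. */
struct rpc_value_param {
	uint64_t a;	/* OPTEE_RPC_NOTIFICATION_WAIT or _SEND */
	uint64_t b;	/* the notification value */
	uint64_t c;	/* unused by this command */
};

/* Stubs standing in for optee_notif_wait()/optee_notif_send(). */
static int notif_wait(uint64_t value)
{
	printf("wait on notification value %llu\n", (unsigned long long)value);
	return 0;
}

static int notif_send(uint64_t value)
{
	printf("send notification value %llu\n", (unsigned long long)value);
	return 0;
}

static int handle_notification(const struct rpc_value_param *p)
{
	switch (p->a) {
	case OPTEE_RPC_NOTIFICATION_WAIT:
		return notif_wait(p->b);
	case OPTEE_RPC_NOTIFICATION_SEND:
		return notif_send(p->b);
	default:
		return -EINVAL;
	}
}

int main(void)
{
	struct rpc_value_param p = { .a = OPTEE_RPC_NOTIFICATION_SEND, .b = 14 };

	return handle_notification(&p);
}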
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 80eb763a8a80..d44a6ae994f8 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -107,6 +107,12 @@ struct optee_smc_call_get_os_revision_result {
 /*
  * Call with struct optee_msg_arg as argument
  *
+ * When calling this function, normal world has a few responsibilities:
+ * 1. It must be able to handle incoming RPCs
+ * 2. Non-secure interrupts should not be masked
+ * 3. If asynchronous notifications have been negotiated successfully, then
+ *    asynchronous notifications should be unmasked during this call.
+ *
  * Call register usage:
  * a0	SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
  * a1	Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
@@ -195,7 +201,8 @@ struct optee_smc_get_shm_config_result {
  * Normal return register usage:
  * a0	OPTEE_SMC_RETURN_OK
  * a1	bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
- * a2-7	Preserved
+ * a2	The maximum secure world notification number
+ * a3-7	Preserved
  *
  * Error return register usage:
  * a0	OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
@@ -218,6 +225,8 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_VIRTUALIZATION	BIT(3)
 /* Secure world supports Shared Memory with a NULL reference */
 #define OPTEE_SMC_SEC_CAP_MEMREF_NULL		BIT(4)
+/* Secure world supports asynchronous notification of normal world */
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF		BIT(5)
 
 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES	9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -226,8 +235,8 @@ struct optee_smc_get_shm_config_result {
 struct optee_smc_exchange_capabilities_result {
 	unsigned long status;
 	unsigned long capabilities;
+	unsigned long max_notif_value;
 	unsigned long reserved0;
-	unsigned long reserved1;
 };
 
 /*
@@ -320,6 +329,68 @@ struct optee_smc_disable_shm_cache_result {
 	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
 
 /*
+ * Inform OP-TEE that normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call requests usage:
+ * a0	SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
+ * a1-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1-7	Preserved
+ *
+ * Not supported return register usage:
+ * a0	OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7	Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF	16
+#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
+
+/*
+ * Retrieve a value of notifications pending since the last call of this
+ * function.
+ *
+ * OP-TEE keeps a record of all posted values. When an interrupt is
+ * received indicating that there are posted values, this function
+ * should be called until all pending values have been retrieved. When a
+ * value is retrieved, it's cleared from the record in secure world.
+ *
+ * Call requests usage:
+ * a0	SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
+ * a1-6	Not used
+ * a7	Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0	OPTEE_SMC_RETURN_OK
+ * a1	value
+ * a2	Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
+ *		valid, else 0 if no values were pending
+ * a2	Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
+ *		pending, else 0.
+ *	Bit[31:2]: MBZ
+ * a3-7	Preserved
+ *
+ * Not supported return register usage:
+ * a0	OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7	Preserved
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID	BIT(0)
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING	BIT(1)
+
+/*
+ * Notification that OP-TEE expects a yielding call to do some bottom half
+ * work in a driver.
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF	0
+
+#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE	17
+#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
+	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
+
+/*
  * Resume from RPC (for example after processing a foreign interrupt)
  *
  * Call register usage:
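The retrieval contract above says to keep calling while the PENDING bit is set and to stop once VALID is clear; notif_irq_handler() in smc_abi.c further down implements exactly this. A self-contained sketch of that loop, with the fast call replaced by a mocked record of posted values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASYNC_NOTIF_VALUE_VALID		(1U << 0)	/* a2 Bit[0] */
#define ASYNC_NOTIF_VALUE_PENDING	(1U << 1)	/* a2 Bit[1] */

/* Fake record of posted values, standing in for secure world's bookkeeping. */
static uint32_t posted[] = { 13, 17, 42 };
static unsigned int head;

static uint32_t mock_get_async_notif_value(bool *valid, bool *pending)
{
	unsigned int n = sizeof(posted) / sizeof(posted[0]);

	if (head == n) {
		*valid = false;
		*pending = false;
		return 0;
	}
	*valid = true;
	*pending = head + 1 < n;	/* more values still recorded? */
	return posted[head++];		/* retrieving clears the record */
}

int main(void)
{
	bool valid, pending;
	uint32_t value;

	do {
		value = mock_get_async_notif_value(&valid, &pending);
		if (!valid)
			break;
		printf("retrieved notification value %u\n", value);
	} while (pending);

	return 0;
}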
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index cd642e340eaf..e69bc6380683 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -12,23 +12,6 @@
 #include "optee_private.h"
 #include "optee_rpc_cmd.h"
 
-struct wq_entry {
-	struct list_head link;
-	struct completion c;
-	u32 key;
-};
-
-void optee_wait_queue_init(struct optee_wait_queue *priv)
-{
-	mutex_init(&priv->mu);
-	INIT_LIST_HEAD(&priv->db);
-}
-
-void optee_wait_queue_exit(struct optee_wait_queue *priv)
-{
-	mutex_destroy(&priv->mu);
-}
-
 static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
 {
 	struct timespec64 ts;
@@ -144,48 +127,6 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
 }
 #endif
 
-static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
-{
-	struct wq_entry *w;
-
-	mutex_lock(&wq->mu);
-
-	list_for_each_entry(w, &wq->db, link)
-		if (w->key == key)
-			goto out;
-
-	w = kmalloc(sizeof(*w), GFP_KERNEL);
-	if (w) {
-		init_completion(&w->c);
-		w->key = key;
-		list_add_tail(&w->link, &wq->db);
-	}
-out:
-	mutex_unlock(&wq->mu);
-	return w;
-}
-
-static void wq_sleep(struct optee_wait_queue *wq, u32 key)
-{
-	struct wq_entry *w = wq_entry_get(wq, key);
-
-	if (w) {
-		wait_for_completion(&w->c);
-		mutex_lock(&wq->mu);
-		list_del(&w->link);
-		mutex_unlock(&wq->mu);
-		kfree(w);
-	}
-}
-
-static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
-{
-	struct wq_entry *w = wq_entry_get(wq, key);
-
-	if (w)
-		complete(&w->c);
-}
-
 static void handle_rpc_func_cmd_wq(struct optee *optee,
				   struct optee_msg_arg *arg)
 {
@@ -197,11 +138,13 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
		goto bad;
 
	switch (arg->params[0].u.value.a) {
-	case OPTEE_RPC_WAIT_QUEUE_SLEEP:
-		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+	case OPTEE_RPC_NOTIFICATION_WAIT:
+		if (optee_notif_wait(optee, arg->params[0].u.value.b))
+			goto bad;
		break;
-	case OPTEE_RPC_WAIT_QUEUE_WAKEUP:
-		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+	case OPTEE_RPC_NOTIFICATION_SEND:
+		if (optee_notif_send(optee, arg->params[0].u.value.b))
+			goto bad;
		break;
	default:
		goto bad;
@@ -319,7 +262,7 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
	case OPTEE_RPC_CMD_GET_TIME:
		handle_rpc_func_cmd_get_time(arg);
		break;
-	case OPTEE_RPC_CMD_WAIT_QUEUE:
+	case OPTEE_RPC_CMD_NOTIFICATION:
		handle_rpc_func_cmd_wq(optee, arg);
		break;
	case OPTEE_RPC_CMD_SUSPEND:
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index cf2e3293567d..449d6a72d289 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -8,13 +8,16 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/errno.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
-#include <linux/sched.h>
+#include <linux/irqdomain.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/tee_drv.h>
@@ -35,7 +38,8 @@
  * 2. Low level support functions to register shared memory in secure world
  * 3. Dynamic shared memory pool based on alloc_pages()
  * 4. Do a normal scheduled call into secure world
- * 5. Driver initialization.
+ * 5. Asynchronous notification
+ * 6. Driver initialization.
  */
 
 #define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
@@ -877,10 +881,137 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
	return rc;
 }
 
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+	struct optee_msg_arg *msg_arg;
+	struct tee_shm *shm;
+
+	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	msg_arg->cmd = cmd;
+	optee_smc_do_call_with_arg(ctx, shm);
+
+	tee_shm_free(shm);
+	return 0;
+}
+
+static int optee_smc_do_bottom_half(struct tee_context *ctx)
+{
+	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+static int optee_smc_stop_async_notif(struct tee_context *ctx)
+{
+	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
+
 /*
- * 5. Driver initialization
+ * 5. Asynchronous notification
+ */
+
+static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
+				 bool *value_pending)
+{
+	struct arm_smccc_res res;
+
+	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
+
+	if (res.a0)
+		return 0;
+	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
+	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
+	return res.a1;
+}
+
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+	struct optee *optee = dev_id;
+	bool do_bottom_half = false;
+	bool value_valid;
+	bool value_pending;
+	u32 value;
+
+	do {
+		value = get_async_notif_value(optee->smc.invoke_fn,
+					      &value_valid, &value_pending);
+		if (!value_valid)
+			break;
+
+		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
+			do_bottom_half = true;
+		else
+			optee_notif_send(optee, value);
+	} while (value_pending);
+
+	if (do_bottom_half)
+		return IRQ_WAKE_THREAD;
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
+{
+	struct optee *optee = dev_id;
+
+	optee_smc_do_bottom_half(optee->notif.ctx);
+
+	return IRQ_HANDLED;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+	struct tee_context *ctx;
+	int rc;
+
+	ctx = teedev_open(optee->teedev);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	optee->notif.ctx = ctx;
+	rc = request_threaded_irq(irq, notif_irq_handler,
+				  notif_irq_thread_fn,
+				  0, "optee_notification", optee);
+	if (rc)
+		goto err_close_ctx;
+
+	optee->smc.notif_irq = irq;
+
+	return 0;
+
+err_close_ctx:
+	teedev_close_context(optee->notif.ctx);
+	optee->notif.ctx = NULL;
+
+	return rc;
+}
+
+static void optee_smc_notif_uninit_irq(struct optee *optee)
+{
+	if (optee->notif.ctx) {
+		optee_smc_stop_async_notif(optee->notif.ctx);
+		if (optee->smc.notif_irq) {
+			free_irq(optee->smc.notif_irq, optee);
+			irq_dispose_mapping(optee->smc.notif_irq);
+		}
+
+		/*
+		 * The thread normally working with optee->notif.ctx was
+		 * stopped with free_irq() above.
+		 *
+		 * Note we're not using tee_client_close_context() since
+		 * we have already called tee_device_put() while
+		 * initializing to avoid circular reference counting.
+		 */
+		teedev_close_context(optee->notif.ctx);
+	}
+}
+
+/*
+ * 6. Driver initialization
  *
- * During driver inititialization is secure world probed to find out which
+ * During driver initialization the secure world is probed to find out which
  * features it supports so the driver can be initialized with a matching
  * configuration. This involves for instance support for dynamic shared
  * memory instead of a static memory carvout.
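Part of that probing is the asynchronous-notification capability: optee_msg_exchange_capabilities() below only trusts the advertised max_notif_value when OPTEE_SMC_SEC_CAP_ASYNC_NOTIF is set, and otherwise falls back to OPTEE_DEFAULT_MAX_NOTIF_VALUE so that purely synchronous notifications keep working. A distilled, stand-alone version of that decision (the struct mirrors the exchange-capabilities result above; the helper name is invented):

#include <stdio.h>

#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF	(1U << 5)
#define OPTEE_DEFAULT_MAX_NOTIF_VALUE	255

struct caps_result {
	unsigned long status;
	unsigned long capabilities;
	unsigned long max_notif_value;
	unsigned long reserved0;
};

static unsigned int resolve_max_notif_value(const struct caps_result *res)
{
	/* Secure world advertises a maximum only with async support. */
	if (res->capabilities & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
		return res->max_notif_value;

	/* Otherwise assume enough values for synchronous-only use. */
	return OPTEE_DEFAULT_MAX_NOTIF_VALUE;
}

int main(void)
{
	struct caps_result res = { .capabilities = 0 };

	printf("max_notif_value = %u\n", resolve_max_notif_value(&res));
	return 0;
}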
@@ -952,6 +1083,17 @@ static const struct optee_ops optee_ops = {
	.from_msg_param = optee_from_msg_param,
 };
 
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
+{
+	struct arm_smccc_res res;
+
+	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
+
+	if (res.a0)
+		return -EINVAL;
+	return 0;
+}
+
 static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
 {
	struct arm_smccc_res res;
@@ -1001,7 +1143,7 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
 }
 
 static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
-					    u32 *sec_caps)
+					    u32 *sec_caps, u32 *max_notif_value)
 {
	union {
		struct arm_smccc_res smccc;
@@ -1024,6 +1166,11 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
		return false;
 
	*sec_caps = res.result.capabilities;
+	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+		*max_notif_value = res.result.max_notif_value;
+	else
+		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+
	return true;
 }
 
@@ -1188,6 +1335,8 @@ static int optee_smc_remove(struct platform_device *pdev)
	 */
	optee_disable_shm_cache(optee);
 
+	optee_smc_notif_uninit_irq(optee);
+
	optee_remove_common(optee);
 
	if (optee->smc.memremaped_shm)
@@ -1217,6 +1366,7 @@ static int optee_probe(struct platform_device *pdev)
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
+	u32 max_notif_value;
	u32 sec_caps;
	int rc;
 
@@ -1236,7 +1386,8 @@ static int optee_probe(struct platform_device *pdev)
		return -EINVAL;
	}
 
-	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
+					     &max_notif_value)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}
@@ -1259,7 +1410,7 @@ static int optee_probe(struct platform_device *pdev)
	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
-		goto err;
+		goto err_free_pool;
	}
 
	optee->ops = &optee_ops;
@@ -1269,32 +1420,55 @@ static int optee_probe(struct platform_device *pdev)
	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
-		goto err;
+		goto err_free_optee;
	}
	optee->teedev = teedev;
 
	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
-		goto err;
+		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;
 
	rc = tee_device_register(optee->teedev);
	if (rc)
-		goto err;
+		goto err_unreg_supp_teedev;
 
	rc = tee_device_register(optee->supp_teedev);
	if (rc)
-		goto err;
+		goto err_unreg_supp_teedev;
 
	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
-	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;
+	platform_set_drvdata(pdev, optee);
+	rc = optee_notif_init(optee, max_notif_value);
+	if (rc)
+		goto err_supp_uninit;
+
+	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+		unsigned int irq;
+
+		rc = platform_get_irq(pdev, 0);
+		if (rc < 0) {
+			pr_err("platform_get_irq: ret %d\n", rc);
+			goto err_notif_uninit;
+		}
+		irq = rc;
+
+		rc = optee_smc_notif_init_irq(optee, irq);
+		if (rc) {
+			irq_dispose_mapping(irq);
+			goto err_notif_uninit;
+		}
+		enable_async_notif(optee->smc.invoke_fn);
+		pr_info("Asynchronous notifications enabled\n");
+	}
+
	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
@@ -1309,29 +1483,30 @@ static int optee_probe(struct platform_device *pdev)
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");
 
-	platform_set_drvdata(pdev, optee);
-
	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
-	if (rc) {
-		optee_smc_remove(pdev);
-		return rc;
-	}
+	if (rc)
+		goto err_disable_shm_cache;
 
	pr_info("initialized driver\n");
	return 0;
 
-err:
-	if (optee) {
-		/*
-		 * tee_device_unregister() is safe to call even if the
-		 * devices hasn't been registered with
-		 * tee_device_register() yet.
-		 */
-		tee_device_unregister(optee->supp_teedev);
-		tee_device_unregister(optee->teedev);
-		kfree(optee);
-	}
-	if (pool)
-		tee_shm_pool_free(pool);
+
+err_disable_shm_cache:
+	optee_disable_shm_cache(optee);
+	optee_smc_notif_uninit_irq(optee);
+	optee_unregister_devices();
+err_notif_uninit:
+	optee_notif_uninit(optee);
+err_supp_uninit:
+	optee_supp_uninit(&optee->supp);
+	mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
+	tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+	tee_device_unregister(optee->teedev);
+err_free_optee:
+	kfree(optee);
+err_free_pool:
+	tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 2b37bc408fc3..3fc426dad2df 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
 static struct class *tee_class;
 static dev_t tee_devt;
 
-static struct tee_context *teedev_open(struct tee_device *teedev)
+struct tee_context *teedev_open(struct tee_device *teedev)
 {
	int rc;
	struct tee_context *ctx;
@@ -70,6 +70,7 @@ err:
	return ERR_PTR(rc);
 
 }
+EXPORT_SYMBOL_GPL(teedev_open);
 
 void teedev_ctx_get(struct tee_context *ctx)
 {
@@ -96,11 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx)
	kref_put(&ctx->refcount, teedev_ctx_release);
 }
 
-static void teedev_close_context(struct tee_context *ctx)
+void teedev_close_context(struct tee_context *ctx)
 {
-	tee_device_put(ctx->teedev);
+	struct tee_device *teedev = ctx->teedev;
+
	teedev_ctx_put(ctx);
+	tee_device_put(teedev);
 }
+EXPORT_SYMBOL_GPL(teedev_close_context);
 
 static int tee_open(struct inode *inode, struct file *filp)
 {
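The new teedev_close_context() body above follows the save-before-put pattern: teedev_ctx_put() can free the context, so ctx->teedev is loaded into a local first and the device reference is dropped only afterwards. A stripped-down illustration of why the order matters, with invented types and refcounting:

#include <stdio.h>
#include <stdlib.h>

struct device_obj {
	int refs;
};

struct context_obj {
	struct device_obj *dev;
	int refs;
};

static void ctx_put(struct context_obj *ctx)
{
	if (--ctx->refs == 0)
		free(ctx);	/* ctx must not be touched after this */
}

static void dev_put(struct device_obj *dev)
{
	--dev->refs;
}

static void close_context(struct context_obj *ctx)
{
	struct device_obj *dev = ctx->dev;	/* save before ctx can go away */

	ctx_put(ctx);	/* may free ctx */
	dev_put(dev);	/* safe: uses the saved pointer */
}

int main(void)
{
	struct device_obj dev = { .refs = 2 };	/* one extra ref held elsewhere */
	struct context_obj *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return 1;
	ctx->dev = &dev;
	ctx->refs = 1;

	close_context(ctx);
	printf("device refs left: %d\n", dev.refs);
	return 0;
}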
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 53f57c3b9f42..1769808031c5 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -414,6 +414,8 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
 
		if (of_device_is_compatible(np, "marvell,armada-38x-uart"))
			p->serial_out = dw8250_serial_out38x;
+		if (of_device_is_compatible(np, "starfive,jh7100-uart"))
+			p->set_termios = dw8250_do_set_termios;
 
	} else if (acpi_dev_present("APMC0D08", NULL, -1)) {
		p->iotype = UPIO_MEM32;
@@ -696,6 +698,7 @@ static const struct of_device_id dw8250_of_match[] = {
	{ .compatible = "cavium,octeon-3860-uart" },
	{ .compatible = "marvell,armada-38x-uart" },
	{ .compatible = "renesas,rzn1-uart" },
+	{ .compatible = "starfive,jh7100-uart" },
	{ /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, dw8250_of_match);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 60361141ac04..a72a9474afea 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -7,6 +7,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <linux/usb.h>
@@ -15,6 +16,8 @@
 #include <linux/usb/of.h>
 #include <linux/usb/phy.h>
 
+#include <soc/tegra/common.h>
+
 #include "../host/ehci.h"
 
 #include "ci.h"
@@ -278,6 +281,8 @@ static int tegra_usb_probe(struct platform_device *pdev)
	if (!usb)
		return -ENOMEM;
 
+	platform_set_drvdata(pdev, usb);
+
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc) {
		dev_err(&pdev->dev, "failed to match OF data\n");
@@ -296,11 +301,14 @@ static int tegra_usb_probe(struct platform_device *pdev)
		return err;
	}
 
-	err = clk_prepare_enable(usb->clk);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
+	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+	if (err)
+		return err;
+
+	pm_runtime_enable(&pdev->dev);
+	err = pm_runtime_resume_and_get(&pdev->dev);
+	if (err)
		return err;
-	}
 
	if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
		usb->needs_double_reset = true;
@@ -320,8 +328,6 @@ static int tegra_usb_probe(struct platform_device *pdev)
	if (err)
		goto fail_power_off;
 
-	platform_set_drvdata(pdev, usb);
-
	/* setup and register ChipIdea HDRC device */
	usb->soc = soc;
	usb->data.name = "tegra-usb";
@@ -350,7 +356,9 @@ static int tegra_usb_probe(struct platform_device *pdev)
 phy_shutdown:
	usb_phy_shutdown(usb->phy);
 fail_power_off:
-	clk_disable_unprepare(usb->clk);
+	pm_runtime_put_sync_suspend(&pdev->dev);
+	pm_runtime_force_suspend(&pdev->dev);
+
	return err;
 }
 
@@ -360,15 +368,46 @@ static int tegra_usb_remove(struct platform_device *pdev)
	ci_hdrc_remove_device(usb->dev);
	usb_phy_shutdown(usb->phy);
 
+	pm_runtime_put_sync_suspend(&pdev->dev);
+	pm_runtime_force_suspend(&pdev->dev);
+
+	return 0;
+}
+
+static int __maybe_unused tegra_usb_runtime_resume(struct device *dev)
+{
+	struct tegra_usb *usb = dev_get_drvdata(dev);
+	int err;
+
+	err = clk_prepare_enable(usb->clk);
+	if (err < 0) {
+		dev_err(dev, "failed to enable clock: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int __maybe_unused tegra_usb_runtime_suspend(struct device *dev)
+{
+	struct tegra_usb *usb = dev_get_drvdata(dev);
+
	clk_disable_unprepare(usb->clk);
 
	return 0;
 }
 
+static const struct dev_pm_ops tegra_usb_pm = {
+	SET_RUNTIME_PM_OPS(tegra_usb_runtime_suspend, tegra_usb_runtime_resume,
+			   NULL)
+};
+
 static struct platform_driver tegra_usb_driver = {
	.driver = {
		.name = "tegra-usb",
		.of_match_table = tegra_usb_of_match,
+		.pm = &tegra_usb_pm,
	},
	.probe = tegra_usb_probe,
	.remove = tegra_usb_remove,
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 826175ad88a2..0fa7ede94fa6 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1763,6 +1763,53 @@ int remove_conflicting_framebuffers(struct apertures_struct *a,
 EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
 /**
+ * is_firmware_framebuffer - detect if firmware-configured framebuffer matches
+ * @a: memory range, users of which are to be checked
+ *
+ * This function checks framebuffer devices (initialized by firmware/bootloader)
+ * which use the memory range described by @a. If @a matches, the function
+ * returns true, otherwise false.
+ */
+bool is_firmware_framebuffer(struct apertures_struct *a)
+{
+	bool do_free = false;
+	bool found = false;
+	int i;
+
+	if (!a) {
+		a = alloc_apertures(1);
+		if (!a)
+			return false;
+
+		a->ranges[0].base = 0;
+		a->ranges[0].size = ~0;
+		do_free = true;
+	}
+
+	mutex_lock(&registration_lock);
+	/* check all firmware fbs and kick off if the base addr overlaps */
+	for_each_registered_fb(i) {
+		struct apertures_struct *gen_aper;
+
+		if (!(registered_fb[i]->flags & FBINFO_MISC_FIRMWARE))
+			continue;
+
+		gen_aper = registered_fb[i]->apertures;
+		if (fb_do_apertures_overlap(gen_aper, a)) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&registration_lock);
+
+	if (do_free)
+		kfree(a);
+
+	return found;
+}
+EXPORT_SYMBOL(is_firmware_framebuffer);
+
+/**
  * remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
  * @pdev: PCI device
  * @name: requesting driver name