author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-11-18 05:13:23 +0300
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-11-18 05:13:23 +0300
commit    05909cd9a0c8811731b38697af13075e8954314f (patch)
tree      aa5306e756666407c95067b20b21b32ff27fd11a /drivers/bus
parent    c7f0169e3bd274e576f6aaeee86ad2adf7bb14b5 (diff)
parent    bbf5c979011a099af5dc76498918ed7df445635b (diff)
download  linux-05909cd9a0c8811731b38697af13075e8954314f.tar.xz
Merge tag 'v5.9' into next
Sync up with mainline to bring in the latest DTS files.
Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig                 |  43
-rw-r--r--  drivers/bus/Makefile                |   4
-rw-r--r--  drivers/bus/arm-integrator-lm.c     | 128
-rw-r--r--  drivers/bus/bt1-apb.c               | 421
-rw-r--r--  drivers/bus/bt1-axi.c               | 314
-rw-r--r--  drivers/bus/fsl-mc/dprc-driver.c    |  88
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c     | 109
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-msi.c     |  36
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h |   6
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c          |   2
-rw-r--r--  drivers/bus/fsl-mc/mc-sys.c         |   4
-rw-r--r--  drivers/bus/mhi/core/boot.c         |  75
-rw-r--r--  drivers/bus/mhi/core/init.c         |   8
-rw-r--r--  drivers/bus/mhi/core/internal.h     |   9
-rw-r--r--  drivers/bus/mhi/core/main.c         | 197
-rw-r--r--  drivers/bus/mhi/core/pm.c           | 229
-rw-r--r--  drivers/bus/mips_cdmm.c             |  15
-rw-r--r--  drivers/bus/ti-sysc.c               | 162
-rw-r--r--  drivers/bus/vexpress-config.c       | 354
19 files changed, 1862 insertions(+), 342 deletions(-)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 6d4e4497b59b..0c262c2aeaf2 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -20,6 +20,15 @@ config ARM_CCI400_PORT_CTRL
Low level power management driver for CCI400 cache coherent
interconnect for ARM platforms.
+config ARM_INTEGRATOR_LM
+ bool "ARM Integrator Logic Module bus"
+ depends on HAS_IOMEM
+ depends on ARCH_INTEGRATOR || COMPILE_TEST
+ default ARCH_INTEGRATOR
+ help
+ Say Y here to enable support for the ARM Logic Module bus
+ found on the ARM Integrator AP (Application Platform).
+
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
@@ -29,6 +38,36 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config BT1_APB
+ bool "Baikal-T1 APB-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ The Baikal-T1 AXI-APB bridge is used to access the SoC subsystem CSRs.
+ IO requests are routed to this bus by means of the DW AMBA 3 AXI
+ Interconnect. If an APB protocol collision occurs, or a slave device
+ fails to respond before the timeout expires, an IRQ is raised and the
+ erroneous address is reported to the APB terminator (APB Errors
+ Handler Block). This driver provides an interrupt handler that
+ detects the erroneous address, prints an error message about the
+ address fault and updates an error counter. The counter and the
+ APB-bus operation timeout can be accessed via the corresponding
+ sysfs nodes.
+
+config BT1_AXI
+ bool "Baikal-T1 AXI-bus driver"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ select MFD_SYSCON
+ help
+ The AXI3 bus is the main communication bus connecting all high-speed
+ peripheral IP cores with the RAM controller and with the MIPS P5600
+ cores on the Baikal-T1 SoC. Traffic arbitration is done by the DW
+ AMBA 3 AXI Interconnect (the so-called AXI Main Interconnect), which
+ routes IO requests from one SoC block to another. This driver
+ provides a way to detect bus protocol errors and non-responding
+ device situations by means of an errors handler block (EHB) embedded
+ on top of the interconnect. AXI Interconnect QoS arbitration tuning
+ is currently unsupported.
+
config MOXTET
tristate "CZ.NIC Turris Mox module configuration bus"
depends on SPI_MASTER && OF
@@ -58,7 +97,7 @@ config IMX_WEIM
config MIPS_CDMM
bool "MIPS Common Device Memory Map (CDMM) Driver"
- depends on CPU_MIPSR2
+ depends on CPU_MIPSR2 || CPU_MIPSR5
help
Driver needed for the MIPS Common Device Memory Map bus in MIPS
cores. This bus is for per-CPU tightly coupled devices such as the
@@ -183,7 +222,7 @@ config UNIPHIER_SYSTEM_BUS
needed to use on-board devices connected to UniPhier SoCs.
config VEXPRESS_CONFIG
- bool "Versatile Express configuration bus"
+ tristate "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
depends on ARM || ARM64
depends on OF
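The two Baikal-T1 help texts above mention a sysfs interface: the EHB drivers added below expose an error counter ("count"), the bus timeout in microseconds ("timeout", APB only) and an error-injection knob ("inject_error"). A minimal userspace sketch of reading and writing those nodes follows; the platform-device path is an assumption for illustration only, since the real path depends on the device-tree node name and address.

        /*
         * Sketch: read the Baikal-T1 APB EHB error counter and set the bus
         * timeout via sysfs. The base path below is hypothetical.
         */
        #include <stdio.h>

        int main(void)
        {
                /* assumed sysfs path */
                const char *base = "/sys/devices/platform/soc/1f059000.bus";
                char path[128];
                unsigned int count;
                FILE *f;

                snprintf(path, sizeof(path), "%s/count", base);
                f = fopen(path, "r");
                if (!f || fscanf(f, "%u", &count) != 1)
                        return 1;
                fclose(f);
                printf("APB EHB errors so far: %u\n", count);

                /* Request a 1000 us bus timeout (the driver clamps the value). */
                snprintf(path, sizeof(path), "%s/timeout", base);
                f = fopen(path, "w");
                if (!f)
                        return 1;
                fprintf(f, "1000\n");
                fclose(f);

                return 0;
        }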
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 05f32cd694a4..397e35392bff 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -5,7 +5,7 @@
# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
-
+obj-$(CONFIG_ARM_INTEGRATOR_LM) += arm-integrator-lm.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
obj-$(CONFIG_MOXTET) += moxtet.o
@@ -13,6 +13,8 @@ obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
+obj-$(CONFIG_BT1_APB) += bt1-apb.o
+obj-$(CONFIG_BT1_AXI) += bt1-axi.o
obj-$(CONFIG_IMX_WEIM) += imx-weim.o
obj-$(CONFIG_MIPS_CDMM) += mips_cdmm.o
obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c
new file mode 100644
index 000000000000..845b6c43fef8
--- /dev/null
+++ b/drivers/bus/arm-integrator-lm.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Integrator Logical Module bus driver
+ * Copyright (C) 2020 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* All information about the connected logic modules is in here */
+#define INTEGRATOR_SC_DEC_OFFSET 0x10
+
+/* Base address for the expansion modules */
+#define INTEGRATOR_AP_EXP_BASE 0xc0000000
+#define INTEGRATOR_AP_EXP_STRIDE 0x10000000
+
+static int integrator_lm_populate(int num, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *child;
+ u32 base;
+ int ret;
+
+ base = INTEGRATOR_AP_EXP_BASE + (num * INTEGRATOR_AP_EXP_STRIDE);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ struct resource res;
+
+ ret = of_address_to_resource(child, 0, &res);
+ if (ret) {
+ dev_info(dev, "no valid address on child\n");
+ continue;
+ }
+
+ /* First populate the syscon then any devices */
+ if (res.start == base) {
+ dev_info(dev, "populate module @0x%08x from DT\n",
+ base);
+ ret = of_platform_default_populate(child, NULL, dev);
+ if (ret) {
+ dev_err(dev, "failed to populate module\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_syscon_match[] = {
+ { .compatible = "arm,integrator-ap-syscon"},
+ { },
+};
+
+static int integrator_ap_lm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *syscon;
+ struct regmap *map;
+ u32 val;
+ int ret;
+ int i;
+
+ /* Look up the system controller */
+ syscon = of_find_matching_node(NULL, integrator_ap_syscon_match);
+ if (!syscon) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return -ENODEV;
+ }
+ map = syscon_node_to_regmap(syscon);
+ if (IS_ERR(map)) {
+ dev_err(dev,
+ "could not find Integrator/AP system controller\n");
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_read(map, INTEGRATOR_SC_DEC_OFFSET, &val);
+ if (ret) {
+ dev_err(dev, "could not read from Integrator/AP syscon\n");
+ return ret;
+ }
+
+ /* Loop over the connected modules */
+ for (i = 0; i < 4; i++) {
+ if (!(val & BIT(4 + i)))
+ continue;
+
+ dev_info(dev, "detected module in slot %d\n", i);
+ ret = integrator_lm_populate(i, dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id integrator_ap_lm_match[] = {
+ { .compatible = "arm,integrator-ap-lm"},
+ { },
+};
+
+static struct platform_driver integrator_ap_lm_driver = {
+ .probe = integrator_ap_lm_probe,
+ .driver = {
+ .name = "integratorap-lm",
+ .of_match_table = integrator_ap_lm_match,
+ },
+};
+module_platform_driver(integrator_ap_lm_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Integrator AP Logical Module driver");
+MODULE_LICENSE("GPL v2");
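Everything the probe above needs comes from a single syscon register: bits 4-7 of SC_DEC flag which of the four expansion slots carries a logic module, and each slot's devices sit at a fixed 256 MiB stride above 0xc0000000. A stand-alone sketch of that decode, with a made-up register value for illustration:

        #include <stdio.h>
        #include <stdint.h>

        #define INTEGRATOR_AP_EXP_BASE   0xc0000000U
        #define INTEGRATOR_AP_EXP_STRIDE 0x10000000U

        int main(void)
        {
                uint32_t sc_dec = 0x30; /* example: modules in slots 0 and 1 */
                int i;

                for (i = 0; i < 4; i++) {
                        if (!(sc_dec & (1U << (4 + i))))
                                continue;
                        printf("module in slot %d, base 0x%08x\n", i,
                               INTEGRATOR_AP_EXP_BASE + i * INTEGRATOR_AP_EXP_STRIDE);
                }
                return 0;
        }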
diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c
new file mode 100644
index 000000000000..b25ff941e7c7
--- /dev/null
+++ b/drivers/bus/bt1-apb.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 APB-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+#include <linux/clk.h>
+#include <linux/sysfs.h>
+
+#define APB_EHB_ISR 0x00
+#define APB_EHB_ISR_PENDING BIT(0)
+#define APB_EHB_ISR_MASK BIT(1)
+#define APB_EHB_ADDR 0x04
+#define APB_EHB_TIMEOUT 0x08
+
+#define APB_EHB_TIMEOUT_MIN 0x000003FFU
+#define APB_EHB_TIMEOUT_MAX 0xFFFFFFFFU
+
+/*
+ * struct bt1_apb - Baikal-T1 APB EHB private data
+ * @dev: Pointer to the device structure.
+ * @regs: APB EHB registers map.
+ * @res: No-device error injection memory region.
+ * @irq: Errors IRQ number.
+ * @rate: APB-bus reference clock rate.
+ * @pclk: APB-reference clock.
+ * @prst: APB domain reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_apb {
+ struct device *dev;
+
+ struct regmap *regs;
+ void __iomem *res;
+ int irq;
+
+ unsigned long rate;
+ struct clk *pclk;
+
+ struct reset_control *prst;
+
+ atomic_t count;
+};
+
+static const struct regmap_config bt1_apb_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = APB_EHB_TIMEOUT,
+ .fast_io = true
+};
+
+static inline unsigned long bt1_apb_n_to_timeout_us(struct bt1_apb *apb, u32 n)
+{
+ u64 timeout = (u64)n * USEC_PER_SEC;
+
+ do_div(timeout, apb->rate);
+
+ return timeout;
+}
+
+static inline unsigned long bt1_apb_timeout_to_n_us(struct bt1_apb *apb,
+ unsigned long timeout)
+{
+ u64 n = (u64)timeout * apb->rate;
+
+ do_div(n, USEC_PER_SEC);
+
+ return n;
+}
+
+static irqreturn_t bt1_apb_isr(int irq, void *data)
+{
+ struct bt1_apb *apb = data;
+ u32 addr = 0;
+
+ regmap_read(apb->regs, APB_EHB_ADDR, &addr);
+
+ dev_crit_ratelimited(apb->dev,
+ "APB-bus fault %d: Slave access timeout at 0x%08x\n",
+ atomic_inc_return(&apb->count),
+ addr);
+
+ /*
+ * Print a backtrace on each CPU. This might be pointless if the
+ * fault happened on the same CPU the IRQ handler runs on, or if the
+ * other core continued execution despite the error. But if it did
+ * not, the trace leads straight to the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_apb_clear_data(void *data)
+{
+ struct bt1_apb *apb = data;
+ struct platform_device *pdev = to_platform_device(apb->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_apb *bt1_apb_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = devm_kzalloc(dev, sizeof(*apb), GFP_KERNEL);
+ if (!apb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_apb_clear_data, apb);
+ if (ret) {
+ dev_err(dev, "Can't add APB EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ apb->dev = dev;
+ atomic_set(&apb->count, 0);
+ platform_set_drvdata(pdev, apb);
+
+ return apb;
+}
+
+static int bt1_apb_request_regs(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ void __iomem *regs;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "ehb");
+ if (IS_ERR(regs)) {
+ dev_err(apb->dev, "Couldn't map APB EHB registers\n");
+ return PTR_ERR(regs);
+ }
+
+ apb->regs = devm_regmap_init_mmio(apb->dev, regs, &bt1_apb_regmap_cfg);
+ if (IS_ERR(apb->regs)) {
+ dev_err(apb->dev, "Couldn't create APB EHB regmap\n");
+ return PTR_ERR(apb->regs);
+ }
+
+ apb->res = devm_platform_ioremap_resource_byname(pdev, "nodev");
+ if (IS_ERR(apb->res))
+ dev_err(apb->dev, "Couldn't map reserved region\n");
+
+ return PTR_ERR_OR_ZERO(apb->res);
+}
+
+static int bt1_apb_request_rst(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst");
+ if (IS_ERR(apb->prst)) {
+ dev_warn(apb->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(apb->prst);
+ }
+
+ ret = reset_control_deassert(apb->prst);
+ if (ret)
+ dev_err(apb->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_apb_disable_clk(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ clk_disable_unprepare(apb->pclk);
+}
+
+static int bt1_apb_request_clk(struct bt1_apb *apb)
+{
+ int ret;
+
+ apb->pclk = devm_clk_get(apb->dev, "pclk");
+ if (IS_ERR(apb->pclk)) {
+ dev_err(apb->dev, "Couldn't get APB clock descriptor\n");
+ return PTR_ERR(apb->pclk);
+ }
+
+ ret = clk_prepare_enable(apb->pclk);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't enable the APB clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_disable_clk, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB clocks disable action\n");
+ return ret;
+ }
+
+ apb->rate = clk_get_rate(apb->pclk);
+ if (!apb->rate) {
+ dev_err(apb->dev, "Invalid clock rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void bt1_apb_clear_irq(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_MASK, 0);
+}
+
+static int bt1_apb_request_irq(struct bt1_apb *apb)
+{
+ struct platform_device *pdev = to_platform_device(apb->dev);
+ int ret;
+
+ apb->irq = platform_get_irq(pdev, 0);
+ if (apb->irq < 0)
+ return apb->irq;
+
+ ret = devm_request_irq(apb->dev, apb->irq, bt1_apb_isr, IRQF_SHARED,
+ "bt1-apb", apb);
+ if (ret) {
+ dev_err(apb->dev, "Couldn't request APB EHB IRQ\n");
+ return ret;
+ }
+
+ ret = devm_add_action(apb->dev, bt1_apb_clear_irq, apb);
+ if (ret) {
+ dev_err(apb->dev, "Can't add APB EHB IRQs clear action\n");
+ return ret;
+ }
+
+ /* Unmask IRQ and clear its pending flag. */
+ regmap_update_bits(apb->regs, APB_EHB_ISR,
+ APB_EHB_ISR_PENDING | APB_EHB_ISR_MASK,
+ APB_EHB_ISR_MASK);
+
+ return 0;
+}
+
+static ssize_t count_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&apb->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ ret = regmap_read(apb->regs, APB_EHB_TIMEOUT, &n);
+ if (ret)
+ return ret;
+
+ timeout = bt1_apb_n_to_timeout_us(apb, n);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", timeout);
+}
+
+static ssize_t timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+ unsigned long timeout;
+ int ret;
+ u32 n;
+
+ if (kstrtoul(buf, 0, &timeout) < 0)
+ return -EINVAL;
+
+ n = bt1_apb_timeout_to_n_us(apb, timeout);
+ n = clamp(n, APB_EHB_TIMEOUT_MIN, APB_EHB_TIMEOUT_MAX);
+
+ ret = regmap_write(apb->regs, APB_EHB_TIMEOUT, n);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(timeout);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: nodev irq\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_apb *apb = dev_get_drvdata(dev);
+
+ /*
+ * Either dummy read from the unmapped address in the APB IO area
+ * or manually set the IRQ status.
+ */
+ if (sysfs_streq(data, "nodev"))
+ readl(apb->res);
+ else if (sysfs_streq(data, "irq"))
+ regmap_update_bits(apb->regs, APB_EHB_ISR, APB_EHB_ISR_PENDING,
+ APB_EHB_ISR_PENDING);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_apb_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_timeout.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_apb_sysfs);
+
+static void bt1_apb_remove_sysfs(void *data)
+{
+ struct bt1_apb *apb = data;
+
+ device_remove_groups(apb->dev, bt1_apb_sysfs_groups);
+}
+
+static int bt1_apb_init_sysfs(struct bt1_apb *apb)
+{
+ int ret;
+
+ ret = device_add_groups(apb->dev, bt1_apb_sysfs_groups);
+ if (ret) {
+ dev_err(apb->dev, "Failed to create EHB APB sysfs nodes\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(apb->dev, bt1_apb_remove_sysfs, apb);
+ if (ret)
+ dev_err(apb->dev, "Can't add APB EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_apb_probe(struct platform_device *pdev)
+{
+ struct bt1_apb *apb;
+ int ret;
+
+ apb = bt1_apb_create_data(pdev);
+ if (IS_ERR(apb))
+ return PTR_ERR(apb);
+
+ ret = bt1_apb_request_regs(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_rst(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_clk(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_request_irq(apb);
+ if (ret)
+ return ret;
+
+ ret = bt1_apb_init_sysfs(apb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_apb_of_match[] = {
+ { .compatible = "baikal,bt1-apb" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_apb_of_match);
+
+static struct platform_driver bt1_apb_driver = {
+ .probe = bt1_apb_probe,
+ .driver = {
+ .name = "bt1-apb",
+ .of_match_table = bt1_apb_of_match
+ }
+};
+module_platform_driver(bt1_apb_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 APB-bus driver");
+MODULE_LICENSE("GPL v2");
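The two inline helpers above convert between the hardware timeout register value (a count of APB reference-clock cycles, n) and microseconds: timeout_us = n * 1000000 / rate, and n = timeout_us * rate / 1000000. A worked stand-alone version; the 125 MHz rate is an assumption for the sake of the example, as the driver reads the real rate from the "pclk" clock:

        #include <stdio.h>
        #include <stdint.h>

        #define USEC_PER_SEC 1000000ULL

        int main(void)
        {
                uint64_t rate = 125000000; /* assumed pclk rate, Hz */
                uint64_t n = 0x3FF;        /* APB_EHB_TIMEOUT_MIN */
                uint64_t us;

                /* Same math as bt1_apb_n_to_timeout_us(). */
                us = n * USEC_PER_SEC / rate;
                printf("n=%llu cycles -> %llu us\n",
                       (unsigned long long)n, (unsigned long long)us);

                /* And back, as in bt1_apb_timeout_to_n_us(); note the integer
                 * division makes the round trip inexact. */
                n = us * rate / USEC_PER_SEC;
                printf("%llu us -> n=%llu cycles\n",
                       (unsigned long long)us, (unsigned long long)n);

                return 0;
        }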
diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c
new file mode 100644
index 000000000000..e7a6744acc7b
--- /dev/null
+++ b/drivers/bus/bt1-axi.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 AXI-bus driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/atomic.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/nmi.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/sysfs.h>
+
+#define BT1_AXI_WERRL 0x110
+#define BT1_AXI_WERRH 0x114
+#define BT1_AXI_WERRH_TYPE BIT(23)
+#define BT1_AXI_WERRH_ADDR_FLD 24
+#define BT1_AXI_WERRH_ADDR_MASK GENMASK(31, BT1_AXI_WERRH_ADDR_FLD)
+
+/*
+ * struct bt1_axi - Baikal-T1 AXI-bus private data
+ * @dev: Pointer to the device structure.
+ * @qos_regs: AXI Interconnect QoS tuning registers.
+ * @sys_regs: Baikal-T1 System Controller registers map.
+ * @irq: Errors IRQ number.
+ * @aclk: AXI reference clock.
+ * @arst: AXI Interconnect reset line.
+ * @count: Number of errors detected.
+ */
+struct bt1_axi {
+ struct device *dev;
+
+ void __iomem *qos_regs;
+ struct regmap *sys_regs;
+ int irq;
+
+ struct clk *aclk;
+
+ struct reset_control *arst;
+
+ atomic_t count;
+};
+
+static irqreturn_t bt1_axi_isr(int irq, void *data)
+{
+ struct bt1_axi *axi = data;
+ u32 low = 0, high = 0;
+
+ regmap_read(axi->sys_regs, BT1_AXI_WERRL, &low);
+ regmap_read(axi->sys_regs, BT1_AXI_WERRH, &high);
+
+ dev_crit_ratelimited(axi->dev,
+ "AXI-bus fault %d: %s at 0x%x%08x\n",
+ atomic_inc_return(&axi->count),
+ high & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
+ high, low);
+
+ /*
+ * Print a backtrace on each CPU. This might be pointless if the
+ * fault happened on the same CPU the IRQ handler runs on, or if the
+ * other core continued execution despite the error. But if it did
+ * not, the trace leads straight to the cause of the problem.
+ */
+ trigger_all_cpu_backtrace();
+
+ return IRQ_HANDLED;
+}
+
+static void bt1_axi_clear_data(void *data)
+{
+ struct bt1_axi *axi = data;
+ struct platform_device *pdev = to_platform_device(axi->dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static struct bt1_axi *bt1_axi_create_data(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = devm_kzalloc(dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi)
+ return ERR_PTR(-ENOMEM);
+
+ ret = devm_add_action(dev, bt1_axi_clear_data, axi);
+ if (ret) {
+ dev_err(dev, "Can't add AXI EHB data clear action\n");
+ return ERR_PTR(ret);
+ }
+
+ axi->dev = dev;
+ atomic_set(&axi->count, 0);
+ platform_set_drvdata(pdev, axi);
+
+ return axi;
+}
+
+static int bt1_axi_request_regs(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ struct device *dev = axi->dev;
+
+ axi->sys_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(axi->sys_regs)) {
+ dev_err(dev, "Couldn't find syscon registers\n");
+ return PTR_ERR(axi->sys_regs);
+ }
+
+ axi->qos_regs = devm_platform_ioremap_resource_byname(pdev, "qos");
+ if (IS_ERR(axi->qos_regs))
+ dev_err(dev, "Couldn't map AXI-bus QoS registers\n");
+
+ return PTR_ERR_OR_ZERO(axi->qos_regs);
+}
+
+static int bt1_axi_request_rst(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst");
+ if (IS_ERR(axi->arst)) {
+ dev_warn(axi->dev, "Couldn't get reset control line\n");
+ return PTR_ERR(axi->arst);
+ }
+
+ ret = reset_control_deassert(axi->arst);
+ if (ret)
+ dev_err(axi->dev, "Failed to deassert the reset line\n");
+
+ return ret;
+}
+
+static void bt1_axi_disable_clk(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ clk_disable_unprepare(axi->aclk);
+}
+
+static int bt1_axi_request_clk(struct bt1_axi *axi)
+{
+ int ret;
+
+ axi->aclk = devm_clk_get(axi->dev, "aclk");
+ if (IS_ERR(axi->aclk)) {
+ dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n");
+ return PTR_ERR(axi->aclk);
+ }
+
+ ret = clk_prepare_enable(axi->aclk);
+ if (ret) {
+ dev_err(axi->dev, "Couldn't enable the AXI clock\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_disable_clk, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI clock disable action\n");
+
+ return ret;
+}
+
+static int bt1_axi_request_irq(struct bt1_axi *axi)
+{
+ struct platform_device *pdev = to_platform_device(axi->dev);
+ int ret;
+
+ axi->irq = platform_get_irq(pdev, 0);
+ if (axi->irq < 0)
+ return axi->irq;
+
+ ret = devm_request_irq(axi->dev, axi->irq, bt1_axi_isr, IRQF_SHARED,
+ "bt1-axi", axi);
+ if (ret)
+ dev_err(axi->dev, "Couldn't request AXI EHB IRQ\n");
+
+ return ret;
+}
+
+static ssize_t count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&axi->count));
+}
+static DEVICE_ATTR_RO(count);
+
+static ssize_t inject_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "Error injection: bus unaligned\n");
+}
+
+static ssize_t inject_error_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *data, size_t count)
+{
+ struct bt1_axi *axi = dev_get_drvdata(dev);
+
+ /*
+ * An unaligned read from memory causes a CM2 bus error, while an
+ * unaligned write causes the AXI-bus write error that is handled
+ * by this driver.
+ */
+ if (sysfs_streq(data, "bus"))
+ readb(axi->qos_regs);
+ else if (sysfs_streq(data, "unaligned"))
+ writeb(0, axi->qos_regs);
+ else
+ return -EINVAL;
+
+ return count;
+}
+static DEVICE_ATTR_RW(inject_error);
+
+static struct attribute *bt1_axi_sysfs_attrs[] = {
+ &dev_attr_count.attr,
+ &dev_attr_inject_error.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bt1_axi_sysfs);
+
+static void bt1_axi_remove_sysfs(void *data)
+{
+ struct bt1_axi *axi = data;
+
+ device_remove_groups(axi->dev, bt1_axi_sysfs_groups);
+}
+
+static int bt1_axi_init_sysfs(struct bt1_axi *axi)
+{
+ int ret;
+
+ ret = device_add_groups(axi->dev, bt1_axi_sysfs_groups);
+ if (ret) {
+ dev_err(axi->dev, "Failed to add sysfs files group\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(axi->dev, bt1_axi_remove_sysfs, axi);
+ if (ret)
+ dev_err(axi->dev, "Can't add AXI EHB sysfs remove action\n");
+
+ return ret;
+}
+
+static int bt1_axi_probe(struct platform_device *pdev)
+{
+ struct bt1_axi *axi;
+ int ret;
+
+ axi = bt1_axi_create_data(pdev);
+ if (IS_ERR(axi))
+ return PTR_ERR(axi);
+
+ ret = bt1_axi_request_regs(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_rst(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_clk(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_request_irq(axi);
+ if (ret)
+ return ret;
+
+ ret = bt1_axi_init_sysfs(axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bt1_axi_of_match[] = {
+ { .compatible = "baikal,bt1-axi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bt1_axi_of_match);
+
+static struct platform_driver bt1_axi_driver = {
+ .probe = bt1_axi_probe,
+ .driver = {
+ .name = "bt1-axi",
+ .of_match_table = bt1_axi_of_match
+ }
+};
+module_platform_driver(bt1_axi_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 AXI-bus driver");
+MODULE_LICENSE("GPL v2");
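A fault record in the ISR above is split across two syscon registers: WERRL carries the low 32 bits of the failed address, while WERRH carries the error type in bit 23 and the uppermost address bits in its top byte (the handler itself prints the raw high word). A hedged stand-alone decode of a sample register pair; the values are invented, not taken from real hardware:

        #include <stdio.h>
        #include <stdint.h>

        #define BT1_AXI_WERRH_TYPE     (1U << 23)
        #define BT1_AXI_WERRH_ADDR_FLD 24

        int main(void)
        {
                /* Example register contents. */
                uint32_t werrl = 0x1f059000;
                uint32_t werrh = BT1_AXI_WERRH_TYPE | (0x1U << BT1_AXI_WERRH_ADDR_FLD);

                printf("AXI-bus fault: %s, upper addr bits 0x%02x, low addr 0x%08x\n",
                       werrh & BT1_AXI_WERRH_TYPE ? "no slave" : "slave protocol error",
                       werrh >> BT1_AXI_WERRH_ADDR_FLD, werrl);
                return 0;
        }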
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index c8b1c3842c1a..2a473c09bc33 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -27,7 +27,16 @@ static bool fsl_mc_device_match(struct fsl_mc_device *mc_dev,
{
return mc_dev->obj_desc.id == obj_desc->id &&
strcmp(mc_dev->obj_desc.type, obj_desc->type) == 0;
+}
+static bool fsl_mc_obj_desc_is_allocatable(struct fsl_mc_obj_desc *obj)
+{
+ if (strcmp(obj->type, "dpmcp") == 0 ||
+ strcmp(obj->type, "dpcon") == 0 ||
+ strcmp(obj->type, "dpbp") == 0)
+ return true;
+ else
+ return false;
}
static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data)
@@ -150,6 +159,27 @@ static void check_plugged_state_change(struct fsl_mc_device *mc_dev,
}
}
+static void fsl_mc_obj_device_add(struct fsl_mc_device *mc_bus_dev,
+ struct fsl_mc_obj_desc *obj_desc)
+{
+ int error;
+ struct fsl_mc_device *child_dev;
+
+ /*
+ * Check if device is already known to Linux:
+ */
+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
+ if (child_dev) {
+ check_plugged_state_change(child_dev, obj_desc);
+ put_device(&child_dev->dev);
+ } else {
+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
+ &child_dev);
+ if (error < 0)
+ return;
+ }
+}
+
/**
* dprc_add_new_devices - Adds devices to the logical bus for a DPRC
*
@@ -166,30 +196,23 @@ static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev,
struct fsl_mc_obj_desc *obj_desc_array,
int num_child_objects_in_mc)
{
- int error;
int i;
+ /* probe the allocatable objects first */
for (i = 0; i < num_child_objects_in_mc; i++) {
- struct fsl_mc_device *child_dev;
struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- if (strlen(obj_desc->type) == 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
+ }
- /*
- * Check if device is already known to Linux:
- */
- child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev);
- if (child_dev) {
- check_plugged_state_change(child_dev, obj_desc);
- put_device(&child_dev->dev);
- continue;
- }
+ for (i = 0; i < num_child_objects_in_mc; i++) {
+ struct fsl_mc_obj_desc *obj_desc = &obj_desc_array[i];
- error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev,
- &child_dev);
- if (error < 0)
- continue;
+ if (strlen(obj_desc->type) > 0 &&
+ !fsl_mc_obj_desc_is_allocatable(obj_desc))
+ fsl_mc_obj_device_add(mc_bus_dev, obj_desc);
}
}
@@ -592,6 +615,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
bool mc_io_created = false;
bool msi_domain_set = false;
u16 major_ver, minor_ver;
+ struct irq_domain *mc_msi_domain;
if (!is_fsl_mc_bus_dprc(mc_dev))
return -EINVAL;
@@ -621,31 +645,15 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
return error;
mc_io_created = true;
+ }
- /*
- * Inherit parent MSI domain:
- */
- dev_set_msi_domain(&mc_dev->dev,
- dev_get_msi_domain(parent_dev));
- msi_domain_set = true;
+ mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+ if (!mc_msi_domain) {
+ dev_warn(&mc_dev->dev,
+ "WARNING: MC bus without interrupt support\n");
} else {
- /*
- * This is a root DPRC
- */
- struct irq_domain *mc_msi_domain;
-
- if (dev_is_fsl_mc(parent_dev))
- return -EINVAL;
-
- error = fsl_mc_find_msi_domain(parent_dev,
- &mc_msi_domain);
- if (error < 0) {
- dev_warn(&mc_dev->dev,
- "WARNING: MC bus without interrupt support\n");
- } else {
- dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
- msi_domain_set = true;
- }
+ dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+ msi_domain_set = true;
}
error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 40526da5c6a6..b69794e7364d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -18,6 +18,8 @@
#include <linux/bitops.h>
#include <linux/msi.h>
#include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
#include "fsl-mc-private.h"
@@ -38,6 +40,7 @@ struct fsl_mc {
struct fsl_mc_device *root_mc_bus_dev;
u8 num_translation_ranges;
struct fsl_mc_addr_translation_range *translation_ranges;
+ void *fsl_mc_regs;
};
/**
@@ -56,6 +59,10 @@ struct fsl_mc_addr_translation_range {
phys_addr_t start_phys_addr;
};
+#define FSL_MC_FAPR 0x28
+#define MC_FAPR_PL BIT(18)
+#define MC_FAPR_BMT BIT(17)
+
/**
* fsl_mc_bus_match - device to driver matching callback
* @dev: the fsl-mc device to match against
@@ -118,11 +125,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static int fsl_mc_dma_configure(struct device *dev)
{
struct device *dma_dev = dev;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ u32 input_id = mc_dev->icid;
while (dev_is_fsl_mc(dma_dev))
dma_dev = dma_dev->parent;
- return of_dma_configure(dev, dma_dev->of_node, 0);
+ if (dev_of_node(dma_dev))
+ return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+
+ return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -201,6 +213,31 @@ struct device_type fsl_mc_bus_dpseci_type = {
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_dpseci_type);
+struct device_type fsl_mc_bus_dpdmux_type = {
+ .name = "fsl_mc_bus_dpdmux"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmux_type);
+
+struct device_type fsl_mc_bus_dpdcei_type = {
+ .name = "fsl_mc_bus_dpdcei"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdcei_type);
+
+struct device_type fsl_mc_bus_dpaiop_type = {
+ .name = "fsl_mc_bus_dpaiop"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpaiop_type);
+
+struct device_type fsl_mc_bus_dpci_type = {
+ .name = "fsl_mc_bus_dpci"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpci_type);
+
+struct device_type fsl_mc_bus_dpdmai_type = {
+ .name = "fsl_mc_bus_dpdmai"
+};
+EXPORT_SYMBOL_GPL(fsl_mc_bus_dpdmai_type);
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -217,6 +254,11 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
{ &fsl_mc_bus_dpseci_type, "dpseci" },
+ { &fsl_mc_bus_dpdmux_type, "dpdmux" },
+ { &fsl_mc_bus_dpdcei_type, "dpdcei" },
+ { &fsl_mc_bus_dpaiop_type, "dpaiop" },
+ { &fsl_mc_bus_dpci_type, "dpci" },
+ { &fsl_mc_bus_dpdmai_type, "dpdmai" },
{ NULL, NULL }
};
int i;
@@ -368,8 +410,8 @@ EXPORT_SYMBOL_GPL(fsl_mc_get_version);
/**
* fsl_mc_get_root_dprc - function to traverse to the root dprc
*/
-static void fsl_mc_get_root_dprc(struct device *dev,
- struct device **root_dprc_dev)
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev)
{
if (!dev) {
*root_dprc_dev = NULL;
@@ -863,8 +905,11 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
struct fsl_mc_io *mc_io = NULL;
int container_id;
phys_addr_t mc_portal_phys_addr;
- u32 mc_portal_size;
- struct resource res;
+ u32 mc_portal_size, mc_stream_id;
+ struct resource *plat_res;
+
+ if (!iommu_present(&fsl_mc_bus_type))
+ return -EPROBE_DEFER;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
@@ -872,19 +917,33 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mc);
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+ if (IS_ERR(mc->fsl_mc_regs))
+ return PTR_ERR(mc->fsl_mc_regs);
+
+ if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+ mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+ /*
+ * HW ORs the PL and BMT bit, places the result in bit 15 of
+ * the StreamID and ORs in the ICID. Calculate it accordingly.
+ */
+ mc_stream_id = (mc_stream_id & 0xffff) |
+ ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+ 0x4000 : 0);
+ error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT,
+ &mc_stream_id);
+ if (error)
+ dev_warn(&pdev->dev, "failed to configure dma: %d.\n",
+ error);
+ }
+
/*
* Get physical address of MC portal for the root DPRC:
*/
- error = of_address_to_resource(pdev->dev.of_node, 0, &res);
- if (error < 0) {
- dev_err(&pdev->dev,
- "of_address_to_resource() failed for %pOF\n",
- pdev->dev.of_node);
- return error;
- }
-
- mc_portal_phys_addr = res.start;
- mc_portal_size = resource_size(&res);
+ plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mc_portal_phys_addr = plat_res->start;
+ mc_portal_size = resource_size(plat_res);
error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
mc_portal_size, NULL,
FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
@@ -901,11 +960,13 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
- error = get_mc_addr_translation_ranges(&pdev->dev,
- &mc->translation_ranges,
- &mc->num_translation_ranges);
- if (error < 0)
- goto error_cleanup_mc_io;
+ if (dev_of_node(&pdev->dev)) {
+ error = get_mc_addr_translation_ranges(&pdev->dev,
+ &mc->translation_ranges,
+ &mc->num_translation_ranges);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+ }
error = dprc_get_container_id(mc_io, 0, &container_id);
if (error < 0) {
@@ -932,6 +993,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
mc->root_mc_bus_dev = mc_bus_dev;
+ mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
return 0;
error_cleanup_mc_io:
@@ -965,11 +1027,18 @@ static const struct of_device_id fsl_mc_bus_match_table[] = {
MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+ {"NXP0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
static struct platform_driver fsl_mc_bus_driver = {
.driver = {
.name = "fsl_mc_bus",
.pm = NULL,
.of_match_table = fsl_mc_bus_match_table,
+ .acpi_match_table = fsl_mc_bus_acpi_match_table,
},
.probe = fsl_mc_bus_probe,
.remove = fsl_mc_bus_remove,
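The ACPI path in the probe above derives the SMMU stream ID from the FAPR register: the low 16 bits carry the ICID, and if either the PL or BMT bit is set the hardware folds that into the 0x4000 mask the code uses (bit 14 counting from zero; the comment calls it bit 15). A stand-alone rendering of that calculation with a sample FAPR value, invented for illustration:

        #include <stdio.h>
        #include <stdint.h>

        #define MC_FAPR_PL  (1U << 18)
        #define MC_FAPR_BMT (1U << 17)

        static uint32_t mc_stream_id_from_fapr(uint32_t fapr)
        {
                /* Low 16 bits carry the ICID; PL|BMT collapse into 0x4000. */
                return (fapr & 0xffff) |
                       ((fapr & (MC_FAPR_PL | MC_FAPR_BMT)) ? 0x4000 : 0);
        }

        int main(void)
        {
                uint32_t fapr = MC_FAPR_BMT | 0x0042; /* example: BMT set, ICID 0x42 */

                printf("FAPR 0x%08x -> stream ID 0x%04x\n",
                       fapr, mc_stream_id_from_fapr(fapr)); /* 0x4042 */
                return 0;
        }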
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index 8b9c66d7c4ff..8edadf05cbb7 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
+#include <linux/acpi_iort.h>
#include "fsl-mc-private.h"
@@ -177,23 +178,36 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
- struct irq_domain **mc_msi_domain)
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
{
+ struct device *root_dprc_dev;
+ struct device *bus_dev;
struct irq_domain *msi_domain;
- struct device_node *mc_of_node = mc_platform_dev->of_node;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
- DOMAIN_BUS_FSL_MC_MSI);
- if (!msi_domain) {
- pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
- mc_of_node);
+ fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+ bus_dev = root_dprc_dev->parent;
+
+ if (bus_dev->of_node) {
+ msi_domain = of_msi_map_get_device_domain(dev,
+ mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
- return -ENOENT;
+ /*
+ * If the msi-map property is missing, assume that all the
+ * child containers inherit the domain from the parent.
+ */
+ if (!msi_domain)
+ msi_domain = of_msi_get_domain(bus_dev,
+ bus_dev->of_node,
+ DOMAIN_BUS_FSL_MC_MSI);
+ } else {
+ msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+ DOMAIN_BUS_FSL_MC_MSI);
}
- *mc_msi_domain = msi_domain;
- return 0;
+ return msi_domain;
}
static void fsl_mc_msi_free_descs(struct device *dev)
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 21ca8c756ee7..7a46a12eb747 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -595,8 +595,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
void fsl_mc_msi_domain_free_irqs(struct device *dev);
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
- struct irq_domain **mc_msi_domain);
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
unsigned int irq_count);
@@ -613,6 +612,9 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
bool fsl_mc_is_root_dprc(struct device *dev);
+void fsl_mc_get_root_dprc(struct device *dev,
+ struct device **root_dprc_dev);
+
struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
struct fsl_mc_device *mc_bus_dev);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 6ae48ad80409..a30b53f1d87d 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -82,7 +82,7 @@ int __must_check fsl_create_mc_io(struct device *dev,
mc_io->portal_phys_addr = mc_portal_phys_addr;
mc_io->portal_size = mc_portal_size;
if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_init(&mc_io->spinlock);
+ raw_spin_lock_init(&mc_io->spinlock);
else
mutex_init(&mc_io->mutex);
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index 3221a7fbaf0a..85a0225db522 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -251,7 +251,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_lock_irqsave(&mc_io->spinlock, irq_flags);
+ raw_spin_lock_irqsave(&mc_io->spinlock, irq_flags);
else
mutex_lock(&mc_io->mutex);
@@ -287,7 +287,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
error = 0;
common_exit:
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
- spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
+ raw_spin_unlock_irqrestore(&mc_io->spinlock, irq_flags);
else
mutex_unlock(&mc_io->mutex);
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index ebad5eb48e5a..0b38014d040e 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -43,10 +43,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
-
- if (unlikely(!sequence_id))
- sequence_id = 1;
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK);
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
@@ -121,7 +118,8 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
ee = mhi_get_exec_env(mhi_cntrl);
}
- dev_dbg(dev, "Waiting for image download completion, current EE: %s\n",
+ dev_dbg(dev,
+ "Waiting for RDDM image download via BHIe, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
@@ -152,11 +150,14 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
u32 rx_status;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
+ dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
+
/* Wait for the image download to complete */
wait_event_timeout(mhi_cntrl->state_event,
mhi_read_reg_field(mhi_cntrl, base,
@@ -174,8 +175,10 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status, sequence_id;
+ int ret;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
@@ -183,6 +186,9 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
return -EIO;
}
+ sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n",
+ sequence_id);
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
@@ -191,26 +197,25 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
- sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
/* Wait for the image download to complete */
- wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl, base,
- BHIE_TXVECSTATUS_OFFS,
- BHIE_TXVECSTATUS_STATUS_BMSK,
- BHIE_TXVECSTATUS_STATUS_SHFT,
- &tx_status) || tx_status,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl, base,
+ BHIE_TXVECSTATUS_OFFS,
+ BHIE_TXVECSTATUS_STATUS_BMSK,
+ BHIE_TXVECSTATUS_STATUS_SHFT,
+ &tx_status) || tx_status,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
+ tx_status != BHIE_TXVECSTATUS_STATUS_XFER_COMPL)
return -EIO;
- return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+ return (!ret) ? -ETIMEDOUT : 0;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
@@ -239,14 +244,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
goto invalid_pm_state;
}
- dev_dbg(dev, "Starting SBL download via BHI\n");
+ session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK);
+ dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n",
+ session_id);
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
- session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, session_id);
read_unlock_bh(pm_lock);
@@ -377,30 +383,18 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
}
}
-void mhi_fw_load_worker(struct work_struct *work)
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl;
const struct firmware *firmware = NULL;
struct image_info *image_info;
- struct device *dev;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
const char *fw_name;
void *buf;
dma_addr_t dma_addr;
size_t size;
int ret;
- mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
- dev = &mhi_cntrl->mhi_dev->dev;
-
- dev_dbg(dev, "Waiting for device to enter PBL from: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee));
-
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_IN_PBL(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
-
- if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
return;
}
@@ -446,7 +440,12 @@ void mhi_fw_load_worker(struct work_struct *work)
release_firmware(firmware);
/* Error or in EDL mode, we're done */
- if (ret || mhi_cntrl->ee == MHI_EE_EDL)
+ if (ret) {
+ dev_err(dev, "MHI did not load SBL, ret:%d\n", ret);
+ return;
+ }
+
+ if (mhi_cntrl->ee == MHI_EE_EDL)
return;
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -474,8 +473,10 @@ fw_load_ee_pthru:
if (!mhi_cntrl->fbc_download)
return;
- if (ret)
+ if (ret) {
+ dev_err(dev, "MHI did not enter READY state\n");
goto error_read;
+ }
/* Wait for the SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
@@ -493,6 +494,8 @@ fw_load_ee_pthru:
ret = mhi_fw_load_amss(mhi_cntrl,
/* Vector table is the last entry */
&image_info->mhi_buf[image_info->entries - 1]);
+ if (ret)
+ dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret);
release_firmware(firmware);
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 1f8c82603179..e43a190a7a36 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -34,6 +34,8 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_READY] = "READY",
[DEV_ST_TRANSITION_SBL] = "SBL",
[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
+ [DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
+ [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
const char * const mhi_state_str[MHI_STATE_MAX] = {
@@ -835,8 +837,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
spin_lock_init(&mhi_cntrl->transition_lock);
spin_lock_init(&mhi_cntrl->wlock);
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
- INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
- INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -864,6 +864,10 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mutex_init(&mhi_chan->mutex);
init_completion(&mhi_chan->completion);
rwlock_init(&mhi_chan->lock);
+
+ /* used in setting bei field of TRE */
+ mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
+ mhi_chan->intmod = mhi_event->intmod;
}
if (mhi_cntrl->bounce_buf) {
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 095d95bc0e37..b1f640b75a94 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -386,6 +386,8 @@ enum dev_st_transition {
DEV_ST_TRANSITION_READY,
DEV_ST_TRANSITION_SBL,
DEV_ST_TRANSITION_MISSION_MODE,
+ DEV_ST_TRANSITION_SYS_ERR,
+ DEV_ST_TRANSITION_DISABLE,
DEV_ST_TRANSITION_MAX,
};
@@ -452,6 +454,7 @@ enum mhi_pm_state {
#define PRIMARY_CMD_RING 0
#define MHI_DEV_WAKE_DB 127
#define MHI_MAX_MTU 0xffff
+#define MHI_RANDOM_U32_NONZERO(bmsk) (prandom_u32_max(bmsk) + 1)
enum mhi_er_type {
MHI_ER_TYPE_INVALID = 0x0,
@@ -586,7 +589,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
enum dev_st_transition state);
void mhi_pm_st_worker(struct work_struct *work);
-void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl);
-void mhi_fw_load_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
@@ -627,6 +630,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl);
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
+void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
@@ -670,8 +674,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags);
-
+ struct mhi_buf_info *info, enum mhi_flags flags);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info);
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 97e06cc586e4..1f622ce6be8b 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -258,7 +258,7 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
-static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
+void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -270,6 +270,7 @@ static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
}
+EXPORT_SYMBOL_GPL(mhi_notify);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
@@ -368,30 +369,37 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
return IRQ_HANDLED;
}
-irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
+irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
- struct mhi_controller *mhi_cntrl = dev;
+ struct mhi_controller *mhi_cntrl = priv;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state = MHI_STATE_MAX;
enum mhi_pm_state pm_state = 0;
enum mhi_ee_type ee = 0;
write_lock_irq(&mhi_cntrl->pm_lock);
- if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
- state = mhi_get_mhi_state(mhi_cntrl);
- ee = mhi_cntrl->ee;
- mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ goto exit_intvec;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ ee = mhi_cntrl->ee;
+ mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
+ dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
+ TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
+ TO_MHI_STATE_STR(state));
+
if (state == MHI_STATE_SYS_ERR) {
- dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
+ dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- /* If device in RDDM don't bother processing SYS error */
- if (mhi_cntrl->ee == MHI_EE_RDDM) {
- if (mhi_cntrl->ee != ee) {
+ /* If device supports RDDM don't bother processing SYS error */
+ if (mhi_cntrl->rddm_image) {
+ if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
wake_up_all(&mhi_cntrl->state_event);
}
@@ -405,7 +413,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
if (MHI_IN_PBL(ee))
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
else
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
exit_intvec:
@@ -513,7 +521,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd =
+ min_t(u16, xfer_len, buf_info->len);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
@@ -597,7 +608,9 @@ static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
-EOVERFLOW : 0;
- result.bytes_xferd = xfer_len;
+
+ /* truncate to buf len if xfer_len is larger */
+ result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
@@ -722,13 +735,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
{
enum mhi_pm_state new_state;
+ /* skip SYS_ERROR handling if RDDM supported */
+ if (mhi_cntrl->ee == MHI_EE_RDDM ||
+ mhi_cntrl->rddm_image)
+ break;
+
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
break;
}
default:
@@ -774,9 +792,18 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
}
case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
break;
default:
dev_err(dev, "Unhandled event type: %d\n", type);
@@ -819,14 +846,23 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
- mhi_chan = &mhi_cntrl->mhi_chan[chan];
-
- if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
- parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
- } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
- parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
+
+ WARN_ON(chan >= mhi_cntrl->max_chan);
+
+ /*
+ * Only process the event ring elements whose channel
+ * ID is within the maximum supported range.
+ */
+ if (chan < mhi_cntrl->max_chan) {
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+
+ if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
+ parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
+ parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
+ event_quota--;
+ }
}
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
@@ -896,7 +932,7 @@ void mhi_ctrl_ev_task(unsigned long data)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
- schedule_work(&mhi_cntrl->syserr_worker);
+ mhi_pm_sys_err_handler(mhi_cntrl);
}
}
@@ -918,9 +954,7 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
@@ -945,27 +979,15 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
+ buf_info.v_addr = skb->data;
+ buf_info.cb_buf = skb;
+ buf_info.len = len;
- buf_info->v_addr = skb->data;
- buf_info->cb_buf = skb;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- goto map_error;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
-
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -979,11 +1001,6 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
read_unlock_bh(&mhi_cntrl->pm_lock);
return 0;
-
-map_error:
- read_unlock_bh(&mhi_cntrl->pm_lock);
-
- return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
@@ -995,9 +1012,8 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
mhi_dev->dl_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
- struct mhi_buf_info *buf_info;
- struct mhi_tre *mhi_tre;
+ struct mhi_buf_info buf_info = { };
+ int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
@@ -1024,25 +1040,16 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
- /* Generate the TRE */
- buf_info = buf_ring->wp;
- WARN_ON(buf_info->used);
- buf_info->p_addr = mhi_buf->dma_addr;
- buf_info->pre_mapped = true;
- buf_info->cb_buf = mhi_buf;
- buf_info->wp = tre_ring->wp;
- buf_info->dir = mhi_chan->dir;
- buf_info->len = len;
-
- mhi_tre = tre_ring->wp;
-
- mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
- mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+ buf_info.p_addr = mhi_buf->dma_addr;
+ buf_info.cb_buf = mhi_buf;
+ buf_info.pre_mapped = true;
+ buf_info.len = len;
- /* increment WP */
- mhi_add_ring_element(mhi_cntrl, tre_ring);
- mhi_add_ring_element(mhi_cntrl, buf_ring);
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
+ if (unlikely(ret)) {
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ return ret;
+ }
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
@@ -1060,7 +1067,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
EXPORT_SYMBOL_GPL(mhi_queue_dma);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
- void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
+ struct mhi_buf_info *info, enum mhi_flags flags)
{
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_tre *mhi_tre;
@@ -1072,15 +1079,22 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
tre_ring = &mhi_chan->tre_ring;
buf_info = buf_ring->wp;
- buf_info->v_addr = buf;
- buf_info->cb_buf = cb;
+ WARN_ON(buf_info->used);
+ buf_info->pre_mapped = info->pre_mapped;
+ if (info->pre_mapped)
+ buf_info->p_addr = info->p_addr;
+ else
+ buf_info->v_addr = info->v_addr;
+ buf_info->cb_buf = info->cb_buf;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
- buf_info->len = buf_len;
+ buf_info->len = info->len;
- ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
- if (ret)
- return ret;
+ if (!info->pre_mapped) {
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ return ret;
+ }
eob = !!(flags & MHI_EOB);
eot = !!(flags & MHI_EOT);
@@ -1089,7 +1103,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
- mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
/* increment WP */
@@ -1106,6 +1120,7 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring;
+ struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;
@@ -1121,7 +1136,11 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
+ buf_info.v_addr = buf;
+ buf_info.cb_buf = buf;
+ buf_info.len = len;
+
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret))
return ret;
@@ -1322,7 +1341,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
while (nr_el--) {
void *buf;
-
+ struct mhi_buf_info info = { };
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -1330,8 +1349,10 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
}
/* Prepare transfer descriptors */
- ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
- len, MHI_EOT);
+ info.v_addr = buf;
+ info.cb_buf = buf;
+ info.len = len;
+ ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
if (ret) {
kfree(buf);
goto error_pre_alloc;
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index dc83d65f7784..796098078083 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -288,14 +288,18 @@ int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
- write_lock_irq(&mhi_chan->lock);
- if (mhi_chan->db_cfg.reset_req)
+ if (mhi_chan->db_cfg.reset_req) {
+ write_lock_irq(&mhi_chan->lock);
mhi_chan->db_cfg.db_mode = true;
+ write_unlock_irq(&mhi_chan->lock);
+ }
+
+ read_lock_irq(&mhi_chan->lock);
/* Only ring DB if ring is not empty */
if (tre_ring->base && tre_ring->wp != tre_ring->rp)
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
- write_unlock_irq(&mhi_chan->lock);
+ read_unlock_irq(&mhi_chan->lock);
}
mhi_cntrl->wake_put(mhi_cntrl, false);
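The locking change above is a classic narrowing: take the write lock only for the brief db_mode update, then use the read lock while ringing the doorbell so concurrent channel users are not serialized. A generic sketch of the split, assuming hypothetical demo_* names:

#include <linux/spinlock.h>

struct demo_chan {
	rwlock_t lock;
	bool db_mode;
};

static void demo_update_then_ring(struct demo_chan *ch, bool reset_req)
{
	if (reset_req) {
		/* Exclusive access only for the actual state change. */
		write_lock_irq(&ch->lock);
		ch->db_mode = true;
		write_unlock_irq(&ch->lock);
	}

	/* Ringing the doorbell only needs to exclude writers. */
	read_lock_irq(&ch->lock);
	/* demo_ring_db(ch); */
	read_unlock_irq(&ch->lock);
}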
@@ -449,19 +453,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
to_mhi_pm_state_str(transition_state));
/* We must notify MHI control driver so it can clean up first */
- if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
- /*
- * If controller supports RDDM, we do not process
- * SYS error state, instead we will jump directly
- * to RDDM state
- */
- if (mhi_cntrl->rddm_image) {
- dev_dbg(dev,
- "Controller supports RDDM, so skip SYS_ERR\n");
- return;
- }
+ if (transition_state == MHI_PM_SYS_ERR_PROCESS)
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
- }
mutex_lock(&mhi_cntrl->pm_mutex);
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -527,8 +520,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
mutex_unlock(&mhi_cntrl->pm_mutex);
dev_dbg(dev, "Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
- flush_work(&mhi_cntrl->st_worker);
- flush_work(&mhi_cntrl->fw_worker);
dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device);
@@ -608,13 +599,17 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
}
/* SYS_ERR worker */
-void mhi_pm_sys_err_worker(struct work_struct *work)
+void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
- struct mhi_controller *mhi_cntrl = container_of(work,
- struct mhi_controller,
- syserr_worker);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ /* skip if controller supports RDDM */
+ if (mhi_cntrl->rddm_image) {
+ dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
+ return;
+ }
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
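Instead of a dedicated syserr work item, SYS_ERROR is now queued as one more state transition, so the single st_worker serializes it against PBL/SBL/READY handling. The underlying pattern is one worker draining a locked transition list; a sketch with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_transition {
	struct list_head node;
	int state;
};

struct demo_ctrl {
	spinlock_t list_lock;
	struct list_head transitions;
	struct work_struct st_worker;
};

/* Queue a transition; the single worker serializes all of them. */
static int demo_queue_transition(struct demo_ctrl *ctrl, int state)
{
	struct demo_transition *t = kmalloc(sizeof(*t), GFP_ATOMIC);

	if (!t)
		return -ENOMEM;

	t->state = state;
	spin_lock_irq(&ctrl->list_lock);
	list_add_tail(&t->node, &ctrl->transitions);
	spin_unlock_irq(&ctrl->list_lock);
	schedule_work(&ctrl->st_worker);

	return 0;
}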
/* Device State Transition worker */
@@ -643,7 +638,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (MHI_IN_PBL(mhi_cntrl->ee))
- wake_up_all(&mhi_cntrl->state_event);
+ mhi_fw_load_handler(mhi_cntrl);
break;
case DEV_ST_TRANSITION_SBL:
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -662,6 +657,14 @@ void mhi_pm_st_worker(struct work_struct *work)
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
break;
+		case DEV_ST_TRANSITION_SYS_ERR:
+			mhi_pm_disable_transition(mhi_cntrl,
+						  MHI_PM_SYS_ERR_PROCESS);
+			break;
+		case DEV_ST_TRANSITION_DISABLE:
+			mhi_pm_disable_transition(mhi_cntrl,
+						  MHI_PM_SHUTDOWN_PROCESS);
+			break;
default:
break;
}
@@ -669,6 +672,149 @@ void mhi_pm_st_worker(struct work_struct *work)
}
}
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state new_state;
+ int ret;
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return -EINVAL;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Return busy if there are any pending resources */
+ if (atomic_read(&mhi_cntrl->dev_wake))
+ return -EBUSY;
+
+ /* Take MHI out of M2 state */
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ mhi_cntrl->dev_state == MHI_STATE_M1 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+			"Could not enter M0/M1 state\n");
+ return -EIO;
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+
+ if (atomic_read(&mhi_cntrl->dev_wake)) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ return -EBUSY;
+ }
+
+ dev_info(dev, "Allowing M3 transition\n");
+ new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
+ if (new_state != MHI_PM_M3_ENTER) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_ENTER),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M3 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ dev_info(dev, "Wait for M3 completion\n");
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M3 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M3 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Notify clients about entering LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
+ mutex_unlock(&itr->mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_suspend);
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_chan *itr, *tmp;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_pm_state cur_state;
+ int ret;
+
+ dev_info(dev, "Entered with PM state: %s, MHI state: %s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state));
+
+ if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
+ return 0;
+
+ if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
+ return -EIO;
+
+ /* Notify clients about exiting LPM */
+ list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
+ mutex_lock(&itr->mutex);
+ if (itr->mhi_dev)
+ mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
+ mutex_unlock(&itr->mutex);
+ }
+
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
+ if (cur_state != MHI_PM_M3_EXIT) {
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+		dev_err(dev,
+ "Error setting to PM state: %s from: %s\n",
+ to_mhi_pm_state_str(MHI_PM_M3_EXIT),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ /* Set MHI to M0 and wait for completion */
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ mhi_cntrl->dev_state == MHI_STATE_M0 ||
+ MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+ if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+ dev_err(dev,
+ "Did not enter M0 state, MHI state: %s, PM state: %s\n",
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume);
+
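mhi_pm_suspend() and mhi_pm_resume() are exported for the controller glue, which is expected to call them from its own power-management hooks. A hedged sketch of that wiring (demo_* names are hypothetical, and the sketch assumes the controller pointer was stashed as driver data):

#include <linux/device.h>
#include <linux/mhi.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_suspend(mhi_cntrl);
}

static int demo_resume(struct device *dev)
{
	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);

	return mhi_pm_resume(mhi_cntrl);
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
};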
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
@@ -760,6 +906,7 @@ static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
+ enum mhi_state state;
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -829,13 +976,36 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
goto error_bhi_offset;
}
+ state = mhi_get_mhi_state(mhi_cntrl);
+ if (state == MHI_STATE_SYS_ERR) {
+ mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
+ ret = wait_event_timeout(mhi_cntrl->state_event,
+ MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
+ mhi_read_reg_field(mhi_cntrl,
+ mhi_cntrl->regs,
+ MHICTRL,
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT,
+ &val) ||
+ !val,
+ msecs_to_jiffies(mhi_cntrl->timeout_ms));
+		if (!ret) {
+ ret = -EIO;
+ dev_info(dev, "Failed to reset MHI due to syserr state\n");
+ goto error_bhi_offset;
+ }
+
+		/*
+		 * Device clears INTVEC as part of RESET processing,
+		 * so re-program it
+		 */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ }
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
- if (next_state == DEV_ST_TRANSITION_PBL)
- schedule_work(&mhi_cntrl->fw_worker);
-
mhi_queue_state_transition(mhi_cntrl, next_state);
mutex_unlock(&mhi_cntrl->pm_mutex);
@@ -876,7 +1046,12 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
}
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+
+ mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
+
+ /* Wait for shutdown to complete */
+ flush_work(&mhi_cntrl->st_worker);
+
mhi_deinit_free_irq(mhi_cntrl);
if (!mhi_cntrl->pre_init) {
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 1b14256376d2..9f7ed1fcd428 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -13,6 +13,8 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -337,9 +339,22 @@ static phys_addr_t mips_cdmm_cur_base(void)
* Picking a suitable physical address at which to map the CDMM region is
* platform specific, so this weak function can be overridden by platform
* code to pick a suitable value if none is configured by the bootloader.
+ * By default this method tries to find a CDMM-specific node in the system
+ * DTB. Note that this won't work for the early serial console.
*/
phys_addr_t __weak mips_cdmm_phys_base(void)
{
+ struct device_node *np;
+ struct resource res;
+ int err;
+
+ np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
+ if (np) {
+ err = of_address_to_resource(np, 0, &res);
+ if (!err)
+ return res.start;
+ }
+
return 0;
}
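Because mips_cdmm_phys_base() stays __weak, a platform without the DT node can still pin the region by providing a strong definition. A sketch of such an override; the address is an arbitrary example, not a real board's CDMM base:

#include <linux/types.h>

/* Strong definition overrides the __weak DT-based default above. */
phys_addr_t mips_cdmm_phys_base(void)
{
	return 0x1fc10000;	/* hypothetical platform-specific base */
}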
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e5f5f48d69d2..efb088df1276 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -221,6 +221,34 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ u32 sysc_mask, syss_done, rstval;
+ int syss_offset, error = 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (syss_offset >= 0) {
+ error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
+ rstval, (rstval & ddata->cfg.syss_mask) ==
+ syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+
+ } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
+ rstval, !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ }
+
+ return error;
+}
+
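sysc_wait_softreset() leans on readx_poll_timeout_atomic(), which repeatedly calls an accessor until a condition holds or the timeout budget runs out. A standalone sketch of that shape, with a hypothetical status register at offset 0x14 and a hypothetical done bit:

#include <linux/bits.h>
#include <linux/iopoll.h>

static u32 demo_read_status(void __iomem *base)
{
	return readl(base + 0x14);	/* hypothetical status offset */
}

static int demo_wait_reset_done(void __iomem *base)
{
	u32 val;

	/* Poll every 100us, give up after 10000us total. */
	return readx_poll_timeout_atomic(demo_read_status, base, val,
					 val & BIT(0), 100, 10000);
}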
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -925,18 +953,47 @@ static int sysc_enable_module(struct device *dev)
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
+ int error;
ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+			dev_err(ddata->dev,
+				"Failed to enable optional clocks: %i\n",
+				error);
+ return error;
+ }
+ }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
- /* Set CLOCKACTIVITY, we only use it for ick */
+	/*
+	 * Set CLOCKACTIVITY; we only use it for ick, and we only configure
+	 * it based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the
+	 * hardware capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+	 */
if (regbits->clkact_shift >= 0 &&
- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
/* Set SIDLE mode */
@@ -991,6 +1048,9 @@ set_autoidle:
sysc_write_sysconfig(ddata, reg);
}
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
@@ -1071,6 +1131,9 @@ set_sidle:
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
return 0;
}
@@ -1215,7 +1278,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
return pm_runtime_force_suspend(dev);
@@ -1227,7 +1291,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
return pm_runtime_force_resume(dev);
@@ -1275,13 +1340,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
SYSC_QUIRK_LEGACY_IDLE),
- SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
- 0),
- /* Some timers on omap4 and later */
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
- 0),
- SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
- 0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1337,6 +1395,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
+ SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
@@ -1404,12 +1466,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff, 0),
+ SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000040, 0xffffffff, 0),
+ SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000011, 0xffffffff, 0),
SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
- SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
@@ -1488,7 +1555,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
int manager_count;
- bool framedonetv_irq;
+ bool framedonetv_irq = true;
u32 val, irq_mask = 0;
switch (sysc_soc->soc) {
@@ -1505,6 +1572,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
break;
case SOC_AM4:
manager_count = 1;
+ framedonetv_irq = false;
break;
case SOC_UNKNOWN:
default:
@@ -1663,8 +1731,8 @@ static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
local_irq_save(flags);
/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
- error = readl_poll_timeout(ddata->module_va + 0x44, val,
- !(val & BIT(0)), 100, 50);
+ error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
if (error)
dev_warn(ddata->dev, "rtc busy timeout\n");
/* Now we have ~15 microseconds to read/write various registers */
@@ -1781,7 +1849,7 @@ static int sysc_clockdomain_init(struct sysc *ddata)
switch (ddata->nr_clocks) {
case 2:
ick = ddata->clocks[SYSC_ICK];
- /* fallthrough */
+ fallthrough;
case 1:
fck = ddata->clocks[SYSC_FCK];
break;
@@ -1822,11 +1890,10 @@ static int sysc_legacy_init(struct sysc *ddata)
*/
static int sysc_reset(struct sysc *ddata)
{
- int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
- u32 sysc_mask, syss_done;
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
@@ -1835,11 +1902,6 @@ static int sysc_reset(struct sysc *ddata)
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
- syss_done = 0;
- else
- syss_done = ddata->cfg.syss_mask;
-
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);
@@ -1856,18 +1918,9 @@ static int sysc_reset(struct sysc *ddata)
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
- /* Poll on reset status */
- if (syss_offset >= 0) {
- error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
- (rstval & ddata->cfg.syss_mask) ==
- syss_done,
- 100, MAX_MODULE_SOFTRESET_WAIT);
-
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
- error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
- !(rstval & sysc_mask),
- 100, MAX_MODULE_SOFTRESET_WAIT);
- }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
if (ddata->reset_done_quirk)
ddata->reset_done_quirk(ddata);
@@ -2744,6 +2797,17 @@ static int sysc_init_soc(struct sysc *ddata)
if (match && match->data)
sysc_soc->soc = (int)match->data;
+ /* Ignore devices that are not available on HS and EMU SoCs */
+ if (!sysc_soc->general_purpose) {
+ switch (sysc_soc->soc) {
+ case SOC_3430 ... SOC_3630:
+ sysc_add_disabled(0x48304000); /* timer12 */
+ break;
+ default:
+ break;
+		}
+ }
+
match = soc_device_match(sysc_soc_feat_match);
if (!match)
return 0;
@@ -2803,6 +2867,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
return error;
}
+/*
+ * Ignore timers tagged with no-reset and no-idle. These are likely in use,
+ * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
+ * are needed, we could also look at the timer register configuration.
+ */
+static int sysc_check_active_timer(struct sysc *ddata)
+{
+ if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
+ ddata->cap->type != TI_SYSC_OMAP4_TIMER)
+ return 0;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+ (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+ return -EBUSY;
+
+ return 0;
+}
+
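sysc_check_active_timer() only skips a timer when both quirk bits are set, i.e. when the DT explicitly marked it no-reset and no-idle because something like the clocksource driver owns it. The combined-flag test in isolation, with hypothetical demo names and illustrative bit values:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_QUIRK_NO_IDLE		BIT(0)	/* illustrative values */
#define DEMO_QUIRK_NO_RESET_ON_INIT	BIT(1)

static bool demo_timer_is_reserved(u32 quirks)
{
	return (quirks & DEMO_QUIRK_NO_RESET_ON_INIT) &&
	       (quirks & DEMO_QUIRK_NO_IDLE);
}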
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@@ -2859,6 +2941,10 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
return error;
+ error = sysc_check_active_timer(ddata);
+ if (error)
+ return error;
+
error = sysc_get_clocks(ddata);
if (error)
return error;
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index ff70575b2db6..a58ac0c8e282 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -6,10 +6,61 @@
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
#include <linux/vexpress.h>
+#define SYS_MISC 0x0
+#define SYS_MISC_MASTERSITE (1 << 14)
+
+#define SYS_PROCID0 0x24
+#define SYS_PROCID1 0x28
+#define SYS_HBI_MASK 0xfff
+#define SYS_PROCIDx_HBI_SHIFT 0
+
+#define SYS_CFGDATA 0x40
+
+#define SYS_CFGCTRL 0x44
+#define SYS_CFGCTRL_START (1 << 31)
+#define SYS_CFGCTRL_WRITE (1 << 30)
+#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26)
+#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20)
+#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16)
+#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12)
+#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0)
+
+#define SYS_CFGSTAT 0x48
+#define SYS_CFGSTAT_ERR (1 << 1)
+#define SYS_CFGSTAT_COMPLETE (1 << 0)
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+struct vexpress_syscfg {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head funcs;
+};
+
+struct vexpress_syscfg_func {
+ struct list_head list;
+ struct vexpress_syscfg *syscfg;
+ struct regmap *regmap;
+ int num_templates;
+ u32 template[]; /* Keep it last! */
+};
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
struct vexpress_config_bridge {
struct vexpress_config_bridge_ops *ops;
@@ -18,26 +69,20 @@ struct vexpress_config_bridge {
static DEFINE_MUTEX(vexpress_config_mutex);
-static struct class *vexpress_config_class;
static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER;
-void vexpress_config_set_master(u32 site)
+static void vexpress_config_set_master(u32 site)
{
vexpress_config_site_master = site;
}
-u32 vexpress_config_get_master(void)
-{
- return vexpress_config_site_master;
-}
-
-void vexpress_config_lock(void *arg)
+static void vexpress_config_lock(void *arg)
{
mutex_lock(&vexpress_config_mutex);
}
-void vexpress_config_unlock(void *arg)
+static void vexpress_config_unlock(void *arg)
{
mutex_unlock(&vexpress_config_mutex);
}
@@ -59,7 +104,7 @@ static void vexpress_config_find_prop(struct device_node *node,
}
}
-int vexpress_config_get_topo(struct device_node *node, u32 *site,
+static int vexpress_config_get_topo(struct device_node *node, u32 *site,
u32 *position, u32 *dcc)
{
vexpress_config_find_prop(node, "arm,vexpress,site", site);
@@ -88,9 +133,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
struct regmap *regmap;
struct regmap **res;
- if (WARN_ON(dev->parent->class != vexpress_config_class))
- return ERR_PTR(-ENODEV);
-
bridge = dev_get_drvdata(dev->parent);
if (WARN_ON(!bridge))
return ERR_PTR(-EINVAL);
@@ -113,91 +155,265 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config);
-struct device *vexpress_config_bridge_register(struct device *parent,
- struct vexpress_config_bridge_ops *ops, void *context)
+static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
+ int index, bool write, u32 *data)
{
- struct device *dev;
- struct vexpress_config_bridge *bridge;
+ struct vexpress_syscfg *syscfg = func->syscfg;
+ u32 command, status;
+ int tries;
+ long timeout;
- if (!vexpress_config_class) {
- vexpress_config_class = class_create(THIS_MODULE,
- "vexpress-config");
- if (IS_ERR(vexpress_config_class))
- return (void *)vexpress_config_class;
+ if (WARN_ON(index >= func->num_templates))
+ return -EINVAL;
+
+ command = readl(syscfg->base + SYS_CFGCTRL);
+ if (WARN_ON(command & SYS_CFGCTRL_START))
+ return -EBUSY;
+
+ command = func->template[index];
+ command |= SYS_CFGCTRL_START;
+ command |= write ? SYS_CFGCTRL_WRITE : 0;
+
+ /* Use a canary for reads */
+ if (!write)
+ *data = 0xdeadbeef;
+
+ dev_dbg(syscfg->dev, "func %p, command %x, data %x\n",
+ func, command, *data);
+ writel(*data, syscfg->base + SYS_CFGDATA);
+ writel(0, syscfg->base + SYS_CFGSTAT);
+ writel(command, syscfg->base + SYS_CFGCTRL);
+ mb();
+
+ /* The operation can take ages... Go to sleep, 100us initially */
+ tries = 100;
+ timeout = 100;
+ do {
+ if (!irqs_disabled()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(timeout));
+ if (signal_pending(current))
+ return -EINTR;
+ } else {
+ udelay(timeout);
+ }
+
+ status = readl(syscfg->base + SYS_CFGSTAT);
+ if (status & SYS_CFGSTAT_ERR)
+ return -EFAULT;
+
+ if (timeout > 20)
+ timeout -= 20;
+ } while (--tries && !(status & SYS_CFGSTAT_COMPLETE));
+ if (WARN_ON_ONCE(!tries))
+ return -ETIMEDOUT;
+
+ if (!write) {
+ *data = readl(syscfg->base + SYS_CFGDATA);
+ dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data);
}
- dev = device_create(vexpress_config_class, parent, 0,
- NULL, "%s.bridge", dev_name(parent));
+ return 0;
+}
+
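The poll loop in vexpress_syscfg_exec() adapts to its calling context: it sleeps when interrupts are enabled and busy-waits otherwise, shrinking the interval as it retries. A condensed sketch of that dual-mode wait, with a hypothetical ready() predicate:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/sched/signal.h>

static int demo_poll(bool (*ready)(void *), void *ctx)
{
	int tries = 100;
	long interval_us = 100;

	do {
		if (!irqs_disabled()) {
			/* Sleepable context: yield the CPU while waiting. */
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(interval_us));
			if (signal_pending(current))
				return -EINTR;
		} else {
			/* Atomic context: spin instead. */
			udelay(interval_us);
		}

		if (interval_us > 20)
			interval_us -= 20;
	} while (--tries && !ready(ctx));

	return tries ? 0 : -ETIMEDOUT;
}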
+static int vexpress_syscfg_read(void *context, unsigned int index,
+ unsigned int *val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, false, val);
+}
+
+static int vexpress_syscfg_write(void *context, unsigned int index,
+ unsigned int val)
+{
+ struct vexpress_syscfg_func *func = context;
+
+ return vexpress_syscfg_exec(func, index, true, &val);
+}
+
+static struct regmap_config vexpress_syscfg_regmap_config = {
+ .lock = vexpress_config_lock,
+ .unlock = vexpress_config_unlock,
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_read = vexpress_syscfg_read,
+ .reg_write = vexpress_syscfg_write,
+ .reg_format_endian = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
- if (IS_ERR(dev))
- return dev;
+static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
+ void *context)
+{
+ int err;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func;
+ struct property *prop;
+ const __be32 *val = NULL;
+ __be32 energy_quirk[4];
+ int num;
+ u32 site, position, dcc;
+ int i;
+
+ err = vexpress_config_get_topo(dev->of_node, &site,
+ &position, &dcc);
+ if (err)
+ return ERR_PTR(err);
+
+ prop = of_find_property(dev->of_node,
+ "arm,vexpress-sysreg,func", NULL);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
- bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- put_device(dev);
- device_unregister(dev);
+ num = prop->length / sizeof(u32) / 2;
+ val = prop->value;
+
+ /*
+ * "arm,vexpress-energy" function used to be described
+ * by its first device only, now it requires both
+ */
+ if (num == 1 && of_device_is_compatible(dev->of_node,
+ "arm,vexpress-energy")) {
+ num = 2;
+ energy_quirk[0] = *val;
+ energy_quirk[2] = *val++;
+ energy_quirk[1] = *val;
+ energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1);
+ val = energy_quirk;
+ }
+
+ func = kzalloc(struct_size(func, template, num), GFP_KERNEL);
+ if (!func)
return ERR_PTR(-ENOMEM);
+
+ func->syscfg = syscfg;
+ func->num_templates = num;
+
+ for (i = 0; i < num; i++) {
+ u32 function, device;
+
+ function = be32_to_cpup(val++);
+ device = be32_to_cpup(val++);
+
+ dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n",
+ func, site, position, dcc,
+ function, device);
+
+ func->template[i] = SYS_CFGCTRL_DCC(dcc);
+ func->template[i] |= SYS_CFGCTRL_SITE(site);
+ func->template[i] |= SYS_CFGCTRL_POSITION(position);
+ func->template[i] |= SYS_CFGCTRL_FUNC(function);
+ func->template[i] |= SYS_CFGCTRL_DEVICE(device);
}
- bridge->ops = ops;
- bridge->context = context;
- dev_set_drvdata(dev, bridge);
+ vexpress_syscfg_regmap_config.max_register = num - 1;
- dev_dbg(parent, "Registered bridge '%s', parent node %p\n",
- dev_name(dev), parent->of_node);
+ func->regmap = regmap_init(dev, NULL, func,
+ &vexpress_syscfg_regmap_config);
- return dev;
-}
+ if (IS_ERR(func->regmap)) {
+ void *err = func->regmap;
+ kfree(func);
+ return err;
+ }
+
+ list_add(&func->list, &syscfg->funcs);
-static int vexpress_config_node_match(struct device *dev, const void *data)
+ return func->regmap;
+}
+
+static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context)
{
- const struct device_node *node = data;
+ struct vexpress_syscfg *syscfg = context;
+ struct vexpress_syscfg_func *func, *tmp;
- dev_dbg(dev, "Parent node %p, looking for %p\n",
- dev->parent->of_node, node);
+ regmap_exit(regmap);
- return dev->parent->of_node == node;
+ list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) {
+ if (func->regmap == regmap) {
+			list_del(&func->list);
+ kfree(func);
+ break;
+ }
+ }
}
-static int vexpress_config_populate(struct device_node *node)
+static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = {
+ .regmap_init = vexpress_syscfg_regmap_init,
+ .regmap_exit = vexpress_syscfg_regmap_exit,
+};
+
+
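Because the regmap is built on .reg_read/.reg_write callbacks, each "register" index simply selects an entry of the func's template array; consumers go through the ordinary regmap calls. A hedged consumer sketch (demo_read_func is hypothetical):

#include <linux/regmap.h>

static int demo_read_func(struct regmap *map, unsigned int *val)
{
	/* Index 0 selects template[0], i.e. the first config function. */
	return regmap_read(map, 0, val);
}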
+static int vexpress_syscfg_probe(struct platform_device *pdev)
{
- struct device_node *bridge;
- struct device *parent;
- int ret;
+ struct vexpress_syscfg *syscfg;
+ struct resource *res;
+ struct vexpress_config_bridge *bridge;
+ struct device_node *node;
+ int master;
+ u32 dt_hbi;
+
+ syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL);
+ if (!syscfg)
+ return -ENOMEM;
+ syscfg->dev = &pdev->dev;
+ INIT_LIST_HEAD(&syscfg->funcs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ syscfg->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(syscfg->base))
+ return PTR_ERR(syscfg->base);
- bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+ bridge = devm_kmalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -EINVAL;
+ return -ENOMEM;
- parent = class_find_device(vexpress_config_class, NULL, bridge,
- vexpress_config_node_match);
- of_node_put(bridge);
- if (WARN_ON(!parent))
- return -ENODEV;
+ bridge->ops = &vexpress_syscfg_bridge_ops;
+ bridge->context = syscfg;
- ret = of_platform_populate(node, NULL, NULL, parent);
+ dev_set_drvdata(&pdev->dev, bridge);
- put_device(parent);
+ master = readl(syscfg->base + SYS_MISC) & SYS_MISC_MASTERSITE ?
+ VEXPRESS_SITE_DB2 : VEXPRESS_SITE_DB1;
+ vexpress_config_set_master(master);
- return ret;
-}
+ /* Confirm board type against DT property, if available */
+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) {
+ u32 id = readl(syscfg->base + (master == VEXPRESS_SITE_DB1 ?
+ SYS_PROCID0 : SYS_PROCID1));
+ u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK;
-static int __init vexpress_config_init(void)
-{
- int err = 0;
- struct device_node *node;
+ if (WARN_ON(dt_hbi != hbi))
+			dev_warn(&pdev->dev, "DT HBI (%x) does not match hardware (%x)!\n",
+ dt_hbi, hbi);
+ }
- /* Need the config devices early, before the "normal" devices... */
for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
- err = vexpress_config_populate(node);
- if (err) {
- of_node_put(node);
- break;
- }
+ struct device_node *bridge_np;
+
+		bridge_np = of_parse_phandle(node, "arm,vexpress,config-bridge", 0);
+		of_node_put(bridge_np);
+		if (bridge_np != pdev->dev.parent->of_node)
+			continue;
+
+ of_platform_populate(node, NULL, NULL, &pdev->dev);
}
- return err;
+ return 0;
}
-postcore_initcall(vexpress_config_init);
+static const struct platform_device_id vexpress_syscfg_id_table[] = {
+ { "vexpress-syscfg", },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, vexpress_syscfg_id_table);
+
+static struct platform_driver vexpress_syscfg_driver = {
+ .driver.name = "vexpress-syscfg",
+ .id_table = vexpress_syscfg_id_table,
+ .probe = vexpress_syscfg_probe,
+};
+module_platform_driver(vexpress_syscfg_driver);
+MODULE_LICENSE("GPL v2");
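The driver matches by platform device ID rather than an OF compatible, so the parent sysreg device is expected to instantiate a "vexpress-syscfg" child. A hedged sketch of how a parent might do that (names and resource handling are illustrative):

#include <linux/platform_device.h>

static struct platform_device *demo_register_syscfg(struct device *parent,
						    struct resource *res)
{
	return platform_device_register_resndata(parent, "vexpress-syscfg",
						 -1, res, 1, NULL, 0);
}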