Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/hisi_lpc.c | 5
-rw-r--r--  drivers/bus/imx-weim.c | 70
-rw-r--r--  drivers/char/hw_random/Kconfig | 15
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/optee-rng.c | 306
-rw-r--r--  drivers/clk/tegra/Kconfig | 5
-rw-r--r--  drivers/clk/tegra/Makefile | 2
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 459
-rw-r--r--  drivers/clk/tegra/clk-dfll.h | 6
-rw-r--r--  drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 520
-rw-r--r--  drivers/clk/tegra/cvb.c | 12
-rw-r--r--  drivers/clk/tegra/cvb.h | 7
-rw-r--r--  drivers/clocksource/Kconfig | 22
-rw-r--r--  drivers/clocksource/Makefile | 7
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 55
-rw-r--r--  drivers/clocksource/exynos_mct.c | 48
-rw-r--r--  drivers/clocksource/timer-cs5535.c (renamed from drivers/clocksource/cs5535-clockevt.c) | 0
-rw-r--r--  drivers/clocksource/timer-milbeaut.c | 161
-rw-r--r--  drivers/clocksource/timer-pxa.c (renamed from drivers/clocksource/pxa_timer.c) | 0
-rw-r--r--  drivers/clocksource/timer-riscv.c | 23
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 10
-rw-r--r--  drivers/clocksource/timer-tango-xtal.c (renamed from drivers/clocksource/tango_xtal.c) | 0
-rw-r--r--  drivers/clocksource/timer-tegra20.c | 370
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 4
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 1
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c | 44
-rw-r--r--  drivers/firmware/efi/Kconfig | 6
-rw-r--r--  drivers/firmware/efi/Makefile | 1
-rw-r--r--  drivers/firmware/efi/apple-properties.c | 13
-rw-r--r--  drivers/firmware/efi/arm-init.c | 6
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 6
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 4
-rw-r--r--  drivers/firmware/efi/capsule.c | 4
-rw-r--r--  drivers/firmware/efi/cper-arm.c | 14
-rw-r--r--  drivers/firmware/efi/cper.c | 14
-rw-r--r--  drivers/firmware/efi/dev-path-parser.c | 9
-rw-r--r--  drivers/firmware/efi/earlycon.c | 206
-rw-r--r--  drivers/firmware/efi/efi-bgrt.c | 5
-rw-r--r--  drivers/firmware/efi/efi-pstore.c | 2
-rw-r--r--  drivers/firmware/efi/efibc.c | 10
-rw-r--r--  drivers/firmware/efi/efivars.c | 58
-rw-r--r--  drivers/firmware/efi/esrt.c | 1
-rw-r--r--  drivers/firmware/efi/fake_mem.c | 16
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 4
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 5
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 15
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h | 12
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 115
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/random.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/secureboot.c | 4
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 4
-rw-r--r--  drivers/firmware/efi/memattr.c | 7
-rw-r--r--  drivers/firmware/efi/runtime-map.c | 3
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 48
-rw-r--r--  drivers/firmware/efi/test/efi_test.c | 1
-rw-r--r--  drivers/firmware/efi/test/efi_test.h | 2
-rw-r--r--  drivers/firmware/efi/tpm.c | 5
-rw-r--r--  drivers/firmware/efi/vars.c | 15
-rw-r--r--  drivers/firmware/imx/misc.c | 38
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 1
-rw-r--r--  drivers/firmware/raspberrypi.c | 11
-rw-r--r--  drivers/firmware/tegra/Makefile | 3
-rw-r--r--  drivers/firmware/tegra/bpmp-private.h | 34
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra186.c | 305
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra210.c | 243
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 376
-rw-r--r--  drivers/firmware/ti_sci.c | 21
-rw-r--r--  drivers/firmware/xilinx/Kconfig | 1
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 166
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 119
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.h | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 60
-rw-r--r--  drivers/input/keyboard/davinci_keyscan.c | 4
-rw-r--r--  drivers/irqchip/Kconfig | 19
-rw-r--r--  drivers/irqchip/Makefile | 3
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 10
-rw-r--r--  drivers/irqchip/irq-davinci-aintc.c | 163
-rw-r--r--  drivers/irqchip/irq-davinci-cp-intc.c | 260
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 28
-rw-r--r--  drivers/irqchip/irq-i8259.c | 9
-rw-r--r--  drivers/irqchip/irq-imx-irqsteer.c | 115
-rw-r--r--  drivers/irqchip/irq-ls1x.c | 192
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 116
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/bcm2835-pm.c | 92
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 7
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 6
-rw-r--r--  drivers/nvme/host/pci.c | 117
-rw-r--r--  drivers/nvmem/Kconfig | 10
-rw-r--r--  drivers/nvmem/Makefile | 2
-rw-r--r--  drivers/nvmem/zynqmp_nvmem.c | 86
-rw-r--r--  drivers/opp/core.c | 18
-rw-r--r--  drivers/opp/of.c | 2
-rw-r--r--  drivers/opp/opp.h | 2
-rw-r--r--  drivers/pci/msi.c | 39
-rw-r--r--  drivers/perf/arm-cci.c | 10
-rw-r--r--  drivers/perf/arm-ccn.c | 6
-rw-r--r--  drivers/perf/arm_dsu_pmu.c | 9
-rw-r--r--  drivers/perf/arm_pmu.c | 15
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 6
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 1
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 1
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 1
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c | 9
-rw-r--r--  drivers/perf/qcom_l2_pmu.c | 9
-rw-r--r--  drivers/perf/qcom_l3_pmu.c | 8
-rw-r--r--  drivers/perf/thunderx2_pmu.c | 10
-rw-r--r--  drivers/perf/xgene_pmu.c | 6
-rw-r--r--  drivers/ptp/ptp_clock.c | 2
-rw-r--r--  drivers/reset/Kconfig | 12
-rw-r--r--  drivers/reset/Makefile | 2
-rw-r--r--  drivers/reset/reset-brcmstb.c | 132
-rw-r--r--  drivers/reset/reset-imx7.c | 172
-rw-r--r--  drivers/reset/reset-socfpga.c | 2
-rw-r--r--  drivers/reset/reset-sunxi.c | 1
-rw-r--r--  drivers/reset/reset-zynqmp.c | 114
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c | 18
-rw-r--r--  drivers/soc/amlogic/meson-clk-measure.c | 196
-rw-r--r--  drivers/soc/bcm/Kconfig | 12
-rw-r--r--  drivers/soc/bcm/Makefile | 1
-rw-r--r--  drivers/soc/bcm/bcm2835-power.c | 661
-rw-r--r--  drivers/soc/fsl/Kconfig | 1
-rw-r--r--  drivers/soc/fsl/dpio/dpio-cmd.h | 5
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c | 54
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c | 5
-rw-r--r--  drivers/soc/fsl/dpio/dpio.c | 16
-rw-r--r--  drivers/soc/fsl/dpio/dpio.h | 5
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c | 5
-rw-r--r--  drivers/soc/fsl/guts.c | 10
-rw-r--r--  drivers/soc/imx/Kconfig | 2
-rw-r--r--  drivers/soc/imx/gpcv2.c | 76
-rw-r--r--  drivers/soc/qcom/Kconfig | 18
-rw-r--r--  drivers/soc/qcom/Makefile | 2
-rw-r--r--  drivers/soc/qcom/llcc-sdm845.c | 6
-rw-r--r--  drivers/soc/qcom/llcc-slice.c | 101
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c | 7
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c | 32
-rw-r--r--  drivers/soc/qcom/rpmh.c | 37
-rw-r--r--  drivers/soc/qcom/rpmhpd.c | 406
-rw-r--r--  drivers/soc/qcom/rpmpd.c | 315
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 1
-rw-r--r--  drivers/soc/tegra/Kconfig | 1
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c | 12
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra210.c | 2
-rw-r--r--  drivers/soc/tegra/pmc.c | 424
-rw-r--r--  drivers/soc/ti/knav_dma.c | 2
-rw-r--r--  drivers/soc/xilinx/Kconfig | 20
-rw-r--r--  drivers/soc/xilinx/Makefile | 2
-rw-r--r--  drivers/soc/xilinx/zynqmp_pm_domains.c | 321
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c | 178
-rw-r--r--  drivers/tee/optee/Makefile | 1
-rw-r--r--  drivers/tee/optee/core.c | 4
-rw-r--r--  drivers/tee/optee/device.c | 160
-rw-r--r--  drivers/tee/optee/optee_msg.h | 26
-rw-r--r--  drivers/tee/optee/optee_private.h | 3
-rw-r--r--  drivers/tee/optee/optee_smc.h | 26
-rw-r--r--  drivers/tee/optee/supp.c | 10
-rw-r--r--  drivers/tee/tee_core.c | 78
-rw-r--r--  drivers/usb/host/ohci-da8xx.c | 118
-rw-r--r--  drivers/watchdog/bcm2835_wdt.c | 26
166 files changed, 7938 insertions, 1480 deletions
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index d5f85455fa62..19d7b6ff2f17 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -522,10 +522,9 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
if (!found) {
dev_warn(hostdev,
- "could not find cell for child device (%s)\n",
+ "could not find cell for child device (%s), discarding\n",
hid);
- ret = -ENODEV;
- goto fail;
+ continue;
}
pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index d84996a4528e..db74334ca5ef 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -46,6 +46,17 @@ static const struct imx_weim_devtype imx51_weim_devtype = {
};
#define MAX_CS_REGS_COUNT 6
+#define MAX_CS_COUNT 6
+#define OF_REG_SIZE 3
+
+struct cs_timing {
+ bool is_applied;
+ u32 regs[MAX_CS_REGS_COUNT];
+};
+
+struct cs_timing_state {
+ struct cs_timing cs[MAX_CS_COUNT];
+};
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
@@ -111,21 +122,19 @@ err:
}
/* Parse and set the timing for this device. */
-static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype)
+static int __init weim_timing_setup(struct device *dev,
+ struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype,
+ struct cs_timing_state *ts)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
+ int reg_idx, num_regs;
+ struct cs_timing *cst;
if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
return -EINVAL;
-
- /* get the CS index from this child node's "reg" property. */
- ret = of_property_read_u32(np, "reg", &cs_idx);
- if (ret)
- return ret;
-
- if (cs_idx >= devtype->cs_count)
+ if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
return -EINVAL;
ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
@@ -133,9 +142,43 @@ static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
if (ret)
return ret;
- /* set the timing for WEIM */
- for (i = 0; i < devtype->cs_regs_count; i++)
- writel(value[i], base + cs_idx * devtype->cs_stride + i * 4);
+ /*
+ * the child node's "reg" property may contain multiple address ranges,
+ * extract the chip select for each.
+ */
+ num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
+ if (num_regs < 0)
+ return num_regs;
+ if (!num_regs)
+ return -EINVAL;
+ for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
+ /* get the CS index from this child node's "reg" property. */
+ ret = of_property_read_u32_index(np, "reg",
+ reg_idx * OF_REG_SIZE, &cs_idx);
+ if (ret)
+ break;
+
+ if (cs_idx >= devtype->cs_count)
+ return -EINVAL;
+
+ /* prevent re-configuring a CS that's already been configured */
+ cst = &ts->cs[cs_idx];
+ if (cst->is_applied && memcmp(value, cst->regs,
+ devtype->cs_regs_count * sizeof(u32))) {
+ dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
+ return -EINVAL;
+ }
+
+ /* set the timing for WEIM */
+ for (i = 0; i < devtype->cs_regs_count; i++)
+ writel(value[i],
+ base + cs_idx * devtype->cs_stride + i * 4);
+ if (!cst->is_applied) {
+ cst->is_applied = true;
+ memcpy(cst->regs, value,
+ devtype->cs_regs_count * sizeof(u32));
+ }
+ }
return 0;
}
@@ -148,6 +191,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
const struct imx_weim_devtype *devtype = of_id->data;
struct device_node *child;
int ret, have_child = 0;
+ struct cs_timing_state ts = {};
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
@@ -156,7 +200,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
}
for_each_available_child_of_node(pdev->dev.of_node, child) {
- ret = weim_timing_setup(child, base, devtype);
+ ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
if (ret)
dev_warn(&pdev->dev, "%pOF set timing failed.\n",
child);
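Aside (not part of the diff): the new per-chip-select state only matters when a child node carries several OF_REG_SIZE-cell entries in its "reg" property, i.e. several chip selects that are supposed to share one "fsl,weim-cs-timing". A stripped-down, kernel-free sketch of the reuse/conflict rule the patch applies for each chip select (struct layout and helper name are invented here purely for illustration):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_CS_REGS_COUNT 6

	struct cs_timing {
		bool is_applied;
		unsigned int regs[MAX_CS_REGS_COUNT];
	};

	/* 0: timing programmed or identical to what is already programmed; -1: conflict */
	static int apply_cs_timing(struct cs_timing *cst, const unsigned int *value,
				   int cs_regs_count)
	{
		if (cst->is_applied &&
		    memcmp(value, cst->regs, cs_regs_count * sizeof(*value)))
			return -1;	/* same CS requested with different timings */

		if (!cst->is_applied) {
			cst->is_applied = true;
			memcpy(cst->regs, value, cs_regs_count * sizeof(*value));
		}
		return 0;
	}

	int main(void)
	{
		struct cs_timing cs0 = { 0 };
		unsigned int a[MAX_CS_REGS_COUNT] = { 1, 2, 3, 4, 5, 6 };
		unsigned int b[MAX_CS_REGS_COUNT] = { 9, 2, 3, 4, 5, 6 };

		int r1 = apply_cs_timing(&cs0, a, 6);	/* first use: programmed */
		int r2 = apply_cs_timing(&cs0, a, 6);	/* identical timing: fine */
		int r3 = apply_cs_timing(&cs0, b, 6);	/* conflicting timing */

		printf("%d %d %d\n", r1, r2, r3);	/* prints: 0 0 -1 */
		return 0;
	}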
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dac895dc01b9..25a7d8ffdb5d 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -424,6 +424,21 @@ config HW_RANDOM_EXYNOS
will be called exynos-trng.
If unsure, say Y.
+
+config HW_RANDOM_OPTEE
+ tristate "OP-TEE based Random Number Generator support"
+ depends on OPTEE
+ default HW_RANDOM
+ help
+ This driver provides support for OP-TEE based Random Number
+ Generator on ARM SoCs where hardware entropy sources are not
+ accessible to normal world (Linux).
+
+ To compile this driver as a module, choose M here: the module
+ will be called optee-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e35ec3ce3a20..7c9ef4a7667f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
+obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
new file mode 100644
index 000000000000..ddfbabaa5f8f
--- /dev/null
+++ b/drivers/char/hw_random/optee-rng.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#define DRIVER_NAME "optee-rng"
+
+#define TEE_ERROR_HEALTH_TEST_FAIL 0x00000001
+
+/*
+ * TA_CMD_GET_ENTROPY - Get Entropy from RNG
+ *
+ * param[0] (inout memref) - Entropy buffer memory reference
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_NOT_SUPPORTED - Requested entropy size greater than size of pool
+ * TEE_ERROR_HEALTH_TEST_FAIL - Continuous health testing failed
+ */
+#define TA_CMD_GET_ENTROPY 0x0
+
+/*
+ * TA_CMD_GET_RNG_INFO - Get RNG information
+ *
+ * param[0] (out value) - value.a: RNG data-rate in bytes per second
+ * value.b: Quality/Entropy per 1024 bit of data
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ */
+#define TA_CMD_GET_RNG_INFO 0x1
+
+#define MAX_ENTROPY_REQ_SZ (4 * 1024)
+
+/**
+ * struct optee_rng_private - OP-TEE Random Number Generator private data
+ * @dev: OP-TEE based RNG device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: RNG TA session identifier.
+ * @data_rate: RNG data rate.
+ * @entropy_shm_pool: Memory pool shared with RNG device.
+ * @optee_rng: OP-TEE RNG driver structure.
+ */
+struct optee_rng_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ u32 data_rate;
+ struct tee_shm *entropy_shm_pool;
+ struct hwrng optee_rng;
+};
+
+#define to_optee_rng_private(r) \
+ container_of(r, struct optee_rng_private, optee_rng)
+
+static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
+ void *buf, size_t req_size)
+{
+ int ret = 0;
+ u8 *rng_data = NULL;
+ size_t rng_size = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */
+ inv_arg.func = TA_CMD_GET_ENTROPY;
+ inv_arg.session = pvt_data->session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data->entropy_shm_pool;
+ param[0].u.memref.size = req_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data->ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data->dev, "TA_CMD_GET_ENTROPY invoke err: %x\n",
+ inv_arg.ret);
+ return 0;
+ }
+
+ rng_data = tee_shm_get_va(pvt_data->entropy_shm_pool, 0);
+ if (IS_ERR(rng_data)) {
+ dev_err(pvt_data->dev, "tee_shm_get_va failed\n");
+ return 0;
+ }
+
+ rng_size = param[0].u.memref.size;
+ memcpy(buf, rng_data, rng_size);
+
+ return rng_size;
+}
+
+static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ size_t read = 0, rng_size = 0;
+ int timeout = 1;
+ u8 *data = buf;
+
+ if (max > MAX_ENTROPY_REQ_SZ)
+ max = MAX_ENTROPY_REQ_SZ;
+
+ while (read == 0) {
+ rng_size = get_optee_rng_data(pvt_data, data, (max - read));
+
+ data += rng_size;
+ read += rng_size;
+
+ if (wait) {
+ if (timeout-- == 0)
+ return read;
+ msleep((1000 * (max - read)) / pvt_data->data_rate);
+ } else {
+ return read;
+ }
+ }
+
+ return read;
+}
+
+static int optee_rng_init(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ struct tee_shm *entropy_shm_pool = NULL;
+
+ entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(entropy_shm_pool)) {
+ dev_err(pvt_data->dev, "tee_shm_alloc failed\n");
+ return PTR_ERR(entropy_shm_pool);
+ }
+
+ pvt_data->entropy_shm_pool = entropy_shm_pool;
+
+ return 0;
+}
+
+static void optee_rng_cleanup(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+
+ tee_shm_free(pvt_data->entropy_shm_pool);
+}
+
+static struct optee_rng_private pvt_data = {
+ .optee_rng = {
+ .name = DRIVER_NAME,
+ .init = optee_rng_init,
+ .cleanup = optee_rng_cleanup,
+ .read = optee_rng_read,
+ }
+};
+
+static int get_optee_rng_info(struct device *dev)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */
+ inv_arg.func = TA_CMD_GET_RNG_INFO;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(dev, "TA_CMD_GET_RNG_INFO invoke err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ pvt_data.data_rate = param[0].u.value.a;
+ pvt_data.optee_rng.quality = param[0].u.value.b;
+
+ return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int optee_rng_probe(struct device *dev)
+{
+ struct tee_client_device *rng_device = to_tee_client_device(dev);
+ int ret = 0, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with hwrng Trusted App */
+ memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((ret < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ err = get_optee_rng_info(dev);
+ if (err)
+ goto out_sess;
+
+ err = hwrng_register(&pvt_data.optee_rng);
+ if (err) {
+ dev_err(dev, "hwrng registration failed (%d)\n", err);
+ goto out_sess;
+ }
+
+ pvt_data.dev = dev;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int optee_rng_remove(struct device *dev)
+{
+ hwrng_unregister(&pvt_data.optee_rng);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id optee_rng_id_table[] = {
+ {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f,
+ 0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rng_id_table);
+
+static struct tee_client_driver optee_rng_driver = {
+ .id_table = optee_rng_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .bus = &tee_bus_type,
+ .probe = optee_rng_probe,
+ .remove = optee_rng_remove,
+ },
+};
+
+static int __init optee_rng_mod_init(void)
+{
+ return driver_register(&optee_rng_driver.driver);
+}
+
+static void __exit optee_rng_mod_exit(void)
+{
+ driver_unregister(&optee_rng_driver.driver);
+}
+
+module_init(optee_rng_mod_init);
+module_exit(optee_rng_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sumit Garg <sumit.garg@linaro.org>");
+MODULE_DESCRIPTION("OP-TEE based random number generator driver");
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 7ddacae5d0b1..1adcccfa7829 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -5,3 +5,8 @@ config TEGRA_CLK_EMC
config CLK_TEGRA_BPMP
def_bool y
depends on TEGRA_BPMP
+
+config TEGRA_CLK_DFLL
+ depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ select PM_OPP
+ def_bool y
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 6507acc843c7..4812e45c2214 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
-obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o
+obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 609e363dabf8..0400e5b1d627 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1,7 +1,7 @@
/*
* clk-dfll.c - Tegra DFLL clock source common code
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -47,6 +47,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -243,6 +244,12 @@ enum dfll_tune_range {
DFLL_TUNE_LOW = 1,
};
+
+enum tegra_dfll_pmu_if {
+ TEGRA_DFLL_PMU_I2C = 0,
+ TEGRA_DFLL_PMU_PWM = 1,
+};
+
/**
* struct dfll_rate_req - target DFLL rate request data
* @rate: target frequency, after the postscaling
@@ -300,10 +307,19 @@ struct tegra_dfll {
u32 i2c_reg;
u32 i2c_slave_addr;
- /* i2c_lut array entries are regulator framework selectors */
- unsigned i2c_lut[MAX_DFLL_VOLTAGES];
- int i2c_lut_size;
- u8 lut_min, lut_max, lut_safe;
+ /* lut array entries are regulator framework selectors or PWM values */
+ unsigned lut[MAX_DFLL_VOLTAGES];
+ unsigned long lut_uv[MAX_DFLL_VOLTAGES];
+ int lut_size;
+ u8 lut_bottom, lut_min, lut_max, lut_safe;
+
+ /* PWM interface */
+ enum tegra_dfll_pmu_if pmu_if;
+ unsigned long pwm_rate;
+ struct pinctrl *pwm_pin;
+ struct pinctrl_state *pwm_enable_state;
+ struct pinctrl_state *pwm_disable_state;
+ u32 reg_init_uV;
};
#define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw)
@@ -490,6 +506,34 @@ static void dfll_set_mode(struct tegra_dfll *td,
}
/*
+ * DVCO rate control
+ */
+
+static unsigned long get_dvco_rate_below(struct tegra_dfll *td, u8 out_min)
+{
+ struct dev_pm_opp *opp;
+ unsigned long rate, prev_rate;
+ unsigned long uv, min_uv;
+
+ min_uv = td->lut_uv[out_min];
+ for (rate = 0, prev_rate = 0; ; rate++) {
+ opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ uv = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (uv && uv > min_uv)
+ return prev_rate;
+
+ prev_rate = rate;
+ }
+
+ return prev_rate;
+}
+
+/*
* DFLL-to-I2C controller interface
*/
@@ -518,6 +562,118 @@ static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable)
return 0;
}
+
+/*
+ * DFLL-to-PWM controller interface
+ */
+
+/**
+ * dfll_pwm_set_output_enabled - enable/disable PWM voltage requests
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the PWM voltage requests
+ *
+ * Set the master enable control for PWM control value updates. If disabled,
+ * then the PWM signal is not driven. Also configure the PWM output pad
+ * to the appropriate state.
+ */
+static int dfll_pwm_set_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ int ret;
+ u32 val, div;
+
+ if (enable) {
+ ret = pinctrl_select_state(td->pwm_pin, td->pwm_enable_state);
+ if (ret < 0) {
+ dev_err(td->dev, "setting enable state failed\n");
+ return -EINVAL;
+ }
+ val = dfll_readl(td, DFLL_OUTPUT_CFG);
+ val &= ~DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+ div = DIV_ROUND_UP(td->ref_rate, td->pwm_rate);
+ val |= (div << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT)
+ & DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+
+ val |= DFLL_OUTPUT_CFG_PWM_ENABLE;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+ } else {
+ ret = pinctrl_select_state(td->pwm_pin, td->pwm_disable_state);
+ if (ret < 0)
+ dev_warn(td->dev, "setting disable state failed\n");
+
+ val = dfll_readl(td, DFLL_OUTPUT_CFG);
+ val &= ~DFLL_OUTPUT_CFG_PWM_ENABLE;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+ }
+
+ return 0;
+}
+
+/**
+ * dfll_set_force_output_value - set fixed value for force output
+ * @td: DFLL instance
+ * @out_val: value to force output
+ *
+ * Set the fixed value for force output, DFLL will output this value when
+ * force output is enabled.
+ */
+static u32 dfll_set_force_output_value(struct tegra_dfll *td, u8 out_val)
+{
+ u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+ val = (val & DFLL_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK);
+ dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+ dfll_wmb(td);
+
+ return dfll_readl(td, DFLL_OUTPUT_FORCE);
+}
+
+/**
+ * dfll_set_force_output_enabled - enable/disable force output
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the force output
+ *
+ * Set the enable control for force output with fixed value.
+ */
+static void dfll_set_force_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+ if (enable)
+ val |= DFLL_OUTPUT_FORCE_ENABLE;
+ else
+ val &= ~DFLL_OUTPUT_FORCE_ENABLE;
+
+ dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+ dfll_wmb(td);
+}
+
+/**
+ * dfll_force_output - force output a fixed value
+ * @td: DFLL instance
+ * @out_sel: value to force output
+ *
+ * Set the fixed value for force output, DFLL will output this value.
+ */
+static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel)
+{
+ u32 val;
+
+ if (out_sel > OUT_MASK)
+ return -EINVAL;
+
+ val = dfll_set_force_output_value(td, out_sel);
+ if ((td->mode < DFLL_CLOSED_LOOP) &&
+ !(val & DFLL_OUTPUT_FORCE_ENABLE)) {
+ dfll_set_force_output_enabled(td, true);
+ }
+
+ return 0;
+}
+
/**
* dfll_load_lut - load the voltage lookup table
* @td: struct tegra_dfll *
@@ -539,7 +695,7 @@ static void dfll_load_i2c_lut(struct tegra_dfll *td)
lut_index = i;
val = regulator_list_hardware_vsel(td->vdd_reg,
- td->i2c_lut[lut_index]);
+ td->lut[lut_index]);
__raw_writel(val, td->lut_base + i * 4);
}
@@ -594,24 +750,41 @@ static void dfll_init_out_if(struct tegra_dfll *td)
{
u32 val;
- td->lut_min = 0;
- td->lut_max = td->i2c_lut_size - 1;
- td->lut_safe = td->lut_min + 1;
+ td->lut_min = td->lut_bottom;
+ td->lut_max = td->lut_size - 1;
+ td->lut_safe = td->lut_min + (td->lut_min < td->lut_max ? 1 : 0);
+
+ /* clear DFLL_OUTPUT_CFG before setting new value */
+ dfll_writel(td, 0, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
- dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG);
val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) |
- (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
- (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
- dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
- dfll_i2c_wmb(td);
+ (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
+ (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
dfll_writel(td, 0, DFLL_OUTPUT_FORCE);
dfll_i2c_writel(td, 0, DFLL_INTR_EN);
dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK,
DFLL_INTR_STS);
- dfll_load_i2c_lut(td);
- dfll_init_i2c_if(td);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM) {
+ u32 vinit = td->reg_init_uV;
+ int vstep = td->soc->alignment.step_uv;
+ unsigned long vmin = td->lut_uv[0];
+
+ /* set initial voltage */
+ if ((vinit >= vmin) && vstep) {
+ unsigned int vsel;
+
+ vsel = DIV_ROUND_UP((vinit - vmin), vstep);
+ dfll_force_output(td, vsel);
+ }
+ } else {
+ dfll_load_i2c_lut(td);
+ dfll_init_i2c_if(td);
+ }
}
/*
@@ -631,17 +804,17 @@ static void dfll_init_out_if(struct tegra_dfll *td)
static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
{
struct dev_pm_opp *opp;
- int i, uv;
+ int i, align_step;
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
if (IS_ERR(opp))
return PTR_ERR(opp);
- uv = dev_pm_opp_get_voltage(opp);
+ align_step = dev_pm_opp_get_voltage(opp) / td->soc->alignment.step_uv;
dev_pm_opp_put(opp);
- for (i = 0; i < td->i2c_lut_size; i++) {
- if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
+ for (i = td->lut_bottom; i < td->lut_size; i++) {
+ if ((td->lut_uv[i] / td->soc->alignment.step_uv) >= align_step)
return i;
}
@@ -863,9 +1036,14 @@ static int dfll_lock(struct tegra_dfll *td)
return -EINVAL;
}
- dfll_i2c_set_output_enabled(td, true);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ dfll_pwm_set_output_enabled(td, true);
+ else
+ dfll_i2c_set_output_enabled(td, true);
+
dfll_set_mode(td, DFLL_CLOSED_LOOP);
dfll_set_frequency_request(td, req);
+ dfll_set_force_output_enabled(td, false);
return 0;
default:
@@ -889,7 +1067,10 @@ static int dfll_unlock(struct tegra_dfll *td)
case DFLL_CLOSED_LOOP:
dfll_set_open_loop_config(td);
dfll_set_mode(td, DFLL_OPEN_LOOP);
- dfll_i2c_set_output_enabled(td, false);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ dfll_pwm_set_output_enabled(td, false);
+ else
+ dfll_i2c_set_output_enabled(td, false);
return 0;
case DFLL_OPEN_LOOP:
@@ -1171,15 +1352,17 @@ static int attr_registers_show(struct seq_file *s, void *data)
seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
dfll_i2c_readl(td, offs));
- seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
- offs = DFLL_I2C_CLK_DIVISOR;
- seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
- __raw_readl(td->i2c_controller_base + offs));
-
- seq_puts(s, "\nLUT:\n");
- for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ if (td->pmu_if == TEGRA_DFLL_PMU_I2C) {
+ seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
+ offs = DFLL_I2C_CLK_DIVISOR;
seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
- __raw_readl(td->lut_base + offs));
+ __raw_readl(td->i2c_controller_base + offs));
+
+ seq_puts(s, "\nLUT:\n");
+ for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ __raw_readl(td->lut_base + offs));
+ }
return 0;
}
@@ -1349,15 +1532,21 @@ di_err1:
*/
static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
{
- int i, n_voltages, reg_uV;
- int i, n_voltages, reg_uV;
+ int i, n_voltages, reg_uV, reg_volt_id, align_step;
+
+ if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+ return -EINVAL;
+ align_step = uV / td->soc->alignment.step_uv;
n_voltages = regulator_count_voltages(td->vdd_reg);
for (i = 0; i < n_voltages; i++) {
reg_uV = regulator_list_voltage(td->vdd_reg, i);
if (reg_uV < 0)
break;
- if (uV == reg_uV)
+ reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+ if (align_step == reg_volt_id)
return i;
}
@@ -1371,15 +1560,21 @@ static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
* */
static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
{
- int i, n_voltages, reg_uV;
+ int i, n_voltages, reg_uV, reg_volt_id, align_step;
+ if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+ return -EINVAL;
+
+ align_step = uV / td->soc->alignment.step_uv;
n_voltages = regulator_count_voltages(td->vdd_reg);
for (i = 0; i < n_voltages; i++) {
reg_uV = regulator_list_voltage(td->vdd_reg, i);
if (reg_uV < 0)
break;
- if (uV <= reg_uV)
+ reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+ if (align_step <= reg_volt_id)
return i;
}
@@ -1387,9 +1582,61 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
return -EINVAL;
}
+/*
+ * dfll_build_pwm_lut - build the PWM regulator lookup table
+ * @td: DFLL instance
+ * @v_max: Vmax from OPP table
+ *
+ * Look-up table in h/w is ignored when PWM is used as DFLL interface to PMIC.
+ * In this case closed loop output is controlling duty cycle directly. The s/w
+ * look-up that maps PWM duty cycle to voltage is still built by this function.
+ */
+static int dfll_build_pwm_lut(struct tegra_dfll *td, unsigned long v_max)
+{
+ int i;
+ unsigned long rate, reg_volt;
+ u8 lut_bottom = MAX_DFLL_VOLTAGES;
+ int v_min = td->soc->cvb->min_millivolts * 1000;
+
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++) {
+ reg_volt = td->lut_uv[i];
+
+ /* since opp voltage is exact mv */
+ reg_volt = (reg_volt / 1000) * 1000;
+ if (reg_volt > v_max)
+ break;
+
+ td->lut[i] = i;
+ if ((lut_bottom == MAX_DFLL_VOLTAGES) && (reg_volt >= v_min))
+ lut_bottom = i;
+ }
+
+ /* determine voltage boundaries */
+ td->lut_size = i;
+ if ((lut_bottom == MAX_DFLL_VOLTAGES) ||
+ (lut_bottom + 1 >= td->lut_size)) {
+ dev_err(td->dev, "no voltage above DFLL minimum %d mV\n",
+ td->soc->cvb->min_millivolts);
+ return -EINVAL;
+ }
+ td->lut_bottom = lut_bottom;
+
+ /* determine rate boundaries */
+ rate = get_dvco_rate_below(td, td->lut_bottom);
+ if (!rate) {
+ dev_err(td->dev, "no opp below DFLL minimum voltage %d mV\n",
+ td->soc->cvb->min_millivolts);
+ return -EINVAL;
+ }
+ td->dvco_rate_min = rate;
+
+ return 0;
+}
+
/**
* dfll_build_i2c_lut - build the I2C voltage register lookup table
* @td: DFLL instance
+ * @v_max: Vmax from OPP table
*
* The DFLL hardware has 33 bytes of look-up table RAM that must be filled with
* PMIC voltage register values that span the entire DFLL operating range.
@@ -1397,33 +1644,24 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
* the soc-specific platform driver (td->soc->opp_dev) and the PMIC
* register-to-voltage mapping queried from the regulator framework.
*
- * On success, fills in td->i2c_lut and returns 0, or -err on failure.
+ * On success, fills in td->lut and returns 0, or -err on failure.
*/
-static int dfll_build_i2c_lut(struct tegra_dfll *td)
+static int dfll_build_i2c_lut(struct tegra_dfll *td, unsigned long v_max)
{
+ unsigned long rate, v, v_opp;
int ret = -EINVAL;
- int j, v, v_max, v_opp;
- int selector;
- unsigned long rate;
- struct dev_pm_opp *opp;
- int lut;
-
- rate = ULONG_MAX;
- opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
- if (IS_ERR(opp)) {
- dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
- goto out;
- }
- v_max = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
+ int j, selector, lut;
v = td->soc->cvb->min_millivolts * 1000;
lut = find_vdd_map_entry_exact(td, v);
if (lut < 0)
goto out;
- td->i2c_lut[0] = lut;
+ td->lut[0] = lut;
+ td->lut_bottom = 0;
for (j = 1, rate = 0; ; rate++) {
+ struct dev_pm_opp *opp;
+
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
if (IS_ERR(opp))
break;
@@ -1435,39 +1673,64 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
dev_pm_opp_put(opp);
for (;;) {
- v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
+ v += max(1UL, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
if (v >= v_opp)
break;
selector = find_vdd_map_entry_min(td, v);
if (selector < 0)
goto out;
- if (selector != td->i2c_lut[j - 1])
- td->i2c_lut[j++] = selector;
+ if (selector != td->lut[j - 1])
+ td->lut[j++] = selector;
}
v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp;
selector = find_vdd_map_entry_exact(td, v);
if (selector < 0)
goto out;
- if (selector != td->i2c_lut[j - 1])
- td->i2c_lut[j++] = selector;
+ if (selector != td->lut[j - 1])
+ td->lut[j++] = selector;
if (v >= v_max)
break;
}
- td->i2c_lut_size = j;
+ td->lut_size = j;
if (!td->dvco_rate_min)
dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
td->soc->cvb->min_millivolts);
- else
+ else {
ret = 0;
+ for (j = 0; j < td->lut_size; j++)
+ td->lut_uv[j] =
+ regulator_list_voltage(td->vdd_reg,
+ td->lut[j]);
+ }
out:
return ret;
}
+static int dfll_build_lut(struct tegra_dfll *td)
+{
+ unsigned long rate, v_max;
+ struct dev_pm_opp *opp;
+
+ rate = ULONG_MAX;
+ opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
+ return -EINVAL;
+ }
+ v_max = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ return dfll_build_pwm_lut(td, v_max);
+ else
+ return dfll_build_i2c_lut(td, v_max);
+}
+
/**
* read_dt_param - helper function for reading required parameters from the DT
* @td: DFLL instance
@@ -1526,11 +1789,56 @@ static int dfll_fetch_i2c_params(struct tegra_dfll *td)
}
td->i2c_reg = vsel_reg;
- ret = dfll_build_i2c_lut(td);
- if (ret) {
- dev_err(td->dev, "couldn't build I2C LUT\n");
+ return 0;
+}
+
+static int dfll_fetch_pwm_params(struct tegra_dfll *td)
+{
+ int ret, i;
+ u32 pwm_period;
+
+ if (!td->soc->alignment.step_uv || !td->soc->alignment.offset_uv) {
+ dev_err(td->dev,
+ "Missing step or alignment info for PWM regulator");
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++)
+ td->lut_uv[i] = td->soc->alignment.offset_uv +
+ i * td->soc->alignment.step_uv;
+
+ ret = read_dt_param(td, "nvidia,pwm-tristate-microvolts",
+ &td->reg_init_uV);
+ if (!ret) {
+ dev_err(td->dev, "couldn't get initialized voltage\n");
+ return ret;
+ }
+
+ ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
+ if (!ret) {
+ dev_err(td->dev, "couldn't get PWM period\n");
return ret;
}
+ td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
+
+ td->pwm_pin = devm_pinctrl_get(td->dev);
+ if (IS_ERR(td->pwm_pin)) {
+ dev_err(td->dev, "DT: missing pinctrl device\n");
+ return PTR_ERR(td->pwm_pin);
+ }
+
+ td->pwm_enable_state = pinctrl_lookup_state(td->pwm_pin,
+ "dvfs_pwm_enable");
+ if (IS_ERR(td->pwm_enable_state)) {
+ dev_err(td->dev, "DT: missing pwm enabled state\n");
+ return PTR_ERR(td->pwm_enable_state);
+ }
+
+ td->pwm_disable_state = pinctrl_lookup_state(td->pwm_pin,
+ "dvfs_pwm_disable");
+ if (IS_ERR(td->pwm_disable_state)) {
+ dev_err(td->dev, "DT: missing pwm disabled state\n");
+ return PTR_ERR(td->pwm_disable_state);
+ }
return 0;
}
@@ -1597,16 +1905,6 @@ int tegra_dfll_register(struct platform_device *pdev,
td->soc = soc;
- td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
- if (IS_ERR(td->vdd_reg)) {
- ret = PTR_ERR(td->vdd_reg);
- if (ret != -EPROBE_DEFER)
- dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n",
- ret);
-
- return ret;
- }
-
td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
if (IS_ERR(td->dvco_rst)) {
dev_err(td->dev, "couldn't get dvco reset\n");
@@ -1619,10 +1917,27 @@ int tegra_dfll_register(struct platform_device *pdev,
return ret;
}
- ret = dfll_fetch_i2c_params(td);
+ if (of_property_read_bool(td->dev->of_node, "nvidia,pwm-to-pmic")) {
+ td->pmu_if = TEGRA_DFLL_PMU_PWM;
+ ret = dfll_fetch_pwm_params(td);
+ } else {
+ td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
+ if (IS_ERR(td->vdd_reg)) {
+ dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
+ return PTR_ERR(td->vdd_reg);
+ }
+ td->pmu_if = TEGRA_DFLL_PMU_I2C;
+ ret = dfll_fetch_i2c_params(td);
+ }
if (ret)
return ret;
+ ret = dfll_build_lut(td);
+ if (ret) {
+ dev_err(td->dev, "couldn't build LUT\n");
+ return ret;
+ }
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(td->dev, "no control register resource\n");
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index 83352c8078f2..85d0d95223f3 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -1,6 +1,6 @@
/*
* clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver
- * Copyright (C) 2013 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2013-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -22,11 +22,14 @@
#include <linux/reset.h>
#include <linux/types.h>
+#include "cvb.h"
+
/**
* struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
* @dev: struct device * that holds the OPP table for the DFLL
* @max_freq: maximum frequency supported on this SoC
* @cvb: CPU frequency table for this SoC
+ * @alignment: parameters of the regulator step and offset
* @init_clock_trimmers: callback to initialize clock trimmers
* @set_clock_trimmers_high: callback to tune clock trimmers for high voltage
* @set_clock_trimmers_low: callback to tune clock trimmers for low voltage
@@ -35,6 +38,7 @@ struct tegra_dfll_soc_data {
struct device *dev;
unsigned long max_freq;
const struct cvb_table *cvb;
+ struct rail_alignment alignment;
void (*init_clock_trimmers)(void);
void (*set_clock_trimmers_high)(void);
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index edc31bb56674..e8ec42bf8638 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -1,7 +1,7 @@
/*
* Tegra124 DFLL FCPU clock source driver
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -21,15 +21,24 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <soc/tegra/fuse.h>
#include "clk.h"
#include "clk-dfll.h"
#include "cvb.h"
+struct dfll_fcpu_data {
+ const unsigned long *cpu_max_freq_table;
+ unsigned int cpu_max_freq_table_size;
+ const struct cvb_table *cpu_cvb_tables;
+ unsigned int cpu_cvb_tables_size;
+};
+
/* Maximum CPU frequency, indexed by CPU speedo id */
-static const unsigned long cpu_max_freq_table[] = {
+static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
[2] = 2116500000UL,
@@ -42,9 +51,6 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 900,
.max_millivolts = 1260,
- .alignment = {
- .step_uv = 10000, /* 10mV */
- },
.speedo_scale = 100,
.voltage_scale = 1000,
.entries = {
@@ -82,16 +88,493 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
},
};
+static const unsigned long tegra210_cpu_max_freq_table[] = {
+ [0] = 1912500000UL,
+ [1] = 1912500000UL,
+ [2] = 2218500000UL,
+ [3] = 1785000000UL,
+ [4] = 1632000000UL,
+ [5] = 1912500000UL,
+ [6] = 2014500000UL,
+ [7] = 1734000000UL,
+ [8] = 1683000000UL,
+ [9] = 1555500000UL,
+ [10] = 1504500000UL,
+};
+
+#define CPU_CVB_TABLE \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 1007452, -23865, 370 } }, \
+ { 306000000UL, { 1052709, -24875, 370 } }, \
+ { 408000000UL, { 1099069, -25895, 370 } }, \
+ { 510000000UL, { 1146534, -26905, 370 } }, \
+ { 612000000UL, { 1195102, -27915, 370 } }, \
+ { 714000000UL, { 1244773, -28925, 370 } }, \
+ { 816000000UL, { 1295549, -29935, 370 } }, \
+ { 918000000UL, { 1347428, -30955, 370 } }, \
+ { 1020000000UL, { 1400411, -31965, 370 } }, \
+ { 1122000000UL, { 1454497, -32975, 370 } }, \
+ { 1224000000UL, { 1509687, -33985, 370 } }, \
+ { 1326000000UL, { 1565981, -35005, 370 } }, \
+ { 1428000000UL, { 1623379, -36015, 370 } }, \
+ { 1530000000UL, { 1681880, -37025, 370 } }, \
+ { 1632000000UL, { 1741485, -38035, 370 } }, \
+ { 1734000000UL, { 1802194, -39055, 370 } }, \
+ { 1836000000UL, { 1864006, -40065, 370 } }, \
+ { 1912500000UL, { 1910780, -40815, 370 } }, \
+ { 2014500000UL, { 1227000, 0, 0 } }, \
+ { 2218500000UL, { 1227000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_XA \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 1250024, -39785, 565 } }, \
+ { 306000000UL, { 1297556, -41145, 565 } }, \
+ { 408000000UL, { 1346718, -42505, 565 } }, \
+ { 510000000UL, { 1397511, -43855, 565 } }, \
+ { 612000000UL, { 1449933, -45215, 565 } }, \
+ { 714000000UL, { 1503986, -46575, 565 } }, \
+ { 816000000UL, { 1559669, -47935, 565 } }, \
+ { 918000000UL, { 1616982, -49295, 565 } }, \
+ { 1020000000UL, { 1675926, -50645, 565 } }, \
+ { 1122000000UL, { 1736500, -52005, 565 } }, \
+ { 1224000000UL, { 1798704, -53365, 565 } }, \
+ { 1326000000UL, { 1862538, -54725, 565 } }, \
+ { 1428000000UL, { 1928003, -56085, 565 } }, \
+ { 1530000000UL, { 1995097, -57435, 565 } }, \
+ { 1606500000UL, { 2046149, -58445, 565 } }, \
+ { 1632000000UL, { 2063822, -58795, 565 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM1 \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 734429, 0, 0 } }, \
+ { 306000000UL, { 768191, 0, 0 } }, \
+ { 408000000UL, { 801953, 0, 0 } }, \
+ { 510000000UL, { 835715, 0, 0 } }, \
+ { 612000000UL, { 869477, 0, 0 } }, \
+ { 714000000UL, { 903239, 0, 0 } }, \
+ { 816000000UL, { 937001, 0, 0 } }, \
+ { 918000000UL, { 970763, 0, 0 } }, \
+ { 1020000000UL, { 1004525, 0, 0 } }, \
+ { 1122000000UL, { 1038287, 0, 0 } }, \
+ { 1224000000UL, { 1072049, 0, 0 } }, \
+ { 1326000000UL, { 1105811, 0, 0 } }, \
+ { 1428000000UL, { 1130000, 0, 0 } }, \
+ { 1555500000UL, { 1130000, 0, 0 } }, \
+ { 1632000000UL, { 1170000, 0, 0 } }, \
+ { 1734000000UL, { 1227500, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM2 \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 742283, 0, 0 } }, \
+ { 306000000UL, { 776249, 0, 0 } }, \
+ { 408000000UL, { 810215, 0, 0 } }, \
+ { 510000000UL, { 844181, 0, 0 } }, \
+ { 612000000UL, { 878147, 0, 0 } }, \
+ { 714000000UL, { 912113, 0, 0 } }, \
+ { 816000000UL, { 946079, 0, 0 } }, \
+ { 918000000UL, { 980045, 0, 0 } }, \
+ { 1020000000UL, { 1014011, 0, 0 } }, \
+ { 1122000000UL, { 1047977, 0, 0 } }, \
+ { 1224000000UL, { 1081943, 0, 0 } }, \
+ { 1326000000UL, { 1090000, 0, 0 } }, \
+ { 1479000000UL, { 1090000, 0, 0 } }, \
+ { 1555500000UL, { 1162000, 0, 0 } }, \
+ { 1683000000UL, { 1195000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 742283, 0, 0 } }, \
+ { 306000000UL, { 776249, 0, 0 } }, \
+ { 408000000UL, { 810215, 0, 0 } }, \
+ { 510000000UL, { 844181, 0, 0 } }, \
+ { 612000000UL, { 878147, 0, 0 } }, \
+ { 714000000UL, { 912113, 0, 0 } }, \
+ { 816000000UL, { 946079, 0, 0 } }, \
+ { 918000000UL, { 980045, 0, 0 } }, \
+ { 1020000000UL, { 1014011, 0, 0 } }, \
+ { 1122000000UL, { 1047977, 0, 0 } }, \
+ { 1224000000UL, { 1081943, 0, 0 } }, \
+ { 1326000000UL, { 1090000, 0, 0 } }, \
+ { 1479000000UL, { 1090000, 0, 0 } }, \
+ { 1504500000UL, { 1120000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_ODN \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 721094, 0, 0 } }, \
+ { 306000000UL, { 754040, 0, 0 } }, \
+ { 408000000UL, { 786986, 0, 0 } }, \
+ { 510000000UL, { 819932, 0, 0 } }, \
+ { 612000000UL, { 852878, 0, 0 } }, \
+ { 714000000UL, { 885824, 0, 0 } }, \
+ { 816000000UL, { 918770, 0, 0 } }, \
+ { 918000000UL, { 915716, 0, 0 } }, \
+ { 1020000000UL, { 984662, 0, 0 } }, \
+ { 1122000000UL, { 1017608, 0, 0 } }, \
+ { 1224000000UL, { 1050554, 0, 0 } }, \
+ { 1326000000UL, { 1083500, 0, 0 } }, \
+ { 1428000000UL, { 1116446, 0, 0 } }, \
+ { 1581000000UL, { 1130000, 0, 0 } }, \
+ { 1683000000UL, { 1168000, 0, 0 } }, \
+ { 1785000000UL, { 1227500, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+static struct cvb_table tegra210_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 10,
+ .process_id = 0,
+ .min_millivolts = 840,
+ .max_millivolts = 1120,
+ CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 10,
+ .process_id = 1,
+ .min_millivolts = 840,
+ .max_millivolts = 1120,
+ CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 9,
+ .process_id = 0,
+ .min_millivolts = 900,
+ .max_millivolts = 1162,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 9,
+ .process_id = 1,
+ .min_millivolts = 900,
+ .max_millivolts = 1162,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 8,
+ .process_id = 0,
+ .min_millivolts = 900,
+ .max_millivolts = 1195,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 8,
+ .process_id = 1,
+ .min_millivolts = 900,
+ .max_millivolts = 1195,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 7,
+ .process_id = 0,
+ .min_millivolts = 841,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_EUCM1,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 7,
+ .process_id = 1,
+ .min_millivolts = 841,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_EUCM1,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 6,
+ .process_id = 0,
+ .min_millivolts = 870,
+ .max_millivolts = 1150,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 6,
+ .process_id = 1,
+ .min_millivolts = 870,
+ .max_millivolts = 1150,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ }
+ },
+ {
+ .speedo_id = 5,
+ .process_id = 0,
+ .min_millivolts = 818,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 5,
+ .process_id = 1,
+ .min_millivolts = 818,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 4,
+ .process_id = -1,
+ .min_millivolts = 918,
+ .max_millivolts = 1113,
+ CPU_CVB_TABLE_XA,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x17711BD,
+ }
+ },
+ {
+ .speedo_id = 3,
+ .process_id = 0,
+ .min_millivolts = 825,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_ODN,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 3,
+ .process_id = 1,
+ .min_millivolts = 825,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_ODN,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 2,
+ .process_id = 0,
+ .min_millivolts = 870,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 2,
+ .process_id = 1,
+ .min_millivolts = 870,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = 0,
+ .min_millivolts = 837,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = 1,
+ .min_millivolts = 837,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 0,
+ .process_id = 0,
+ .min_millivolts = 850,
+ .max_millivolts = 1170,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 0,
+ .process_id = 1,
+ .min_millivolts = 850,
+ .max_millivolts = 1170,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+};
+
+static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra124_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra124_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra124_cpu_cvb_tables)
+};
+
+static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra210_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra210_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra210_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra210_cpu_cvb_tables),
+};
+
+static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
+ {
+ .compatible = "nvidia,tegra124-dfll",
+ .data = &tegra124_dfll_fcpu_data,
+ },
+ {
+ .compatible = "nvidia,tegra210-dfll",
+ .data = &tegra210_dfll_fcpu_data
+ },
+ { },
+};
+
+static void get_alignment_from_dt(struct device *dev,
+ struct rail_alignment *align)
+{
+ if (of_property_read_u32(dev->of_node,
+ "nvidia,pwm-voltage-step-microvolts",
+ &align->step_uv))
+ align->step_uv = 0;
+
+ if (of_property_read_u32(dev->of_node,
+ "nvidia,pwm-min-microvolts",
+ &align->offset_uv))
+ align->offset_uv = 0;
+}
+
+static int get_alignment_from_regulator(struct device *dev,
+ struct rail_alignment *align)
+{
+ struct regulator *reg = devm_regulator_get(dev, "vdd-cpu");
+
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ align->offset_uv = regulator_list_voltage(reg, 0);
+ align->step_uv = regulator_get_linear_step(reg);
+
+ devm_regulator_put(reg);
+
+ return 0;
+}
+
static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
{
int process_id, speedo_id, speedo_value, err;
struct tegra_dfll_soc_data *soc;
+ const struct dfll_fcpu_data *fcpu_data;
+ struct rail_alignment align;
+
+ fcpu_data = of_device_get_match_data(&pdev->dev);
+ if (!fcpu_data)
+ return -ENODEV;
process_id = tegra_sku_info.cpu_process_id;
speedo_id = tegra_sku_info.cpu_speedo_id;
speedo_value = tegra_sku_info.cpu_speedo_value;
- if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) {
+ if (speedo_id >= fcpu_data->cpu_max_freq_table_size) {
dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n",
speedo_id);
return -ENODEV;
@@ -107,12 +590,22 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return -ENODEV;
}
- soc->max_freq = cpu_max_freq_table[speedo_id];
+ if (of_property_read_bool(pdev->dev.of_node, "nvidia,pwm-to-pmic")) {
+ get_alignment_from_dt(&pdev->dev, &align);
+ } else {
+ err = get_alignment_from_regulator(&pdev->dev, &align);
+ if (err)
+ return err;
+ }
+
+ soc->max_freq = fcpu_data->cpu_max_freq_table[speedo_id];
+
+ soc->cvb = tegra_cvb_add_opp_table(soc->dev, fcpu_data->cpu_cvb_tables,
+ fcpu_data->cpu_cvb_tables_size,
+ &align, process_id, speedo_id,
+ speedo_value, soc->max_freq);
+ soc->alignment = align;
- soc->cvb = tegra_cvb_add_opp_table(soc->dev, tegra124_cpu_cvb_tables,
- ARRAY_SIZE(tegra124_cpu_cvb_tables),
- process_id, speedo_id, speedo_value,
- soc->max_freq);
if (IS_ERR(soc->cvb)) {
dev_err(&pdev->dev, "couldn't add OPP table: %ld\n",
PTR_ERR(soc->cvb));
@@ -144,11 +637,6 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
- { .compatible = "nvidia,tegra124-dfll", },
- { },
-};
-
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
tegra_dfll_runtime_resume, NULL)
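Aside (not part of the patch, values hypothetical): whichever path fills the alignment — the new nvidia,pwm-voltage-step-microvolts / nvidia,pwm-min-microvolts properties in PWM mode, or regulator_list_voltage(reg, 0) plus regulator_get_linear_step() otherwise — in PWM mode the DFLL core then builds its software voltage ladder as lut_uv[i] = offset_uv + i * step_uv (see dfll_fetch_pwm_params() earlier in this series). A minimal sketch of that ladder, with a simplified stand-in for the kernel's struct rail_alignment:

	#include <stdio.h>

	/* simplified stand-in for the kernel's struct rail_alignment */
	struct rail_alignment {
		unsigned int offset_uv;
		unsigned int step_uv;
	};

	int main(void)
	{
		/* hypothetical values, e.g. from the two PWM DT properties */
		struct rail_alignment align = { .offset_uv = 708000, .step_uv = 19200 };
		unsigned long lut_uv[33];	/* MAX_DFLL_VOLTAGES entries */
		int i;

		/* same ladder dfll_fetch_pwm_params() builds for the PWM interface */
		for (i = 0; i < 33; i++)
			lut_uv[i] = align.offset_uv + (unsigned long)i * align.step_uv;

		/* prints: lut_uv[0]=708000 uV, lut_uv[32]=1322400 uV */
		printf("lut_uv[0]=%lu uV, lut_uv[32]=%lu uV\n", lut_uv[0], lut_uv[32]);
		return 0;
	}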
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index da9e8e7b5ce5..35eeb6adc68e 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -1,7 +1,7 @@
/*
* Utility functions for parsing Tegra CVB voltage tables
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -62,9 +62,9 @@ static int round_voltage(int mv, const struct rail_alignment *align, int up)
}
static int build_opp_table(struct device *dev, const struct cvb_table *table,
+ struct rail_alignment *align,
int speedo_value, unsigned long max_freq)
{
- const struct rail_alignment *align = &table->alignment;
int i, ret, dfll_mv, min_mv, max_mv;
min_mv = round_voltage(table->min_millivolts, align, UP);
@@ -109,8 +109,9 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
*/
const struct cvb_table *
tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
- size_t count, int process_id, int speedo_id,
- int speedo_value, unsigned long max_freq)
+ size_t count, struct rail_alignment *align,
+ int process_id, int speedo_id, int speedo_value,
+ unsigned long max_freq)
{
size_t i;
int ret;
@@ -124,7 +125,8 @@ tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
if (table->process_id != -1 && table->process_id != process_id)
continue;
- ret = build_opp_table(dev, table, speedo_value, max_freq);
+ ret = build_opp_table(dev, table, align, speedo_value,
+ max_freq);
return ret ? ERR_PTR(ret) : table;
}
diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h
index c1f077993b2a..91a1941c21ef 100644
--- a/drivers/clk/tegra/cvb.h
+++ b/drivers/clk/tegra/cvb.h
@@ -41,6 +41,7 @@ struct cvb_cpu_dfll_data {
u32 tune0_low;
u32 tune0_high;
u32 tune1;
+ unsigned int tune_high_min_millivolts;
};
struct cvb_table {
@@ -49,7 +50,6 @@ struct cvb_table {
int min_millivolts;
int max_millivolts;
- struct rail_alignment alignment;
int speedo_scale;
int voltage_scale;
@@ -59,8 +59,9 @@ struct cvb_table {
const struct cvb_table *
tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *cvb_tables,
- size_t count, int process_id, int speedo_id,
- int speedo_value, unsigned long max_freq);
+ size_t count, struct rail_alignment *align,
+ int process_id, int speedo_id, int speedo_value,
+ unsigned long max_freq);
void tegra_cvb_remove_opp_table(struct device *dev,
const struct cvb_table *table,
unsigned long max_freq);
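
Editor's note: the DFLL/CVB hunks above move the rail_alignment out of the static CVB tables and pass it into tegra_cvb_add_opp_table(), sourced either from the nvidia,pwm-* DT properties or from the vdd-cpu regulator. As a rough illustration of what that alignment means, here is a small standalone sketch (not the kernel's round_voltage(); the rail values are made up) of snapping a CVB target voltage onto a rail defined by offset_uv + N * step_uv:

#include <stdio.h>

struct rail_alignment {
	int offset_uv;	/* lowest selectable voltage, in uV */
	int step_uv;	/* regulator step size, in uV */
};

/* Round target_uv to the nearest selectable rail voltage, up or down. */
static int align_voltage(int target_uv, const struct rail_alignment *align, int up)
{
	int sel;

	if (align->step_uv == 0)
		return target_uv;

	sel = (target_uv - align->offset_uv +
	       (up ? align->step_uv - 1 : 0)) / align->step_uv;
	return align->offset_uv + sel * align->step_uv;
}

int main(void)
{
	/* made-up rail: 708 mV base, 19.2 mV per regulator step */
	struct rail_alignment a = { .offset_uv = 708000, .step_uv = 19200 };

	printf("903000 uV rounded down -> %d uV\n", align_voltage(903000, &a, 0));
	printf("903000 uV rounded up   -> %d uV\n", align_voltage(903000, &a, 1));
	return 0;
}
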
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a9e26f6a81a1..171502a356aa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -131,7 +131,8 @@ config SUN5I_HSTIMER
config TEGRA_TIMER
bool "Tegra timer driver" if COMPILE_TEST
select CLKSRC_MMIO
- depends on ARM
+ select TIMER_OF
+ depends on ARM || ARM64
help
	  Enables support for the Tegra timer driver.
@@ -360,6 +361,16 @@ config ARM64_ERRATUM_858921
The workaround will be dynamically enabled when an affected
core is detected.
+config SUN50I_ERRATUM_UNKNOWN1
+ bool "Workaround for Allwinner A64 erratum UNKNOWN1"
+ default y
+ depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option enables a workaround for instability in the timer on
+ the Allwinner A64 SoC. The workaround will only be active if the
+ allwinner,erratum-unknown1 property is found in the timer node.
+
config ARM_GLOBAL_TIMER
bool "Support for the ARM global timer" if COMPILE_TEST
select TIMER_OF if OF
@@ -634,4 +645,13 @@ config GX6605S_TIMER
help
This option enables support for gx6605s SOC's timer.
+config MILBEAUT_TIMER
+ bool "Milbeaut timer driver" if COMPILE_TEST
+ depends on OF
+ depends on ARM
+ select TIMER_OF
+ select CLKSRC_MMIO
+ help
+	  Enables support for the Milbeaut timer driver.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cdd210ff89ea..be6e0fbc7489 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
-obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += timer-cs5535.o
obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
@@ -29,7 +29,7 @@ obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
-obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
+obj-$(CONFIG_CLKSRC_PXA) += timer-pxa.o
obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
obj-$(CONFIG_U300_TIMER) += timer-u300.o
obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
@@ -55,6 +55,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += timer-owl.o
+obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_RDA_TIMER) += timer-rda.o
@@ -69,7 +70,7 @@ obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
-obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
+obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o
obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9a7d4dc00b6e..a8b20b65bd4b 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -326,6 +326,48 @@ static u64 notrace arm64_1188873_read_cntvct_el0(void)
}
#endif
+#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
+/*
+ * The low bits of the counter registers are indeterminate while bit 10 or
+ * greater is rolling over. Since the counter value can jump both backward
+ * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
+ * with all ones or all zeros in the low bits. Bound the loop by the maximum
+ * number of CPU cycles in 3 consecutive 24 MHz counter periods.
+ */
+#define __sun50i_a64_read_reg(reg) ({ \
+ u64 _val; \
+ int _retries = 150; \
+ \
+ do { \
+ _val = read_sysreg(reg); \
+ _retries--; \
+ } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
+ \
+ WARN_ON_ONCE(!_retries); \
+ _val; \
+})
+
+static u64 notrace sun50i_a64_read_cntpct_el0(void)
+{
+ return __sun50i_a64_read_reg(cntpct_el0);
+}
+
+static u64 notrace sun50i_a64_read_cntvct_el0(void)
+{
+ return __sun50i_a64_read_reg(cntvct_el0);
+}
+
+static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
+{
+ return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
+}
+
+static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
+{
+ return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
+}
+#endif
+
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
@@ -423,6 +465,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
},
#endif
+#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
+ {
+ .match_type = ate_match_dt,
+ .id = "allwinner,erratum-unknown1",
+ .desc = "Allwinner erratum UNKNOWN1",
+ .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
+ .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
+ .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
+ .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
+ .set_next_event_phys = erratum_set_next_event_tval_phys,
+ .set_next_event_virt = erratum_set_next_event_tval_virt,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
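
Editor's note: the SUN50I_ERRATUM_UNKNOWN1 hunks above discard counter reads whose low 10 bits are all ones or all zeros, since those values can appear while bit 10 or higher is rolling over. A minimal userspace sketch of that filtering loop, with a fake counter read standing in for read_sysreg() and made-up sample values, and the same 150-retry bound as the patch:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for read_sysreg(cntvct_el0): two erratum-affected values
 * (low 10 bits all ones), then a sane one. */
static uint64_t fake_counter_read(void)
{
	static const uint64_t samples[] = { 0x12007ff, 0x1200fff, 0x1200812 };
	static unsigned int i;
	uint64_t val = samples[i];

	if (i < 2)
		i++;
	return val;
}

/* Same filter as __sun50i_a64_read_reg(): retry while the low 10 bits are
 * all zeros or all ones, bounded by 150 attempts. */
static uint64_t sun50i_style_read(void)
{
	uint64_t val;
	int retries = 150;

	do {
		val = fake_counter_read();
		retries--;
	} while (((val + 1) & 0x3ff) <= 1 && retries);

	return val;
}

int main(void)
{
	printf("filtered counter value: 0x%llx\n",
	       (unsigned long long)sun50i_style_read());
	return 0;
}
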
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 7a244b681876..34bd250d46c6 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -10,14 +10,12 @@
* published by the Free Software Foundation.
*/
-#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
-#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
@@ -388,6 +386,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+{
+ /* Clear the MCT tick interrupt */
+ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+}
+
static int exynos4_tick_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@@ -404,6 +409,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_stop(mevt);
+ exynos4_mct_tick_clear(mevt);
return 0;
}
@@ -420,8 +426,11 @@ static int set_state_periodic(struct clock_event_device *evt)
return 0;
}
-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
+ struct mct_clock_event_device *mevt = dev_id;
+ struct clock_event_device *evt = &mevt->evt;
+
/*
* This is for supporting oneshot mode.
* Mct would generate interrupt periodically
@@ -430,16 +439,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
- /* Clear the MCT tick interrupt */
- if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
- exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
-}
-
-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
-{
- struct mct_clock_event_device *mevt = dev_id;
- struct clock_event_device *evt = &mevt->evt;
-
exynos4_mct_tick_clear(mevt);
evt->event_handler(evt);
@@ -507,13 +506,12 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
int err, cpu;
struct clk *mct_clk, *tick_clk;
- tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
- clk_get(NULL, "fin_pll");
+ tick_clk = of_clk_get_by_name(np, "fin_pll");
if (IS_ERR(tick_clk))
panic("%s: unable to determine tick clock rate\n", __func__);
clk_rate = clk_get_rate(tick_clk);
- mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct");
+ mct_clk = of_clk_get_by_name(np, "mct");
if (IS_ERR(mct_clk))
panic("%s: unable to retrieve mct clock instance\n", __func__);
clk_prepare_enable(mct_clk);
@@ -562,7 +560,19 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
return 0;
out_irq:
- free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ if (mct_int_type == MCT_INT_PPI) {
+ free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+ } else {
+ for_each_possible_cpu(cpu) {
+ struct mct_clock_event_device *pcpu_mevt =
+ per_cpu_ptr(&percpu_mct_tick, cpu);
+
+ if (pcpu_mevt->evt.irq != -1) {
+ free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
+ pcpu_mevt->evt.irq = -1;
+ }
+ }
+ }
return err;
}
@@ -581,11 +591,7 @@ static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
* timer irqs are specified after the four global timer
* irqs are specified.
*/
-#ifdef CONFIG_OF
nr_irqs = of_irq_count(np);
-#else
- nr_irqs = 0;
-#endif
for (i = MCT_L0_IRQ; i < nr_irqs; i++)
mct_irqs[i] = irq_of_parse_and_map(np, i);
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/timer-cs5535.c
index 1de8cac99a0e..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/timer-cs5535.c
diff --git a/drivers/clocksource/timer-milbeaut.c b/drivers/clocksource/timer-milbeaut.c
new file mode 100644
index 000000000000..f2019a88e3ee
--- /dev/null
+++ b/drivers/clocksource/timer-milbeaut.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Socionext Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include "timer-of.h"
+
+#define MLB_TMR_TMCSR_OFS 0x0
+#define MLB_TMR_TMR_OFS 0x4
+#define MLB_TMR_TMRLR1_OFS 0x8
+#define MLB_TMR_TMRLR2_OFS 0xc
+#define MLB_TMR_REGSZPCH 0x10
+
+#define MLB_TMR_TMCSR_OUTL BIT(5)
+#define MLB_TMR_TMCSR_RELD BIT(4)
+#define MLB_TMR_TMCSR_INTE BIT(3)
+#define MLB_TMR_TMCSR_UF BIT(2)
+#define MLB_TMR_TMCSR_CNTE BIT(1)
+#define MLB_TMR_TMCSR_TRG BIT(0)
+
+#define MLB_TMR_TMCSR_CSL_DIV2 0
+#define MLB_TMR_DIV_CNT 2
+
+#define MLB_TMR_SRC_CH (1)
+#define MLB_TMR_EVT_CH (0)
+
+#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
+#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
+
+#define MLB_TMR_SRC_TMCSR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMCSR_OFS)
+#define MLB_TMR_SRC_TMR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMR_OFS)
+#define MLB_TMR_SRC_TMRLR1_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR1_OFS)
+#define MLB_TMR_SRC_TMRLR2_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR2_OFS)
+
+#define MLB_TMR_EVT_TMCSR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMCSR_OFS)
+#define MLB_TMR_EVT_TMR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMR_OFS)
+#define MLB_TMR_EVT_TMRLR1_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR1_OFS)
+#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
+
+#define MLB_TIMER_RATING 500
+
+static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clk = dev_id;
+ struct timer_of *to = to_timer_of(clk);
+ u32 val;
+
+ val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ val &= ~MLB_TMR_TMCSR_UF;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ clk->event_handler(clk);
+
+ return IRQ_HANDLED;
+}
+
+static int mlb_set_state_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ writel_relaxed(to->of_clk.period, timer_of_base(to) +
+ MLB_TMR_EVT_TMRLR1_OFS);
+ val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE |
+ MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_set_state_oneshot(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_clkevt_next_event(unsigned long event,
+ struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
+ writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 |
+ MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE |
+ MLB_TMR_TMCSR_TRG, timer_of_base(to) +
+ MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_config_clock_source(struct timer_of *to)
+{
+ writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
+ writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS);
+ writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
+ writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
+ writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) +
+ MLB_TMR_SRC_TMCSR_OFS);
+ return 0;
+}
+
+static int mlb_config_clock_event(struct timer_of *to)
+{
+ writel_relaxed(0, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ return 0;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = "mlb-clkevt",
+ .rating = MLB_TIMER_RATING,
+ .cpumask = cpu_possible_mask,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_oneshot = mlb_set_state_oneshot,
+ .set_state_periodic = mlb_set_state_periodic,
+ .set_next_event = mlb_clkevt_next_event,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = mlb_timer_interrupt,
+ },
+};
+
+static u64 notrace mlb_timer_sched_read(void)
+{
+ return ~readl_relaxed(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS);
+}
+
+static int __init mlb_timer_init(struct device_node *node)
+{
+ int ret;
+ unsigned long rate;
+
+ ret = timer_of_init(node, &to);
+ if (ret)
+ return ret;
+
+ rate = timer_of_rate(&to) / MLB_TMR_DIV_CNT;
+ mlb_config_clock_source(&to);
+ clocksource_mmio_init(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS,
+ node->name, rate, MLB_TIMER_RATING, 32,
+ clocksource_mmio_readl_down);
+ sched_clock_register(mlb_timer_sched_read, 32, rate);
+ mlb_config_clock_event(&to);
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 15,
+ 0xffffffff);
+ return 0;
+}
+TIMER_OF_DECLARE(mlb_peritimer, "socionext,milbeaut-timer",
+ mlb_timer_init);
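
Editor's note: timer-milbeaut.c registers the source channel as a down-counting clocksource (clocksource_mmio_readl_down) and inverts the raw register in mlb_timer_sched_read(), and the clocksource rate is the input clock divided by two because of the CSL_DIV2 prescaler. A tiny sketch of those two conversions, with a hypothetical input clock rate:

#include <stdint.h>
#include <stdio.h>

#define MLB_TMR_DIV_CNT	2	/* CSL_DIV2: timers run at input clock / 2 */

int main(void)
{
	unsigned long input_rate = 40000000;	/* hypothetical input clock */
	unsigned long rate = input_rate / MLB_TMR_DIV_CNT;

	/* The source channel counts down from ~0, so elapsed cycles are the
	 * bitwise inverse of the raw register value, as in
	 * mlb_timer_sched_read() and clocksource_mmio_readl_down. */
	uint32_t raw = 0xffffff00;
	uint32_t elapsed = ~raw;

	printf("clocksource rate: %lu Hz\n", rate);
	printf("elapsed cycles:   %u\n", elapsed);
	return 0;
}
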
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/timer-pxa.c
index 395837938301..395837938301 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/timer-pxa.c
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 431892200a08..e8163693e936 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -95,13 +95,30 @@ static int __init riscv_timer_init_dt(struct device_node *n)
struct clocksource *cs;
hartid = riscv_of_processor_hartid(n);
+ if (hartid < 0) {
+ pr_warn("Invalid hartid for node [%pOF], error = [%d]\n",
+ n, hartid);
+ return hartid;
+ }
+
cpuid = riscv_hartid_to_cpuid(hartid);
+ if (cpuid < 0) {
+ pr_warn("Invalid cpuid for hartid [%d]\n", hartid);
+ return cpuid;
+ }
if (cpuid != smp_processor_id())
return 0;
+ pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
+ __func__, cpuid, hartid);
cs = per_cpu_ptr(&riscv_clocksource, cpuid);
- clocksource_register_hz(cs, riscv_timebase);
+ error = clocksource_register_hz(cs, riscv_timebase);
+ if (error) {
+ pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
+ error, cpuid);
+ return error;
+ }
sched_clock_register(riscv_sched_clock,
BITS_PER_LONG, riscv_timebase);
@@ -110,8 +127,8 @@ static int __init riscv_timer_init_dt(struct device_node *n)
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
if (error)
- pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
- error, cpuid);
+ pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
+ error);
return error;
}
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 3b56ea3f52af..552c5254390c 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -202,6 +202,11 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
}
rate = clk_get_rate(clk);
+ if (!rate) {
+ pr_err("Couldn't get parent clock rate\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
cs->timer.base = base;
cs->timer.clk = clk;
@@ -275,6 +280,11 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
}
rate = clk_get_rate(clk);
+ if (!rate) {
+ pr_err("Couldn't get parent clock rate\n");
+ ret = -EINVAL;
+ goto err_disable_clk;
+ }
ce->timer.base = base;
ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/timer-tango-xtal.c
index 3f94e454ef99..3f94e454ef99 100644
--- a/drivers/clocksource/tango_xtal.c
+++ b/drivers/clocksource/timer-tango-xtal.c
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
index 4293943f4e2b..fdb3d795a409 100644
--- a/drivers/clocksource/timer-tegra20.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -15,21 +15,24 @@
*
*/
-#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/time.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/clockchips.h>
-#include <linux/clocksource.h>
-#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/percpu.h>
#include <linux/sched_clock.h>
-#include <linux/delay.h>
+#include <linux/time.h>
+
+#include "timer-of.h"
+#ifdef CONFIG_ARM
#include <asm/mach/time.h>
+#endif
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
@@ -39,74 +42,161 @@
#define TIMERUS_USEC_CFG 0x14
#define TIMERUS_CNTR_FREEZE 0x4c
-#define TIMER1_BASE 0x0
-#define TIMER2_BASE 0x8
-#define TIMER3_BASE 0x50
-#define TIMER4_BASE 0x58
-
-#define TIMER_PTV 0x0
-#define TIMER_PCR 0x4
-
+#define TIMER_PTV 0x0
+#define TIMER_PTV_EN BIT(31)
+#define TIMER_PTV_PER BIT(30)
+#define TIMER_PCR 0x4
+#define TIMER_PCR_INTR_CLR BIT(30)
+
+#ifdef CONFIG_ARM
+#define TIMER_CPU0 0x50 /* TIMER3 */
+#else
+#define TIMER_CPU0 0x90 /* TIMER10 */
+#define TIMER10_IRQ_IDX 10
+#define IRQ_IDX_FOR_CPU(cpu) (TIMER10_IRQ_IDX + cpu)
+#endif
+#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8)
+
+static u32 usec_config;
static void __iomem *timer_reg_base;
+#ifdef CONFIG_ARM
static void __iomem *rtc_base;
-
static struct timespec64 persistent_ts;
static u64 persistent_ms, last_persistent_ms;
-
static struct delay_timer tegra_delay_timer;
-
-#define timer_writel(value, reg) \
- writel_relaxed(value, timer_reg_base + (reg))
-#define timer_readl(reg) \
- readl_relaxed(timer_reg_base + (reg))
+#endif
static int tegra_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- u32 reg;
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
- reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+ writel(TIMER_PTV_EN |
+ ((cycles > 1) ? (cycles - 1) : 0), /* n+1 scheme */
+ reg_base + TIMER_PTV);
return 0;
}
-static inline void timer_shutdown(struct clock_event_device *evt)
+static int tegra_timer_shutdown(struct clock_event_device *evt)
{
- timer_writel(0, TIMER3_BASE + TIMER_PTV);
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(0, reg_base + TIMER_PTV);
+
+ return 0;
}
-static int tegra_timer_shutdown(struct clock_event_device *evt)
+static int tegra_timer_set_periodic(struct clock_event_device *evt)
{
- timer_shutdown(evt);
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(TIMER_PTV_EN | TIMER_PTV_PER |
+ ((timer_of_rate(to_timer_of(evt)) / HZ) - 1),
+ reg_base + TIMER_PTV);
+
return 0;
}
-static int tegra_timer_set_periodic(struct clock_event_device *evt)
+static irqreturn_t tegra_timer_isr(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_timer_suspend(struct clock_event_device *evt)
+{
+ void __iomem *reg_base = timer_of_base(to_timer_of(evt));
+
+ writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR);
+}
+
+static void tegra_timer_resume(struct clock_event_device *evt)
+{
+ writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG);
+}
+
+#ifdef CONFIG_ARM64
+static DEFINE_PER_CPU(struct timer_of, tegra_to) = {
+ .flags = TIMER_OF_CLOCK | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "tegra_timer",
+ .rating = 460,
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+ },
+};
+
+static int tegra_timer_setup(unsigned int cpu)
{
- u32 reg = 0xC0000000 | ((1000000 / HZ) - 1);
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ irq_force_affinity(to->clkevt.irq, cpumask_of(cpu));
+ enable_irq(to->clkevt.irq);
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ 1, /* min */
+ 0x1fffffff); /* 29 bits */
- timer_shutdown(evt);
- timer_writel(reg, TIMER3_BASE + TIMER_PTV);
return 0;
}
-static struct clock_event_device tegra_clockevent = {
- .name = "timer0",
- .rating = 300,
- .features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DYNIRQ,
- .set_next_event = tegra_timer_set_next_event,
- .set_state_shutdown = tegra_timer_shutdown,
- .set_state_periodic = tegra_timer_set_periodic,
- .set_state_oneshot = tegra_timer_shutdown,
- .tick_resume = tegra_timer_shutdown,
+static int tegra_timer_stop(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
+
+ to->clkevt.set_state_shutdown(&to->clkevt);
+ disable_irq_nosync(to->clkevt.irq);
+
+ return 0;
+}
+#else /* CONFIG_ARM */
+static struct timer_of tegra_to = {
+ .flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ,
+
+ .clkevt = {
+ .name = "tegra_timer",
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .set_next_event = tegra_timer_set_next_event,
+ .set_state_shutdown = tegra_timer_shutdown,
+ .set_state_periodic = tegra_timer_set_periodic,
+ .set_state_oneshot = tegra_timer_shutdown,
+ .tick_resume = tegra_timer_shutdown,
+ .suspend = tegra_timer_suspend,
+ .resume = tegra_timer_resume,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .index = 2,
+ .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
+ .handler = tegra_timer_isr,
+ },
};
static u64 notrace tegra_read_sched_clock(void)
{
- return timer_readl(TIMERUS_CNTR_1US);
+ return readl(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
+static unsigned long tegra_delay_timer_read_counter_long(void)
+{
+ return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
/*
@@ -143,100 +233,155 @@ static void tegra_read_persistent_clock64(struct timespec64 *ts)
timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
*ts = persistent_ts;
}
+#endif
-static unsigned long tegra_delay_timer_read_counter_long(void)
-{
- return readl(timer_reg_base + TIMERUS_CNTR_1US);
-}
-
-static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
- timer_writel(1<<30, TIMER3_BASE + TIMER_PCR);
- evt->event_handler(evt);
- return IRQ_HANDLED;
-}
-
-static struct irqaction tegra_timer_irq = {
- .name = "timer0",
- .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
- .handler = tegra_timer_interrupt,
- .dev_id = &tegra_clockevent,
-};
-
-static int __init tegra20_init_timer(struct device_node *np)
+static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
{
- struct clk *clk;
- unsigned long rate;
- int ret;
-
- timer_reg_base = of_iomap(np, 0);
- if (!timer_reg_base) {
- pr_err("Can't map timer registers\n");
- return -ENXIO;
- }
+ int ret = 0;
- tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
- if (tegra_timer_irq.irq <= 0) {
- pr_err("Failed to map timer IRQ\n");
- return -EINVAL;
- }
+ ret = timer_of_init(np, to);
+ if (ret < 0)
+ goto out;
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- pr_warn("Unable to get timer clock. Assuming 12Mhz input clock.\n");
- rate = 12000000;
- } else {
- clk_prepare_enable(clk);
- rate = clk_get_rate(clk);
- }
+ timer_reg_base = timer_of_base(to);
- switch (rate) {
+ /*
+ * Configure the microsecond timer to run at 1 MHz.
+ * The config register is 0xqqww, where qq is the "dividend" and ww is
+ * the "divisor"; both fields use an n+1 scheme.
+ */
+ switch (timer_of_rate(to)) {
case 12000000:
- timer_writel(0x000b, TIMERUS_USEC_CFG);
+ usec_config = 0x000b; /* (11+1)/(0+1) */
+ break;
+ case 12800000:
+ usec_config = 0x043f; /* (63+1)/(4+1) */
break;
case 13000000:
- timer_writel(0x000c, TIMERUS_USEC_CFG);
+ usec_config = 0x000c; /* (12+1)/(0+1) */
+ break;
+ case 16800000:
+ usec_config = 0x0453; /* (83+1)/(4+1) */
break;
case 19200000:
- timer_writel(0x045f, TIMERUS_USEC_CFG);
+ usec_config = 0x045f; /* (95+1)/(4+1) */
break;
case 26000000:
- timer_writel(0x0019, TIMERUS_USEC_CFG);
+ usec_config = 0x0019; /* (25+1)/(0+1) */
+ break;
+ case 38400000:
+ usec_config = 0x04bf; /* (191+1)/(4+1) */
+ break;
+ case 48000000:
+ usec_config = 0x002f; /* (47+1)/(0+1) */
break;
default:
- WARN(1, "Unknown clock rate");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG);
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_ARM64
+static int __init tegra_init_timer(struct device_node *np)
+{
+ int cpu, ret = 0;
+ struct timer_of *to;
+
+ to = this_cpu_ptr(&tegra_to);
+ ret = tegra_timer_common_init(np, to);
+ if (ret < 0)
+ goto out;
+
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to;
+
+ cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu);
+ cpu_to->of_clk.rate = timer_of_rate(to);
+ cpu_to->clkevt.cpumask = cpumask_of(cpu);
+ cpu_to->clkevt.irq =
+ irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu));
+ if (!cpu_to->clkevt.irq) {
+ pr_err("%s: can't map IRQ for CPU%d\n",
+ __func__, cpu);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
+ ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr,
+ IRQF_TIMER | IRQF_NOBALANCING,
+ cpu_to->clkevt.name, &cpu_to->clkevt);
+ if (ret) {
+ pr_err("%s: cannot setup irq %d for CPU%d\n",
+ __func__, cpu_to->clkevt.irq, cpu);
+ ret = -EINVAL;
+ goto out_irq;
+ }
+ }
+
+ cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING,
+ "AP_TEGRA_TIMER_STARTING", tegra_timer_setup,
+ tegra_timer_stop);
+
+ return ret;
+out_irq:
+ for_each_possible_cpu(cpu) {
+ struct timer_of *cpu_to;
+
+ cpu_to = per_cpu_ptr(&tegra_to, cpu);
+ if (cpu_to->clkevt.irq) {
+ free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt);
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ }
}
+out:
+ timer_of_cleanup(to);
+ return ret;
+}
+#else /* CONFIG_ARM */
+static int __init tegra_init_timer(struct device_node *np)
+{
+ int ret = 0;
+
+ ret = tegra_timer_common_init(np, &tegra_to);
+ if (ret < 0)
+ goto out;
- sched_clock_register(tegra_read_sched_clock, 32, 1000000);
+ tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0);
+ tegra_to.of_clk.rate = 1000000; /* microsecond timer */
+ sched_clock_register(tegra_read_sched_clock, 32,
+ timer_of_rate(&tegra_to));
ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
- "timer_us", 1000000, 300, 32,
- clocksource_mmio_readl_up);
+ "timer_us", timer_of_rate(&tegra_to),
+ 300, 32, clocksource_mmio_readl_up);
if (ret) {
pr_err("Failed to register clocksource\n");
- return ret;
+ goto out;
}
tegra_delay_timer.read_current_timer =
tegra_delay_timer_read_counter_long;
- tegra_delay_timer.freq = 1000000;
+ tegra_delay_timer.freq = timer_of_rate(&tegra_to);
register_current_timer_delay(&tegra_delay_timer);
- ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
- if (ret) {
- pr_err("Failed to register timer IRQ: %d\n", ret);
- return ret;
- }
+ clockevents_config_and_register(&tegra_to.clkevt,
+ timer_of_rate(&tegra_to),
+ 0x1,
+ 0x1fffffff);
- tegra_clockevent.cpumask = cpu_possible_mask;
- tegra_clockevent.irq = tegra_timer_irq.irq;
- clockevents_config_and_register(&tegra_clockevent, 1000000,
- 0x1, 0x1fffffff);
+ return ret;
+out:
+ timer_of_cleanup(&tegra_to);
- return 0;
+ return ret;
}
-TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
static int __init tegra20_init_rtc(struct device_node *np)
{
@@ -261,3 +406,6 @@ static int __init tegra20_init_rtc(struct device_node *np)
return register_persistent_clock(tegra_read_persistent_clock64);
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
+#endif
+TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer);
+TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer);
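
Editor's note: the rewritten tegra_timer_common_init() keeps the original idea: program TIMERUS_USEC_CFG so the microsecond counter ticks at 1 MHz regardless of the input clock, with the dividend in the upper byte and the divisor in the lower byte, both n+1 encoded (per the comments in the patch). The driver hard-codes the values per supported rate; purely as an illustration, the same numbers can be derived like this:

#include <stdio.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

/* Pack a TIMERUS_USEC_CFG value for a given input clock: the hardware
 * scales the clock by dividend/divisor to produce the 1 MHz counter, with
 * the (n+1 encoded) dividend in bits 15:8 and divisor in bits 7:0. */
static unsigned int tegra_usec_cfg(unsigned long rate)
{
	unsigned long g = gcd_ul(rate, 1000000);
	unsigned long dividend = 1000000 / g;
	unsigned long divisor = rate / g;

	return ((dividend - 1) << 8) | (divisor - 1);
}

int main(void)
{
	printf("19.2 MHz -> 0x%04x\n", tegra_usec_cfg(19200000)); /* 0x045f */
	printf("38.4 MHz -> 0x%04x\n", tegra_usec_cfg(38400000)); /* 0x04bf */
	printf("12 MHz   -> 0x%04x\n", tegra_usec_cfg(12000000)); /* 0x000b */
	return 0;
}
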
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 688f10227793..1a6778e81f90 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -272,8 +272,8 @@ config ARM_TEGRA20_CPUFREQ
This adds the CPUFreq driver support for Tegra20 SOCs.
config ARM_TEGRA124_CPUFREQ
- tristate "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
+ bool "Tegra124 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index b1c5468dca16..47729a22c159 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -119,6 +119,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt8176", },
{ .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra210", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 43530254201a..ba3795e13ac6 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -22,11 +22,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
-#include <linux/regulator/consumer.h>
#include <linux/types.h>
struct tegra124_cpufreq_priv {
- struct regulator *vdd_cpu_reg;
struct clk *cpu_clk;
struct clk *pllp_clk;
struct clk *pllx_clk;
@@ -60,14 +58,6 @@ out:
return ret;
}
-static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
-{
- clk_set_parent(priv->cpu_clk, priv->pllp_clk);
- clk_disable_unprepare(priv->dfll_clk);
- regulator_sync_voltage(priv->vdd_cpu_reg);
- clk_set_parent(priv->cpu_clk, priv->pllx_clk);
-}
-
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
struct tegra124_cpufreq_priv *priv;
@@ -88,16 +78,10 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
- if (IS_ERR(priv->vdd_cpu_reg)) {
- ret = PTR_ERR(priv->vdd_cpu_reg);
- goto out_put_np;
- }
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
if (IS_ERR(priv->cpu_clk)) {
ret = PTR_ERR(priv->cpu_clk);
- goto out_put_vdd_cpu_reg;
+ goto out_put_np;
}
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
@@ -129,15 +113,13 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_device_register_full(&cpufreq_dt_devinfo);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
- goto out_switch_to_pllx;
+ goto out_put_pllp_clk;
}
platform_set_drvdata(pdev, priv);
return 0;
-out_switch_to_pllx:
- tegra124_cpu_switch_to_pllx(priv);
out_put_pllp_clk:
clk_put(priv->pllp_clk);
out_put_pllx_clk:
@@ -146,34 +128,15 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_vdd_cpu_reg:
- regulator_put(priv->vdd_cpu_reg);
out_put_np:
of_node_put(np);
return ret;
}
-static int tegra124_cpufreq_remove(struct platform_device *pdev)
-{
- struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev);
-
- platform_device_unregister(priv->cpufreq_dt_pdev);
- tegra124_cpu_switch_to_pllx(priv);
-
- clk_put(priv->pllp_clk);
- clk_put(priv->pllx_clk);
- clk_put(priv->dfll_clk);
- clk_put(priv->cpu_clk);
- regulator_put(priv->vdd_cpu_reg);
-
- return 0;
-}
-
static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.probe = tegra124_cpufreq_probe,
- .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
@@ -181,7 +144,8 @@ static int __init tegra_cpufreq_init(void)
int ret;
struct platform_device *pdev;
- if (!of_machine_is_compatible("nvidia,tegra124"))
+ if (!(of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 89110dfc7127..190be0b1d109 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -198,3 +198,9 @@ config EFI_DEV_PATH_PARSER
bool
depends on ACPI
default n
+
+config EFI_EARLYCON
+ def_bool y
+ depends on SERIAL_EARLYCON && !ARM && !IA64
+ select FONT_SUPPORT
+ select ARCH_USE_MEMREMAP_PROT
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 5f9f5039de50..d2d0d2030620 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -30,5 +30,6 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_EFI_EARLYCON) += earlycon.o
obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
index ac1654f74dc7..0e206c9e0d7a 100644
--- a/drivers/firmware/efi/apple-properties.c
+++ b/drivers/firmware/efi/apple-properties.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* apple-properties.c - EFI device properties on Macs
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
* Note, all properties are considered as u8 arrays.
* To get a value of any of them the caller must use device_property_read_u8_array().
*/
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1a6a77df8a5e..311cd349a862 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Extensible Firmware Interface
*
* Based on Extensible Firmware Interface Specification version 2.4
*
* Copyright (C) 2013 - 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 352bd2473162..f99995666f86 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Extensible Firmware Interface
*
* Based on Extensible Firmware Interface Specification version 2.4
*
* Copyright (C) 2013, 2014 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/dmi.h>
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 96688986da56..b1395133389e 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EFI capsule loader driver.
*
* Copyright 2015 Intel Corporation
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 4938c29b7c5d..598b7800d14e 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EFI capsule support.
*
* Copyright 2013 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) "efi: " fmt
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index 502811344e81..36d3b8b9da47 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UEFI Common Platform Error Record (CPER) support
*
* Copyright (C) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index a7902fccdcfa..6a966ecdd80a 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* UEFI Common Platform Error Record (CPER) support
*
@@ -9,19 +10,6 @@
*
* For more information about CPER, please refer to Appendix N of UEFI
* Specification version 2.4.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
index 85d1834ee9b7..85ec99f97841 100644
--- a/drivers/firmware/efi/dev-path-parser.c
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* dev-path-parser.c - EFI Device Path parser
* Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
@@ -5,14 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2) as
* published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
new file mode 100644
index 000000000000..c9a0efca17b0
--- /dev/null
+++ b/drivers/firmware/efi/earlycon.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/console.h>
+#include <linux/efi.h>
+#include <linux/font.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/serial_core.h>
+#include <linux/screen_info.h>
+
+#include <asm/early_ioremap.h>
+
+static const struct font_desc *font;
+static u32 efi_x, efi_y;
+static u64 fb_base;
+static pgprot_t fb_prot;
+
+static __ref void *efi_earlycon_map(unsigned long start, unsigned long len)
+{
+ return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot));
+}
+
+static __ref void efi_earlycon_unmap(void *addr, unsigned long len)
+{
+ early_memunmap(addr, len);
+}
+
+static void efi_earlycon_clear_scanline(unsigned int y)
+{
+ unsigned long *dst;
+ u16 len;
+
+ len = screen_info.lfb_linelength;
+ dst = efi_earlycon_map(y*len, len);
+ if (!dst)
+ return;
+
+ memset(dst, 0, len);
+ efi_earlycon_unmap(dst, len);
+}
+
+static void efi_earlycon_scroll_up(void)
+{
+ unsigned long *dst, *src;
+ u16 len;
+ u32 i, height;
+
+ len = screen_info.lfb_linelength;
+ height = screen_info.lfb_height;
+
+ for (i = 0; i < height - font->height; i++) {
+ dst = efi_earlycon_map(i*len, len);
+ if (!dst)
+ return;
+
+ src = efi_earlycon_map((i + font->height) * len, len);
+ if (!src) {
+ efi_earlycon_unmap(dst, len);
+ return;
+ }
+
+ memmove(dst, src, len);
+
+ efi_earlycon_unmap(src, len);
+ efi_earlycon_unmap(dst, len);
+ }
+}
+
+static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h)
+{
+ const u32 color_black = 0x00000000;
+ const u32 color_white = 0x00ffffff;
+ const u8 *src;
+ u8 s8;
+ int m;
+
+ src = font->data + c * font->height;
+ s8 = *(src + h);
+
+ for (m = 0; m < 8; m++) {
+ if ((s8 >> (7 - m)) & 1)
+ *dst = color_white;
+ else
+ *dst = color_black;
+ dst++;
+ }
+}
+
+static void
+efi_earlycon_write(struct console *con, const char *str, unsigned int num)
+{
+ struct screen_info *si;
+ unsigned int len;
+ const char *s;
+ void *dst;
+
+ si = &screen_info;
+ len = si->lfb_linelength;
+
+ while (num) {
+ unsigned int linemax;
+ unsigned int h, count = 0;
+
+ for (s = str; *s && *s != '\n'; s++) {
+ if (count == num)
+ break;
+ count++;
+ }
+
+ linemax = (si->lfb_width - efi_x) / font->width;
+ if (count > linemax)
+ count = linemax;
+
+ for (h = 0; h < font->height; h++) {
+ unsigned int n, x;
+
+ dst = efi_earlycon_map((efi_y + h) * len, len);
+ if (!dst)
+ return;
+
+ s = str;
+ n = count;
+ x = efi_x;
+
+ while (n-- > 0) {
+ efi_earlycon_write_char(dst + x*4, *s, h);
+ x += font->width;
+ s++;
+ }
+
+ efi_earlycon_unmap(dst, len);
+ }
+
+ num -= count;
+ efi_x += count * font->width;
+ str += count;
+
+ if (num > 0 && *s == '\n') {
+ efi_x = 0;
+ efi_y += font->height;
+ str++;
+ num--;
+ }
+
+ if (efi_x + font->width > si->lfb_width) {
+ efi_x = 0;
+ efi_y += font->height;
+ }
+
+ if (efi_y + font->height > si->lfb_height) {
+ u32 i;
+
+ efi_y -= font->height;
+ efi_earlycon_scroll_up();
+
+ for (i = 0; i < font->height; i++)
+ efi_earlycon_clear_scanline(efi_y + i);
+ }
+ }
+}
+
+static int __init efi_earlycon_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ struct screen_info *si;
+ u16 xres, yres;
+ u32 i;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return -ENODEV;
+
+ fb_base = screen_info.lfb_base;
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ fb_base |= (u64)screen_info.ext_lfb_base << 32;
+
+ if (opt && !strcmp(opt, "ram"))
+ fb_prot = PAGE_KERNEL;
+ else
+ fb_prot = pgprot_writecombine(PAGE_KERNEL);
+
+ si = &screen_info;
+ xres = si->lfb_width;
+ yres = si->lfb_height;
+
+ /*
+ * efi_earlycon_write_char() implicitly assumes a framebuffer with
+ * 32 bits per pixel.
+ */
+ if (si->lfb_depth != 32)
+ return -ENODEV;
+
+ font = get_default_font(xres, yres, -1, -1);
+ if (!font)
+ return -ENODEV;
+
+ efi_y = rounddown(yres, font->height) - font->height;
+ for (i = 0; i < (yres - efi_y) / font->height; i++)
+ efi_earlycon_scroll_up();
+
+ device->con->write = efi_earlycon_write;
+ return 0;
+}
+EARLYCON_DECLARE(efifb, efi_earlycon_setup);
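
Editor's note: the earlycon added above requires a 32 bits-per-pixel linear framebuffer; each scanline is lfb_linelength bytes and efi_earlycon_write() maps one scanline at a time, writing glyph pixels at dst + x*4. A trivial sketch of that addressing (mode values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical mode: 800 px wide, 32 bpp */
	uint32_t pitch = 3200;		/* screen_info.lfb_linelength in bytes */
	uint32_t x = 16, y = 8;		/* pixel coordinates of a glyph */

	size_t offset = (size_t)y * pitch + (size_t)x * 4;

	printf("pixel (%u,%u) lives at byte offset %zu\n", x, y, offset);
	return 0;
}
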
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c
index b22ccfb0c991..a2384184a7de 100644
--- a/drivers/firmware/efi/efi-bgrt.c
+++ b/drivers/firmware/efi/efi-bgrt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2012 Intel Corporation
* Author: Josh Triplett <josh@joshtriplett.org>
@@ -5,10 +6,6 @@
* Based on the bgrt driver:
* Copyright 2012 Red Hat, Inc <mjg@redhat.com>
* Author: Matthew Garrett
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 0f7d97917197..9ea13e8d12ec 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0+
+
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/pstore.h>
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 503bbe2a9d49..61e099826cbb 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* efibc: control EFI bootloaders which obey LoaderEntryOneShot var
* Copyright (c) 2013-2016, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#define pr_fmt(fmt) "efibc: " fmt
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 8061667a6765..7576450c8254 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Originally from efivars.c,
*
@@ -6,63 +7,6 @@
*
* This code takes all variables accessible from EFI runtime and
* exports them via sysfs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Changelog:
- *
- * 17 May 2004 - Matt Domsch <Matt_Domsch@dell.com>
- * remove check for efi_enabled in exit
- * add MODULE_VERSION
- *
- * 26 Apr 2004 - Matt Domsch <Matt_Domsch@dell.com>
- * minor bug fixes
- *
- * 21 Apr 2004 - Matt Tolentino <matthew.e.tolentino@intel.com)
- * converted driver to export variable information via sysfs
- * and moved to drivers/firmware directory
- * bumped revision number to v0.07 to reflect conversion & move
- *
- * 10 Dec 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * fix locking per Peter Chubb's findings
- *
- * 25 Mar 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str()
- *
- * 12 Feb 2002 - Matt Domsch <Matt_Domsch@dell.com>
- * use list_for_each_safe when deleting vars.
- * remove ifdef CONFIG_SMP around include <linux/smp.h>
- * v0.04 release to linux-ia64@linuxia64.org
- *
- * 20 April 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * Moved vars from /proc/efi to /proc/efi/vars, and made
- * efi.c own the /proc/efi directory.
- * v0.03 release to linux-ia64@linuxia64.org
- *
- * 26 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * At the request of Stephane, moved ownership of /proc/efi
- * to efi.c, and now efivars lives under /proc/efi/vars.
- *
- * 12 March 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * Feedback received from Stephane Eranian incorporated.
- * efivar_write() checks copy_from_user() return value.
- * efivar_read/write() returns proper errno.
- * v0.02 release to linux-ia64@linuxia64.org
- *
- * 26 February 2001 - Matt Domsch <Matt_Domsch@dell.com>
- * v0.01 release to linux-ia64@linuxia64.org
*/
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 5d06bd247d07..d6dd5f503fa2 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* esrt.c
*
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 6c7d60c239b5..9501edc0fcfb 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fake_mem.c
*
@@ -8,21 +9,6 @@
* By specifying this parameter, you can add arbitrary attribute to
* specific memory range by updating original (firmware provided) EFI
* memmap.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, see <http://www.gnu.org/licenses/>.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
*/
#include <linux/kernel.h>
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d9845099635e..b0103e16fc1b 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -52,7 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o
-CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
# arm64 puts the stub in the kernel proper, which will unnecessarily retain all
@@ -89,7 +89,7 @@ quiet_cmd_stubcopy = STUBCPY $@
cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
- rm -f $@; /bin/false); \
+ rm -f $@; /bin/false); \
else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
else /bin/false; fi
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index c037c6c5d0b7..04e6ecd72cd9 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -367,6 +367,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
paddr = in->phys_addr;
size = in->num_pages * EFI_PAGE_SIZE;
+ if (novamap()) {
+ in->virt_addr = in->phys_addr;
+ continue;
+ }
+
/*
* Make the mapping compatible with 64k pages: this allows
* a 4k page size kernel to kexec a 64k page size kernel and
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index becbda445913..e8f7aefb6813 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/efi.h>
#include <asm/efi.h>
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 1b4d465cc5d9..1550d244e996 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013, 2014 Linaro Ltd; <roy.franz@linaro.org>
*
* This file implements the EFI boot stub for the arm64 kernel.
* Adapted from ARM version by Mark Salter <msalter@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
/*
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index e94975f4655b..e4610e72b78f 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Helper functions used by the EFI stub on multiple
* architectures. This should be #included by the EFI stub
* implementation files.
*
* Copyright 2011 Intel Corporation; author Matt Fleming
- *
- * This file is part of the Linux kernel, and is made available
- * under the terms of the GNU General Public License version 2.
- *
*/
#include <linux/efi.h>
@@ -34,6 +31,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
static int __section(.data) __nokaslr;
static int __section(.data) __quiet;
+static int __section(.data) __novamap;
int __pure nokaslr(void)
{
@@ -43,6 +41,10 @@ int __pure is_quiet(void)
{
return __quiet;
}
+int __pure novamap(void)
+{
+ return __novamap;
+}
#define EFI_MMAP_NR_SLACK_SLOTS 8
@@ -482,6 +484,11 @@ efi_status_t efi_parse_options(char const *cmdline)
__chunk_size = -1UL;
}
+ if (!strncmp(str, "novamap", 7)) {
+ str += strlen("novamap");
+ __novamap = 1;
+ }
+
/* Group words together, delimited by "," */
while (*str && *str != ' ' && *str != ',')
str++;
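
For reference, the flag added above is parsed out of the efi= option string alongside the existing nochunk handling, so it would typically be enabled from the kernel command line, e.g. (illustrative):

	efi=novamap
	efi=nochunk,novamap

where novamap asks the stub to skip SetVirtualAddressMap() and leave the runtime services on the firmware's 1:1 mapping.
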
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 32799cf039ef..1b1dfcaa6fb9 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -27,6 +27,7 @@
extern int __pure nokaslr(void);
extern int __pure is_quiet(void);
+extern int __pure novamap(void);
#define pr_efi(sys_table, msg) do { \
if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
@@ -64,4 +65,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+/* Helper macros for the usual case of using simple C variables: */
+#ifndef fdt_setprop_inplace_var
+#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \
+ fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
+#ifndef fdt_setprop_var
+#define fdt_setprop_var(fdt, node_offset, name, var) \
+ fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var))
+#endif
+
#endif
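
The two helper macros above exist purely to drop the repeated &var/sizeof(var) boilerplate in fdt.c; the conversions later in this patch are mechanical, e.g.:

	u64 initrd_image_start = cpu_to_fdt64(initrd_addr);

	/* before */
	status = fdt_setprop(fdt, node, "linux,initrd-start",
			     &initrd_image_start, sizeof(u64));
	/* after */
	status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
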
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0dc7b4987cc2..5440ba17a1c5 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* FDT related Helper functions used by the EFI stub on multiple
* architectures. This should be #included by the EFI stub
* implementation files.
*
* Copyright 2013 Linaro Limited; author Roy Franz
- *
- * This file is part of the Linux kernel, and is made available
- * under the terms of the GNU General Public License version 2.
- *
*/
#include <linux/efi.h>
@@ -26,10 +23,8 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt)
offset = fdt_path_offset(fdt, "/");
/* Set the #address-cells and #size-cells values for an empty tree */
- fdt_setprop_u32(fdt, offset, "#address-cells",
- EFI_DT_ADDR_CELLS_DEFAULT);
-
- fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
+ fdt_setprop_u32(fdt, offset, "#address-cells", EFI_DT_ADDR_CELLS_DEFAULT);
+ fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
}
static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
@@ -42,7 +37,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
u32 fdt_val32;
u64 fdt_val64;
- /* Do some checks on provided FDT, if it exists*/
+ /* Do some checks on provided FDT, if it exists: */
if (orig_fdt) {
if (fdt_check_header(orig_fdt)) {
pr_efi_err(sys_table, "Device Tree header not valid!\n");
@@ -50,7 +45,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
}
/*
* We don't get the size of the FDT if we get if from a
- * configuration table.
+ * configuration table:
*/
if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) {
pr_efi_err(sys_table, "Truncated device tree! foo!\n");
@@ -64,8 +59,8 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
status = fdt_create_empty_tree(fdt, new_fdt_size);
if (status == 0) {
/*
- * Any failure from the following function is non
- * critical
+ * Any failure from the following function is
+ * non-critical:
*/
fdt_update_cell_size(sys_table, fdt);
}
@@ -86,12 +81,13 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (node < 0) {
node = fdt_add_subnode(fdt, 0, "chosen");
if (node < 0) {
- status = node; /* node is error code when negative */
+ /* 'node' is an error code when negative: */
+ status = node;
goto fdt_set_fail;
}
}
- if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) {
+ if (cmdline_ptr != NULL && strlen(cmdline_ptr) > 0) {
status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr,
strlen(cmdline_ptr) + 1);
if (status)
@@ -103,13 +99,12 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
u64 initrd_image_end;
u64 initrd_image_start = cpu_to_fdt64(initrd_addr);
- status = fdt_setprop(fdt, node, "linux,initrd-start",
- &initrd_image_start, sizeof(u64));
+ status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start);
if (status)
goto fdt_set_fail;
+
initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size);
- status = fdt_setprop(fdt, node, "linux,initrd-end",
- &initrd_image_end, sizeof(u64));
+ status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end);
if (status)
goto fdt_set_fail;
}
@@ -117,30 +112,28 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
/* Add FDT entries for EFI runtime services in chosen node. */
node = fdt_subnode_offset(fdt, 0, "chosen");
fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table);
- status = fdt_setprop(fdt, node, "linux,uefi-system-table",
- &fdt_val64, sizeof(fdt_val64));
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64);
if (status)
goto fdt_set_fail;
fdt_val64 = U64_MAX; /* placeholder */
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
- &fdt_val64, sizeof(fdt_val64));
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
if (status)
goto fdt_set_fail;
fdt_val32 = U32_MAX; /* placeholder */
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
- &fdt_val32, sizeof(fdt_val32));
+
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
if (status)
goto fdt_set_fail;
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
- &fdt_val32, sizeof(fdt_val32));
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
if (status)
goto fdt_set_fail;
- status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
- &fdt_val32, sizeof(fdt_val32));
+ status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
if (status)
goto fdt_set_fail;
@@ -150,8 +143,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64),
(u8 *)&fdt_val64);
if (efi_status == EFI_SUCCESS) {
- status = fdt_setprop(fdt, node, "kaslr-seed",
- &fdt_val64, sizeof(fdt_val64));
+ status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64);
if (status)
goto fdt_set_fail;
} else if (efi_status != EFI_NOT_FOUND) {
@@ -159,7 +151,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
}
}
- /* shrink the FDT back to its minimum size */
+ /* Shrink the FDT back to its minimum size: */
fdt_pack(fdt);
return EFI_SUCCESS;
@@ -182,26 +174,26 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
return EFI_LOAD_ERROR;
fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
- &fdt_val64, sizeof(fdt_val64));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64);
if (err)
return EFI_LOAD_ERROR;
fdt_val32 = cpu_to_fdt32(*map->map_size);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
- &fdt_val32, sizeof(fdt_val32));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
fdt_val32 = cpu_to_fdt32(*map->desc_size);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
- &fdt_val32, sizeof(fdt_val32));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
fdt_val32 = cpu_to_fdt32(*map->desc_ver);
- err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
- &fdt_val32, sizeof(fdt_val32));
+
+ err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32);
if (err)
return EFI_LOAD_ERROR;
@@ -209,13 +201,13 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
}
#ifndef EFI_FDT_ALIGN
-#define EFI_FDT_ALIGN EFI_PAGE_SIZE
+# define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
struct exit_boot_struct {
- efi_memory_desc_t *runtime_map;
- int *runtime_entry_count;
- void *new_fdt_addr;
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
+ void *new_fdt_addr;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -235,7 +227,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
}
#ifndef MAX_FDT_SIZE
-#define MAX_FDT_SIZE SZ_2M
+# define MAX_FDT_SIZE SZ_2M
#endif
/*
@@ -266,16 +258,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map;
efi_status_t status;
- int runtime_entry_count = 0;
+ int runtime_entry_count;
struct efi_boot_memmap map;
struct exit_boot_struct priv;
- map.map = &runtime_map;
- map.map_size = &map_size;
- map.desc_size = &desc_size;
- map.desc_ver = &desc_ver;
- map.key_ptr = &mmap_key;
- map.buff_size = &buff_size;
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_ver;
+ map.key_ptr = &mmap_key;
+ map.buff_size = &buff_size;
/*
* Get a copy of the current memory map that we will use to prepare
@@ -289,15 +281,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
return status;
}
- pr_efi(sys_table,
- "Exiting boot services and installing virtual address map...\n");
+ pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN,
new_fdt_addr, max_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table,
- "Unable to allocate memory for new device tree.\n");
+ pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n");
goto fail;
}
@@ -318,15 +308,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
goto fail_free_new_fdt;
}
- priv.runtime_map = runtime_map;
- priv.runtime_entry_count = &runtime_entry_count;
- priv.new_fdt_addr = (void *)*new_fdt_addr;
- status = efi_exit_boot_services(sys_table, handle, &map, &priv,
- exit_boot_func);
+ runtime_entry_count = 0;
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
+
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
+ if (novamap())
+ return EFI_SUCCESS;
+
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
@@ -363,6 +357,7 @@ fail_free_new_fdt:
fail:
sys_table->boottime->free_pool(runtime_map);
+
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 24c461dea7af..0101ca4c13b1 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -1,10 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/* -----------------------------------------------------------------------
*
* Copyright 2011 Intel Corporation; author Matt Fleming
*
- * This file is part of the Linux kernel, and is made available under
- * the terms of the GNU General Public License version 2.
- *
* ----------------------------------------------------------------------- */
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index e0e603a89aa9..b4b1d1dcb5fd 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Linaro Ltd; <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 72d9dfbebf08..edba5e7a3743 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Secure boot handling.
*
@@ -5,9 +6,6 @@
* Roy Franz <roy.franz@linaro.org
* Copyright (C) 2013 Red Hat, Inc.
* Mark Salter <msalter@redhat.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
#include <asm/efi.h>
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index a90b0b8fc69a..5bd04f75d8d6 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TPM handling.
*
@@ -5,9 +6,6 @@
* Copyright (C) 2017 Google, Inc.
* Matthew Garrett <mjg59@google.com>
* Thiebaud Weksteen <tweek@google.com>
- *
- * This file is part of the Linux kernel, and is made available under the
- * terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 8986757eafaf..58452fde92cc 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) "efi: memattr: " fmt
@@ -94,7 +91,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
- if (md->virt_addr == 0) {
+ if (md->virt_addr == 0 && md->phys_addr != 0) {
/* no virtual mapping has been installed by the stub */
break;
}
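
The extra phys_addr check above appears to account for the identity-mapped (novamap) case introduced earlier in this patch: a runtime region that physically starts at address 0 then legitimately has virt_addr == 0, so a zero virtual address alone can no longer be read as "no mapping was installed by the stub".
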
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 84a11d0a8023..ad9ddefc9dcb 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -1,8 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/efi/runtime-map.c
* Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
- *
- * This file is released under the GPLv2.
*/
#include <linux/string.h>
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index e2abfdb5cee6..698745c249e8 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -85,7 +85,7 @@ struct efi_runtime_work efi_rts_work;
pr_err("Failed to queue work to efi_rts_wq.\n"); \
\
exit: \
- efi_rts_work.efi_rts_id = NONE; \
+ efi_rts_work.efi_rts_id = EFI_NONE; \
efi_rts_work.status; \
})
@@ -175,50 +175,50 @@ static void efi_call_rts(struct work_struct *work)
arg5 = efi_rts_work.arg5;
switch (efi_rts_work.efi_rts_id) {
- case GET_TIME:
+ case EFI_GET_TIME:
status = efi_call_virt(get_time, (efi_time_t *)arg1,
(efi_time_cap_t *)arg2);
break;
- case SET_TIME:
+ case EFI_SET_TIME:
status = efi_call_virt(set_time, (efi_time_t *)arg1);
break;
- case GET_WAKEUP_TIME:
+ case EFI_GET_WAKEUP_TIME:
status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
(efi_bool_t *)arg2, (efi_time_t *)arg3);
break;
- case SET_WAKEUP_TIME:
+ case EFI_SET_WAKEUP_TIME:
status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
(efi_time_t *)arg2);
break;
- case GET_VARIABLE:
+ case EFI_GET_VARIABLE:
status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
(efi_guid_t *)arg2, (u32 *)arg3,
(unsigned long *)arg4, (void *)arg5);
break;
- case GET_NEXT_VARIABLE:
+ case EFI_GET_NEXT_VARIABLE:
status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
(efi_char16_t *)arg2,
(efi_guid_t *)arg3);
break;
- case SET_VARIABLE:
+ case EFI_SET_VARIABLE:
status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
(efi_guid_t *)arg2, *(u32 *)arg3,
*(unsigned long *)arg4, (void *)arg5);
break;
- case QUERY_VARIABLE_INFO:
+ case EFI_QUERY_VARIABLE_INFO:
status = efi_call_virt(query_variable_info, *(u32 *)arg1,
(u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
break;
- case GET_NEXT_HIGH_MONO_COUNT:
+ case EFI_GET_NEXT_HIGH_MONO_COUNT:
status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
break;
- case UPDATE_CAPSULE:
+ case EFI_UPDATE_CAPSULE:
status = efi_call_virt(update_capsule,
(efi_capsule_header_t **)arg1,
*(unsigned long *)arg2,
*(unsigned long *)arg3);
break;
- case QUERY_CAPSULE_CAPS:
+ case EFI_QUERY_CAPSULE_CAPS:
status = efi_call_virt(query_capsule_caps,
(efi_capsule_header_t **)arg1,
*(unsigned long *)arg2, (u64 *)arg3,
@@ -242,7 +242,7 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
+ status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -253,7 +253,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
+ status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
up(&efi_runtime_lock);
return status;
}
@@ -266,7 +266,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+ status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
NULL);
up(&efi_runtime_lock);
return status;
@@ -278,7 +278,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+ status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
NULL);
up(&efi_runtime_lock);
return status;
@@ -294,7 +294,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+ status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
data);
up(&efi_runtime_lock);
return status;
@@ -308,7 +308,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+ status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
NULL, NULL);
up(&efi_runtime_lock);
return status;
@@ -324,7 +324,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+ status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
data);
up(&efi_runtime_lock);
return status;
@@ -359,7 +359,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+ status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
remaining_space, max_variable_size, NULL);
up(&efi_runtime_lock);
return status;
@@ -391,7 +391,7 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+ status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
NULL, NULL);
up(&efi_runtime_lock);
return status;
@@ -407,7 +407,7 @@ static void virt_efi_reset_system(int reset_type,
"could not get exclusive access to the firmware\n");
return;
}
- efi_rts_work.efi_rts_id = RESET_SYSTEM;
+ efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
__efi_call_virt(reset_system, reset_type, status, data_size, data);
up(&efi_runtime_lock);
}
@@ -423,7 +423,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+ status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
NULL, NULL);
up(&efi_runtime_lock);
return status;
@@ -441,7 +441,7 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
if (down_interruptible(&efi_runtime_lock))
return EFI_ABORTED;
- status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+ status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
max_size, reset_type, NULL);
up(&efi_runtime_lock);
return status;
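
The rename above only touches the call sites; the matching enum (declared in include/linux/efi.h, not shown in this diff) presumably gains the same EFI_ prefix so the identifiers no longer collide with generic names such as NONE or GET_TIME. Roughly:

	enum efi_rts_ids {
		EFI_NONE,
		EFI_GET_TIME,
		EFI_SET_TIME,
		EFI_GET_WAKEUP_TIME,
		EFI_SET_WAKEUP_TIME,
		EFI_GET_VARIABLE,
		EFI_GET_NEXT_VARIABLE,
		EFI_SET_VARIABLE,
		EFI_QUERY_VARIABLE_INFO,
		EFI_GET_NEXT_HIGH_MONO_COUNT,
		EFI_RESET_SYSTEM,
		EFI_UPDATE_CAPSULE,
		EFI_QUERY_CAPSULE_CAPS,
	};
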
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 51ecf7d6da48..877745c3aaf2 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* EFI Test Driver for Runtime Services
*
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index 5f4818bf112f..f2446aa1c2e3 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* EFI Test driver Header
*
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
index 0cbeb3d46b18..3a689b40ccc0 100644
--- a/drivers/firmware/efi/tpm.c
+++ b/drivers/firmware/efi/tpm.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Google, Inc.
* Thiebaud Weksteen <tweek@google.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/efi.h>
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index fceaafd67ec6..436d1776bc7b 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Originally from efivars.c
*
* Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/capability.h>
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
index 97f5424dbac9..4b56a587dacd 100644
--- a/drivers/firmware/imx/misc.c
+++ b/drivers/firmware/imx/misc.c
@@ -18,6 +18,14 @@ struct imx_sc_msg_req_misc_set_ctrl {
u16 resource;
} __packed;
+struct imx_sc_msg_req_cpu_start {
+ struct imx_sc_rpc_msg hdr;
+ u32 address_hi;
+ u32 address_lo;
+ u16 resource;
+ u8 enable;
+} __packed;
+
struct imx_sc_msg_req_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
@@ -97,3 +105,33 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
return 0;
}
EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] enable true for start, false for stop
+ * @param[in] phys_addr initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ struct imx_sc_msg_req_cpu_start msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_CPU_START;
+ hdr->size = 4;
+
+ msg.address_hi = phys_addr >> 32;
+ msg.address_lo = phys_addr;
+ msg.resource = resource;
+ msg.enable = enable;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
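
A hedged usage sketch for the new export, starting an auxiliary Cortex-M core through the SCU; the resource ID, entry address and 'dev' below are illustrative and not taken from this patch:

	struct imx_sc_ipc *ipc;
	int ret;

	ret = imx_scu_get_handle(&ipc);
	if (ret)
		return ret;

	/* Point the M4 core at its firmware image and release it. */
	ret = imx_sc_pm_cpu_start(ipc, IMX_SC_R_M4_0_PID0, true, 0x34fe0000);
	if (ret)
		dev_err(dev, "failed to start M4 core: %d\n", ret);
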
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 407245f2efd0..39a94c7177fc 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -322,6 +322,7 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
static const struct of_device_id imx_sc_pd_match[] = {
{ .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
+ { .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
{ /* sentinel */ }
};
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index a13558154ac3..61be15d9df7d 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -238,6 +238,16 @@ static int rpi_firmware_probe(struct platform_device *pdev)
return 0;
}
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ if (!fw)
+ return;
+
+ rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
static int rpi_firmware_remove(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
@@ -278,6 +288,7 @@ static struct platform_driver rpi_firmware_driver = {
.of_match_table = rpi_firmware_of_match,
},
.probe = rpi_firmware_probe,
+ .shutdown = rpi_firmware_shutdown,
.remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 1b826dcca719..676b01caff05 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -1,4 +1,7 @@
tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644
index 000000000000..54d560c48398
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+ int (*init)(struct tegra_bpmp *bpmp);
+ void (*deinit)(struct tegra_bpmp *bpmp);
+ bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+ int (*ack_response)(struct tegra_bpmp_channel *channel);
+ int (*ack_request)(struct tegra_bpmp_channel *channel);
+ bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+ int (*post_response)(struct tegra_bpmp_channel *channel);
+ int (*post_request)(struct tegra_bpmp_channel *channel);
+ int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+ int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
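
This ops table is the heart of the split: the generic core in bpmp.c keeps the MRQ and channel bookkeeping and calls into whichever backend the matched tegra_bpmp_soc points at. Simplified probe-time flow, mirroring the bpmp.c changes later in this patch:

	bpmp->soc = of_device_get_match_data(&pdev->dev);	/* &tegra186_soc or &tegra210_soc */
	err = bpmp->soc->ops->init(bpmp);			/* IVC + HSP mailbox on 186, atomics/semaphores on 210 */
	...
	err = bpmp->soc->ops->ring_doorbell(bpmp);		/* notify the BPMP of a pending request */
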
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644
index 000000000000..ea308751635f
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+struct tegra186_bpmp {
+ struct tegra_bpmp *parent;
+
+ struct {
+ struct gen_pool *pool;
+ dma_addr_t phys;
+ void *virt;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ struct tegra186_bpmp *priv;
+
+ priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+ return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
+
+ err = mbox_send_message(priv->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(priv->mbox.channel, 0);
+
+ return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ struct tegra186_bpmp *priv = bpmp->priv;
+
+ if (WARN_ON(priv->mbox.channel == NULL))
+ return;
+
+ tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ priv->rx.virt + offset, priv->rx.phys + offset,
+ priv->tx.virt + offset, priv->tx.phys + offset,
+ 1, message_size, tegra186_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+ tegra_bpmp_handle_rx(bpmp);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+ priv->parent = bpmp;
+
+ priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+ if (!priv->tx.pool) {
+ dev_err(bpmp->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
+ if (!priv->tx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+ if (!priv->rx.pool) {
+ dev_err(bpmp->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
+ if (!priv->rx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ goto free_rx;
+
+ err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ goto cleanup_tx_channel;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ priv->mbox.client.dev = bpmp->dev;
+ priv->mbox.client.rx_callback = mbox_handle_rx;
+ priv->mbox.client.tx_block = false;
+ priv->mbox.client.knows_txdone = false;
+
+ priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+ if (IS_ERR(priv->mbox.channel)) {
+ err = PTR_ERR(priv->mbox.channel);
+ dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+
+cleanup_channels:
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ if (!bpmp->threaded_channels[i].bpmp)
+ continue;
+
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+ }
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+free_tx:
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+
+ return err;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ unsigned int i;
+
+ mbox_free_channel(priv->mbox.channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+
+ /* reset message channels */
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+ .init = tegra186_bpmp_init,
+ .deinit = tegra186_bpmp_deinit,
+ .is_response_ready = tegra186_bpmp_is_message_ready,
+ .is_request_ready = tegra186_bpmp_is_message_ready,
+ .ack_response = tegra186_bpmp_ack_message,
+ .ack_request = tegra186_bpmp_ack_message,
+ .is_response_channel_free = tegra186_bpmp_is_channel_free,
+ .is_request_channel_free = tegra186_bpmp_is_channel_free,
+ .post_response = tegra186_bpmp_post_message,
+ .post_request = tegra186_bpmp_post_message,
+ .ring_doorbell = tegra186_bpmp_ring_doorbell,
+ .resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644
index 000000000000..ae15940a078e
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET 0x000
+#define RESULT_OFFSET(id) (0xc00 + id * 4)
+#define TRIGGER_ID_SHIFT 16
+#define TRIGGER_CMD_GET 4
+
+#define STA_OFFSET 0
+#define SET_OFFSET 4
+#define CLR_OFFSET 8
+
+#define CH_MASK(ch) (0x3 << ((ch) * 2))
+#define SL_SIGL(ch) (0x0 << ((ch) * 2))
+#define SL_QUED(ch) (0x1 << ((ch) * 2))
+#define MA_FREE(ch) (0x2 << ((ch) * 2))
+#define MA_ACKD(ch) (0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+ void __iomem *atomics;
+ void __iomem *arb_sema;
+ struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+
+ return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+ priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ struct irq_data *irq_data = priv->tx_irq_data;
+
+ /*
+ * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+ * available messages
+ */
+ if (irq_data->chip->irq_retrigger)
+ return irq_data->chip->irq_retrigger(irq_data);
+
+ return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+
+ tegra_bpmp_handle_rx(bpmp);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ u32 address;
+ void *p;
+
+ /* Retrieve channel base address from BPMP */
+ writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+ priv->atomics + TRIGGER_OFFSET);
+ address = readl(priv->atomics + RESULT_OFFSET(index));
+
+ p = devm_ioremap(bpmp->dev, address, 0x80);
+ if (!p)
+ return -ENOMEM;
+
+ channel->ib = p;
+ channel->ob = p;
+ channel->index = index;
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct platform_device *pdev = to_platform_device(bpmp->dev);
+ struct tegra210_bpmp *priv;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->atomics))
+ return PTR_ERR(priv->atomics);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->arb_sema))
+ return PTR_ERR(priv->arb_sema);
+
+ err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ return err;
+
+ err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "tx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err);
+ return err;
+ }
+
+ priv->tx_irq_data = irq_get_irq_data(err);
+ if (!priv->tx_irq_data) {
+ dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "rx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get rx IRQ: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, err, rx_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+ .init = tegra210_bpmp_init,
+ .is_response_ready = tegra210_bpmp_is_response_ready,
+ .is_request_ready = tegra210_bpmp_is_request_ready,
+ .ack_response = tegra210_bpmp_ack_response,
+ .ack_request = tegra210_bpmp_ack_request,
+ .is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+ .is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+ .post_response = tegra210_bpmp_post_response,
+ .post_request = tegra210_bpmp_post_request,
+ .ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
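
On Tegra210 the per-channel state lives in 2-bit fields of the arbitration semaphore register: for channel 3, for example, CH_MASK(3) = 0x3 << 6 = 0xc0, and a masked status equal to MA_ACKD(3) (also 0xc0) means the firmware has acked the message. The doorbell is rung by retriggering the BPMP's LIC interrupt line rather than through an HSP mailbox as on Tegra186.
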
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 689478b92bce..dd775e8ba5a0 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -26,6 +26,8 @@
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>
+#include "bpmp-private.h"
+
#define MSG_ACK BIT(0)
#define MSG_RING BIT(1)
#define TAG_SZ 32
@@ -36,6 +38,14 @@ mbox_client_to_bpmp(struct mbox_client *client)
return container_of(client, struct tegra_bpmp, mbox.client);
}
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+
+ return bpmp->soc->ops;
+}
+
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
struct platform_device *pdev;
@@ -96,22 +106,21 @@ static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
(msg->rx.size == 0 || msg->rx.data);
}
-static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
- void *frame;
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- frame = tegra_ivc_read_get_next_frame(channel->ivc);
- if (IS_ERR(frame)) {
- channel->ib = NULL;
- return false;
- }
+ return ops->is_response_ready(channel);
+}
- channel->ib = frame;
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- return true;
+ return ops->is_request_ready(channel);
}
-static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t end;
@@ -119,29 +128,45 @@ static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
end = ktime_add_us(ktime_get(), timeout);
do {
- if (tegra_bpmp_master_acked(channel))
+ if (tegra_bpmp_is_response_ready(channel))
return 0;
} while (ktime_before(ktime_get(), end));
return -ETIMEDOUT;
}
-static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
- void *frame;
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- frame = tegra_ivc_write_get_next_frame(channel->ivc);
- if (IS_ERR(frame)) {
- channel->ob = NULL;
- return false;
- }
+ return ops->ack_response(channel);
+}
- channel->ob = frame;
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- return true;
+ return ops->ack_request(channel);
}
-static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t start, now;
@@ -149,7 +174,7 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
start = ns_to_ktime(local_clock());
do {
- if (tegra_bpmp_master_free(channel))
+ if (tegra_bpmp_is_request_channel_free(channel))
return 0;
now = ns_to_ktime(local_clock());
@@ -158,6 +183,25 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
return -ETIMEDOUT;
}
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
void *data, size_t size, int *ret)
{
@@ -166,7 +210,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
if (data && size > 0)
memcpy(data, channel->ib->data, size);
- err = tegra_ivc_read_advance(channel->ivc);
+ err = tegra_bpmp_ack_response(channel);
if (err < 0)
return err;
@@ -210,7 +254,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
if (data && size > 0)
memcpy(channel->ob->data, data, size);
- return tegra_ivc_write_advance(channel->ivc);
+ return tegra_bpmp_post_request(channel);
}
static struct tegra_bpmp_channel *
@@ -238,7 +282,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
channel = &bpmp->threaded_channels[index];
- if (!tegra_bpmp_master_free(channel)) {
+ if (!tegra_bpmp_is_request_channel_free(channel)) {
err = -EBUSY;
goto unlock;
}
@@ -270,7 +314,7 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
{
int err;
- err = tegra_bpmp_wait_master_free(channel);
+ err = tegra_bpmp_wait_request_channel_free(channel);
if (err < 0)
return err;
@@ -302,13 +346,11 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
spin_unlock(&bpmp->atomic_tx_lock);
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
- mbox_client_txdone(bpmp->mbox.channel, 0);
-
- err = tegra_bpmp_wait_ack(channel);
+ err = tegra_bpmp_wait_response(channel);
if (err < 0)
return err;
@@ -335,12 +377,10 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
if (IS_ERR(channel))
return PTR_ERR(channel);
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
- mbox_client_txdone(bpmp->mbox.channel, 0);
-
timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
err = wait_for_completion_timeout(&channel->completion, timeout);
@@ -369,38 +409,34 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
{
unsigned long flags = channel->ib->flags;
struct tegra_bpmp *bpmp = channel->bpmp;
- struct tegra_bpmp_mb_data *frame;
int err;
if (WARN_ON(size > MSG_DATA_MIN_SZ))
return;
- err = tegra_ivc_read_advance(channel->ivc);
+ err = tegra_bpmp_ack_request(channel);
if (WARN_ON(err < 0))
return;
if ((flags & MSG_ACK) == 0)
return;
- frame = tegra_ivc_write_get_next_frame(channel->ivc);
- if (WARN_ON(IS_ERR(frame)))
+ if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
return;
- frame->code = code;
+ channel->ob->code = code;
if (data && size > 0)
- memcpy(frame->data, data, size);
+ memcpy(channel->ob->data, data, size);
- err = tegra_ivc_write_advance(channel->ivc);
+ err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
return;
if (flags & MSG_RING) {
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (WARN_ON(err < 0))
return;
-
- mbox_client_txdone(bpmp->mbox.channel, 0);
}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
@@ -627,9 +663,8 @@ static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
complete(&channel->completion);
}
-static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
- struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
struct tegra_bpmp_channel *channel;
unsigned int i, count;
unsigned long *busy;
@@ -638,7 +673,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
count = bpmp->soc->channels.thread.count;
busy = bpmp->threaded.busy;
- if (tegra_bpmp_master_acked(channel))
+ if (tegra_bpmp_is_request_ready(channel))
tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
spin_lock(&bpmp->lock);
@@ -648,7 +683,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
channel = &bpmp->threaded_channels[i];
- if (tegra_bpmp_master_acked(channel)) {
+ if (tegra_bpmp_is_response_ready(channel)) {
tegra_bpmp_channel_signal(channel);
clear_bit(i, busy);
}
@@ -657,74 +692,9 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
spin_unlock(&bpmp->lock);
}
-static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
-{
- struct tegra_bpmp *bpmp = data;
- int err;
-
- if (WARN_ON(bpmp->mbox.channel == NULL))
- return;
-
- err = mbox_send_message(bpmp->mbox.channel, NULL);
- if (err < 0)
- return;
-
- mbox_client_txdone(bpmp->mbox.channel, 0);
-}
-
-static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
- struct tegra_bpmp *bpmp,
- unsigned int index)
-{
- size_t message_size, queue_size;
- unsigned int offset;
- int err;
-
- channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
- GFP_KERNEL);
- if (!channel->ivc)
- return -ENOMEM;
-
- message_size = tegra_ivc_align(MSG_MIN_SZ);
- queue_size = tegra_ivc_total_queue_size(message_size);
- offset = queue_size * index;
-
- err = tegra_ivc_init(channel->ivc, NULL,
- bpmp->rx.virt + offset, bpmp->rx.phys + offset,
- bpmp->tx.virt + offset, bpmp->tx.phys + offset,
- 1, message_size, tegra_bpmp_ivc_notify,
- bpmp);
- if (err < 0) {
- dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
- index, err);
- return err;
- }
-
- init_completion(&channel->completion);
- channel->bpmp = bpmp;
-
- return 0;
-}
-
-static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
-{
- /* reset the channel state */
- tegra_ivc_reset(channel->ivc);
-
- /* sync the channel state with BPMP */
- while (tegra_ivc_notified(channel->ivc))
- ;
-}
-
-static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
-{
- tegra_ivc_cleanup(channel->ivc);
-}
-
static int tegra_bpmp_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp;
- unsigned int i;
char tag[TAG_SZ];
size_t size;
int err;
@@ -736,32 +706,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
bpmp->soc = of_device_get_match_data(&pdev->dev);
bpmp->dev = &pdev->dev;
- bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
- if (!bpmp->tx.pool) {
- dev_err(&pdev->dev, "TX shmem pool not found\n");
- return -ENOMEM;
- }
-
- bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
- if (!bpmp->tx.virt) {
- dev_err(&pdev->dev, "failed to allocate from TX pool\n");
- return -ENOMEM;
- }
-
- bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
- if (!bpmp->rx.pool) {
- dev_err(&pdev->dev, "RX shmem pool not found\n");
- err = -ENOMEM;
- goto free_tx;
- }
-
- bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
- if (!bpmp->rx.virt) {
- dev_err(&pdev->dev, "failed to allocate from RX pool\n");
- err = -ENOMEM;
- goto free_tx;
- }
-
INIT_LIST_HEAD(&bpmp->mrqs);
spin_lock_init(&bpmp->lock);
@@ -771,81 +715,38 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!bpmp->threaded.allocated) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->threaded.allocated)
+ return -ENOMEM;
bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!bpmp->threaded.busy) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->threaded.busy)
+ return -ENOMEM;
spin_lock_init(&bpmp->atomic_tx_lock);
bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
GFP_KERNEL);
- if (!bpmp->tx_channel) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->tx_channel)
+ return -ENOMEM;
bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
GFP_KERNEL);
- if (!bpmp->rx_channel) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->rx_channel)
+ return -ENOMEM;
bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
sizeof(*bpmp->threaded_channels),
GFP_KERNEL);
- if (!bpmp->threaded_channels) {
- err = -ENOMEM;
- goto free_rx;
- }
-
- err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
- bpmp->soc->channels.cpu_tx.offset);
- if (err < 0)
- goto free_rx;
+ if (!bpmp->threaded_channels)
+ return -ENOMEM;
- err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
- bpmp->soc->channels.cpu_rx.offset);
+ err = bpmp->soc->ops->init(bpmp);
if (err < 0)
- goto cleanup_tx_channel;
-
- for (i = 0; i < bpmp->threaded.count; i++) {
- err = tegra_bpmp_channel_init(
- &bpmp->threaded_channels[i], bpmp,
- bpmp->soc->channels.thread.offset + i);
- if (err < 0)
- goto cleanup_threaded_channels;
- }
-
- /* mbox registration */
- bpmp->mbox.client.dev = &pdev->dev;
- bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
- bpmp->mbox.client.tx_block = false;
- bpmp->mbox.client.knows_txdone = false;
-
- bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
- if (IS_ERR(bpmp->mbox.channel)) {
- err = PTR_ERR(bpmp->mbox.channel);
- dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
- goto cleanup_threaded_channels;
- }
-
- /* reset message channels */
- tegra_bpmp_channel_reset(bpmp->tx_channel);
- tegra_bpmp_channel_reset(bpmp->rx_channel);
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+ return err;
err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
tegra_bpmp_mrq_handle_ping, bpmp);
if (err < 0)
- goto free_mbox;
+ goto deinit;
err = tegra_bpmp_ping(bpmp);
if (err < 0) {
@@ -867,17 +768,23 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
- err = tegra_bpmp_init_clocks(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
- err = tegra_bpmp_init_resets(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
- err = tegra_bpmp_init_powergates(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+ err = tegra_bpmp_init_powergates(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
err = tegra_bpmp_init_debugfs(bpmp);
if (err < 0)
@@ -887,41 +794,27 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
free_mrq:
tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
-free_mbox:
- mbox_free_channel(bpmp->mbox.channel);
-cleanup_threaded_channels:
- for (i = 0; i < bpmp->threaded.count; i++) {
- if (bpmp->threaded_channels[i].bpmp)
- tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
- }
+deinit:
+ if (bpmp->soc->ops->deinit)
+ bpmp->soc->ops->deinit(bpmp);
- tegra_bpmp_channel_cleanup(bpmp->rx_channel);
-cleanup_tx_channel:
- tegra_bpmp_channel_cleanup(bpmp->tx_channel);
-free_rx:
- gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
-free_tx:
- gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
return err;
}
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
- unsigned int i;
-
- /* reset message channels */
- tegra_bpmp_channel_reset(bpmp->tx_channel);
- tegra_bpmp_channel_reset(bpmp->rx_channel);
-
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
- return 0;
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+ return 0;
}
static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
@@ -938,11 +831,42 @@ static const struct tegra_bpmp_soc tegra186_soc = {
.timeout = 0,
},
},
+ .ops = &tegra186_bpmp_ops,
.num_resets = 193,
};
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 1,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 4,
+ .count = 1,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 8,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra210_bpmp_ops,
+};
+#endif
static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
{ }
};
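
The probe path above now funnels all transport-specific work through bpmp->soc->ops (init/deinit on probe, resume on system resume) instead of driving the HSP mailbox directly. A rough, illustrative sketch of that indirection follows; only the hooks actually used in this hunk are shown, and anything beyond init/deinit/resume is not taken from this diff:

struct tegra_bpmp;

struct tegra_bpmp_ops {
	int (*init)(struct tegra_bpmp *bpmp);    /* map shared memory, set up channels and doorbell */
	void (*deinit)(struct tegra_bpmp *bpmp); /* unwind init on probe failure */
	int (*resume)(struct tegra_bpmp *bpmp);  /* re-reset message channels after suspend */
};

/* Provided by the per-SoC backends referenced in the tables above. */
extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
extern const struct tegra_bpmp_ops tegra210_bpmp_ops;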
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 69ed1464175c..3fbbb61012c4 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -146,25 +146,8 @@ static int ti_sci_debug_show(struct seq_file *s, void *unused)
return 0;
}
-/**
- * ti_sci_debug_open() - debug file open
- * @inode: inode pointer
- * @file: file pointer
- *
- * Return: result of single_open
- */
-static int ti_sci_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ti_sci_debug_show, inode->i_private);
-}
-
-/* log file operations */
-static const struct file_operations ti_sci_debug_fops = {
- .open = ti_sci_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
/**
* ti_sci_debugfs_create() - Create log debug file
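
For reference, DEFINE_SHOW_ATTRIBUTE(ti_sci_debug) from <linux/seq_file.h> generates roughly the same open helper and file_operations that the hunk above removes (a sketch of the expansion, not the verbatim macro output):

static int ti_sci_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, ti_sci_debug_show, inode->i_private);
}

static const struct file_operations ti_sci_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = ti_sci_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};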
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index 8f44b9cd295a..bd33bbf70daf 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -6,6 +6,7 @@ menu "Zynq MPSoC Firmware Drivers"
config ZYNQMP_FIRMWARE
bool "Enable Xilinx Zynq MPSoC firmware interface"
+ select MFD_CORE
help
Firmware interface driver is used by different
drivers to communicate with the firmware for
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 9a1c72a9280f..98f936125643 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -14,6 +14,7 @@
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -23,6 +24,12 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static const struct mfd_cell firmware_devs[] = {
+ {
+ .name = "zynqmp_power_controller",
+ },
+};
+
/**
* zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
* @ret_status: PMUFW return code
@@ -187,6 +194,29 @@ static int zynqmp_pm_get_api_version(u32 *version)
}
/**
+ * zynqmp_pm_get_chipid() - Get silicon ID registers
+ * @idcode: IDCODE register
+ * @version: version register
+ *
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
+ */
+static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!idcode || !version)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ *idcode = ret_payload[1];
+ *version = ret_payload[2];
+
+ return ret;
+}
+
+/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
@@ -469,8 +499,129 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
arg1, arg2, out);
}
+/**
+ * zynqmp_pm_reset_assert() - Request setting of reset (1 - assert, 0 - release)
+ * @reset:		Reset to be configured
+ * @assert_flag:	Flag stating whether the reset should be asserted (1) or
+ *			released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_get_status() - Get status of the reset
+ * @reset: Reset whose status should be returned
+ * @status: Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+ 0, 0, ret_payload);
+ *status = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ * master has initialized its own power management
+ *
+ * This API function is used to notify the power management controller
+ * that power management initialization has completed.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_init_finalize(void)
+{
+ return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_suspend_mode() - Set system suspend mode
+ * @mode: Mode to set for system suspend
+ *
+ * This API function is used to set the mode of system suspend.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This function is used by a master to request a particular node from the
+ * firmware. Every master must request a node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+ qos, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node: Node ID of the slave
+ *
+ * This function is used by a master to inform the firmware that it
+ * has released a node. Once released, the master must not use that
+ * node again without re-requesting it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_release_node(const u32 node)
+{
+ return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This API function is used to change the capabilities of slaves that a
+ * PU has already requested.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+ qos, ack, NULL);
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
+ .get_chipid = zynqmp_pm_get_chipid,
.query_data = zynqmp_pm_query_data,
.clock_enable = zynqmp_pm_clock_enable,
.clock_disable = zynqmp_pm_clock_disable,
@@ -482,6 +633,13 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.clock_setparent = zynqmp_pm_clock_setparent,
.clock_getparent = zynqmp_pm_clock_getparent,
.ioctl = zynqmp_pm_ioctl,
+ .reset_assert = zynqmp_pm_reset_assert,
+ .reset_get_status = zynqmp_pm_reset_get_status,
+ .init_finalize = zynqmp_pm_init_finalize,
+ .set_suspend_mode = zynqmp_pm_set_suspend_mode,
+ .request_node = zynqmp_pm_request_node,
+ .release_node = zynqmp_pm_release_node,
+ .set_requirement = zynqmp_pm_set_requirement,
};
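
Client drivers reach the new reset, suspend and node-management calls through this ops table rather than by calling the static helpers directly. A minimal sketch, assuming the zynqmp_pm_get_eemi_ops() accessor exported by this driver; node/capability/qos values are the caller's own:

#include <linux/firmware/xlnx-zynqmp.h>

static int example_claim_node(u32 node, u32 caps, u32 qos,
			      enum zynqmp_pm_request_ack ack)
{
	const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();
	u32 idcode, version;
	int ret;

	if (!ops || !ops->get_chipid || !ops->request_node)
		return -ENODEV;

	ret = ops->get_chipid(&idcode, &version);	/* new PM_GET_CHIPID call */
	if (ret)
		return ret;

	return ops->request_node(node, caps, qos, ack);
}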
/**
@@ -538,11 +696,19 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+ ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ return ret;
+ }
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
+ mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
return 0;
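
The "zynqmp_power_controller" cell registered above is matched by name against a child platform driver. A hedged sketch of how such a companion driver would bind; the real power driver lives elsewhere in the tree and this is illustrative only:

#include <linux/module.h>
#include <linux/platform_device.h>

static int zynqmp_power_probe(struct platform_device *pdev)
{
	/* pdev was created by mfd_add_devices() in zynqmp_firmware_probe() */
	return 0;
}

static struct platform_driver zynqmp_power_driver = {
	.driver = {
		.name = "zynqmp_power_controller",	/* must match the mfd_cell name */
	},
	.probe = zynqmp_power_probe,
};
module_platform_driver(zynqmp_power_driver);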
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index abe8249b893b..4d5a2b9f9d6a 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -14,6 +14,7 @@
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
+#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -30,11 +31,14 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
+/* Sink ID - same for all ETMs */
+PMU_FORMAT_ATTR(sinkid, "config2:0-31");
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
+ &format_attr_sinkid.attr,
NULL,
};
@@ -43,8 +47,18 @@ static const struct attribute_group etm_pmu_format_group = {
.attrs = etm_config_formats_attr,
};
+static struct attribute *etm_config_sinks_attr[] = {
+ NULL,
+};
+
+static const struct attribute_group etm_pmu_sinks_group = {
+ .name = "sinks",
+ .attrs = etm_config_sinks_attr,
+};
+
static const struct attribute_group *etm_pmu_attr_groups[] = {
&etm_pmu_format_group,
+ &etm_pmu_sinks_group,
NULL,
};
@@ -177,31 +191,28 @@ static void etm_free_aux(void *data)
schedule_work(&event_data->work);
}
-static void *etm_setup_aux(int event_cpu, void **pages,
+static void *etm_setup_aux(struct perf_event *event, void **pages,
int nr_pages, bool overwrite)
{
- int cpu;
+ u32 id;
+ int cpu = event->cpu;
cpumask_t *mask;
struct coresight_device *sink;
struct etm_event_data *event_data = NULL;
- event_data = alloc_event_data(event_cpu);
+ event_data = alloc_event_data(cpu);
if (!event_data)
return NULL;
INIT_WORK(&event_data->work, free_event_data);
- /*
- * In theory nothing prevent tracers in a trace session from being
- * associated with different sinks, nor having a sink per tracer. But
- * until we have HW with this kind of topology we need to assume tracers
- * in a trace session are using the same sink. Therefore go through
- * the coresight bus and pick the first enabled sink.
- *
- * When operated from sysFS users are responsible to enable the sink
- * while from perf, the perf tools will do it based on the choice made
- * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
- */
- sink = coresight_get_enabled_sink(true);
+ /* First get the selected sink from user space. */
+ if (event->attr.config2) {
+ id = (u32)event->attr.config2;
+ sink = coresight_get_sink_by_id(id);
+ } else {
+ sink = coresight_get_enabled_sink(true);
+ }
+
if (!sink || !sink_ops(sink)->alloc_buffer)
goto err;
@@ -422,15 +433,16 @@ static int etm_addr_filters_validate(struct list_head *filters)
static void etm_addr_filters_sync(struct perf_event *event)
{
struct perf_addr_filters_head *head = perf_event_addr_filters(event);
- unsigned long start, stop, *offs = event->addr_filters_offs;
+ unsigned long start, stop;
+ struct perf_addr_filter_range *fr = event->addr_filter_ranges;
struct etm_filters *filters = event->hw.addr_filters;
struct etm_filter *etm_filter;
struct perf_addr_filter *filter;
int i = 0;
list_for_each_entry(filter, &head->list, entry) {
- start = filter->offset + offs[i];
- stop = start + filter->size;
+ start = fr[i].start;
+ stop = start + fr[i].size;
etm_filter = &filters->etm_filter[i];
switch (filter->action) {
@@ -479,6 +491,77 @@ int etm_perf_symlink(struct coresight_device *csdev, bool link)
return 0;
}
+static ssize_t etm_perf_sink_name_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(dattr, struct dev_ext_attribute, attr);
+ return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
+}
+
+int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+{
+ int ret;
+ unsigned long hash;
+ const char *name;
+ struct device *pmu_dev = etm_pmu.dev;
+ struct device *pdev = csdev->dev.parent;
+ struct dev_ext_attribute *ea;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return -EINVAL;
+
+ if (csdev->ea != NULL)
+ return -EINVAL;
+
+ if (!etm_perf_up)
+ return -EPROBE_DEFER;
+
+ ea = devm_kzalloc(pdev, sizeof(*ea), GFP_KERNEL);
+ if (!ea)
+ return -ENOMEM;
+
+ name = dev_name(pdev);
+ /* See function coresight_get_sink_by_id() to know where this is used */
+ hash = hashlen_hash(hashlen_string(NULL, name));
+
+ ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
+ if (!ea->attr.attr.name)
+ return -ENOMEM;
+
+ ea->attr.attr.mode = 0444;
+ ea->attr.show = etm_perf_sink_name_show;
+ ea->var = (unsigned long *)hash;
+
+ ret = sysfs_add_file_to_group(&pmu_dev->kobj,
+ &ea->attr.attr, "sinks");
+
+ if (!ret)
+ csdev->ea = ea;
+
+ return ret;
+}
+
+void etm_perf_del_symlink_sink(struct coresight_device *csdev)
+{
+ struct device *pmu_dev = etm_pmu.dev;
+ struct dev_ext_attribute *ea = csdev->ea;
+
+ if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
+ csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
+ return;
+
+ if (!ea)
+ return;
+
+ sysfs_remove_file_from_group(&pmu_dev->kobj,
+ &ea->attr.attr, "sinks");
+ csdev->ea = NULL;
+}
+
static int __init etm_perf_init(void)
{
int ret;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index da7d9336a15c..015213abe00a 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -59,6 +59,8 @@ struct etm_event_data {
#ifdef CONFIG_CORESIGHT
int etm_perf_symlink(struct coresight_device *csdev, bool link);
+int etm_perf_add_symlink_sink(struct coresight_device *csdev);
+void etm_perf_del_symlink_sink(struct coresight_device *csdev);
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
struct etm_event_data *data = perf_get_aux(handle);
@@ -70,7 +72,9 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
-
+static inline int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+{ return -EINVAL; }
+static inline void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
return NULL;
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 579f34943bf1..b936c6d7e13f 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -147,6 +147,7 @@ void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
+struct coresight_device *coresight_get_sink_by_id(u32 id);
struct list_head *coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink);
void coresight_release_path(struct list_head *path);
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 2b0df1a0a8df..29cef898afba 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/stringhash.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/coresight.h>
@@ -18,6 +19,7 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include "coresight-etm-perf.h"
#include "coresight-priv.h"
static DEFINE_MUTEX(coresight_mutex);
@@ -540,6 +542,47 @@ struct coresight_device *coresight_get_enabled_sink(bool deactivate)
return dev ? to_coresight_device(dev) : NULL;
}
+static int coresight_sink_by_id(struct device *dev, void *data)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+ unsigned long hash;
+
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+
+ if (!csdev->ea)
+ return 0;
+ /*
+ * See function etm_perf_add_symlink_sink() to know where
+ * this comes from.
+ */
+ hash = (unsigned long)csdev->ea->var;
+
+ if ((u32)hash == *(u32 *)data)
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * coresight_get_sink_by_id - returns the sink that matches the id
+ * @id: Id of the sink to match
+ *
+ * The name of a sink is unique, whether it is found on the AMBA bus or
+ * otherwise. As such the hash of that name can easily be used to identify
+ * a sink.
+ */
+struct coresight_device *coresight_get_sink_by_id(u32 id)
+{
+ struct device *dev = NULL;
+
+ dev = bus_find_device(&coresight_bustype, NULL, &id,
+ coresight_sink_by_id);
+
+ return dev ? to_coresight_device(dev) : NULL;
+}
+
/*
* coresight_grab_device - Power up this device and any of the helper
* devices connected to it for trace operation. Since the helper devices
@@ -1167,6 +1210,22 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto err_out;
}
+ if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ ret = etm_perf_add_symlink_sink(csdev);
+
+ if (ret) {
+ device_unregister(&csdev->dev);
+ /*
+ * As with the above, all resources are free'd
+ * explicitly via coresight_device_release() triggered
+ * from put_device(), which is in turn called from
+ * function device_unregister().
+ */
+ goto err_out;
+ }
+ }
+
mutex_lock(&coresight_mutex);
coresight_fixup_device_conns(csdev);
@@ -1185,6 +1244,7 @@ EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
+ etm_perf_del_symlink_sink(csdev);
/* Remove references of that device in the topology */
coresight_remove_conns(csdev);
device_unregister(&csdev->dev);
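
Putting the two halves together: etm_perf_add_symlink_sink() publishes hashlen_hash(hashlen_string(NULL, dev_name(parent))) as a read-only file under the PMU's new "sinks" attribute group, user space feeds that value back through perf_event_attr.config2, and etm_setup_aux() resolves it with coresight_get_sink_by_id(). A hedged userspace sketch; the cs_etm PMU name and sysfs path are assumptions, not taken from this diff:

#include <stdio.h>

/* Read the sink id published under the PMU's "sinks" group; the returned
 * value is what goes into perf_event_attr.config2. */
static unsigned long cs_etm_sink_id(const char *sink_name)
{
	char path[256];
	unsigned long id = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/cs_etm/sinks/%s", sink_name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%lx", &id) != 1)
			id = 0;
		fclose(f);
	}
	return id;
}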
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index b20a5d044caa..b4db72f833ca 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -32,10 +32,6 @@
#include <linux/errno.h>
#include <linux/slab.h>
-#include <asm/irq.h>
-
-#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <linux/platform_data/keyscan-davinci.h>
/* Key scan registers */
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3d1e60779078..5438abb1baba 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -129,6 +129,16 @@ config BRCMSTB_L2_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config DAVINCI_AINTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
+config DAVINCI_CP_INTC
+ bool
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+
config DW_APB_ICTL
bool
select GENERIC_IRQ_CHIP
@@ -406,6 +416,15 @@ config IMX_IRQSTEER
help
Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+config LS1X_IRQ
+ bool "Loongson-1 Interrupt Controller"
+ depends on MACH_LOONGSON32
+ default y
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
+ help
+ Support for the Loongson-1 platform Interrupt Controller.
+
endmenu
config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c93713d24b86..85972ae1bd7f 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_ATH79) += irq-ath79-cpu.o
obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
+obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
+obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
@@ -94,3 +96,4 @@ obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
+obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0e65f609352e..83364fedbf0a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
+ unsigned long flags;
- irq_gc_lock(gc);
+ irq_gc_lock_irqsave(gc, flags);
/* Save the current mask */
b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
- irq_gc_unlock(gc);
+ irq_gc_unlock_irqrestore(gc, flags);
}
static void brcmstb_l2_intc_resume(struct irq_data *d)
@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
+ unsigned long flags;
- irq_gc_lock(gc);
+ irq_gc_lock_irqsave(gc, flags);
if (ct->chip.irq_ack) {
/* Clear unmasked non-wakeup interrupts */
irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
/* Restore the saved mask */
irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
- irq_gc_unlock(gc);
+ irq_gc_unlock_irqrestore(gc, flags);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
new file mode 100644
index 000000000000..810ccc4fe476
--- /dev/null
+++ b/drivers/irqchip/irq-davinci-aintc.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+//
+// Copyright (C) 2006, 2019 Texas Instruments.
+//
+// Interrupt handler for DaVinci boards.
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/irq-davinci-aintc.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+
+#include <asm/exception.h>
+
+#define DAVINCI_AINTC_FIQ_REG0 0x00
+#define DAVINCI_AINTC_FIQ_REG1 0x04
+#define DAVINCI_AINTC_IRQ_REG0 0x08
+#define DAVINCI_AINTC_IRQ_REG1 0x0c
+#define DAVINCI_AINTC_IRQ_IRQENTRY 0x14
+#define DAVINCI_AINTC_IRQ_ENT_REG0 0x18
+#define DAVINCI_AINTC_IRQ_ENT_REG1 0x1c
+#define DAVINCI_AINTC_IRQ_INCTL_REG 0x20
+#define DAVINCI_AINTC_IRQ_EABASE_REG 0x24
+#define DAVINCI_AINTC_IRQ_INTPRI0_REG 0x30
+#define DAVINCI_AINTC_IRQ_INTPRI7_REG 0x4c
+
+static void __iomem *davinci_aintc_base;
+static struct irq_domain *davinci_aintc_irq_domain;
+
+static inline void davinci_aintc_writel(unsigned long value, int offset)
+{
+ writel_relaxed(value, davinci_aintc_base + offset);
+}
+
+static inline unsigned long davinci_aintc_readl(int offset)
+{
+ return readl_relaxed(davinci_aintc_base + offset);
+}
+
+static __init void
+davinci_aintc_setup_gc(void __iomem *base,
+ unsigned int irq_start, unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_get_domain_generic_chip(davinci_aintc_irq_domain, irq_start);
+ gc->reg_base = base;
+ gc->irq_base = irq_start;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+
+ ct->regs.ack = DAVINCI_AINTC_IRQ_REG0;
+ ct->regs.mask = DAVINCI_AINTC_IRQ_ENT_REG0;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
+static asmlinkage void __exception_irq_entry
+davinci_aintc_handle_irq(struct pt_regs *regs)
+{
+ int irqnr = davinci_aintc_readl(DAVINCI_AINTC_IRQ_IRQENTRY);
+
+ /*
+ * Use the formula for entry vector index generation from section
+ * 8.3.3 of the manual.
+ */
+ irqnr >>= 2;
+ irqnr -= 1;
+
+ handle_domain_irq(davinci_aintc_irq_domain, irqnr, regs);
+}
+
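(Per the section 8.3.3 formula referenced above, IRQENTRY holds a byte offset into the hardware vector table at four bytes per entry, hence the shift and decrement: a raw value of 0x18, for example, becomes (0x18 >> 2) - 1 = 5, so hwirq 5 is handed to handle_domain_irq().)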
+/* ARM Interrupt Controller Initialization */
+void __init davinci_aintc_init(const struct davinci_aintc_config *config)
+{
+ unsigned int irq_off, reg_off, prio, shift;
+ void __iomem *req;
+ int ret, irq_base;
+ const u8 *prios;
+
+ req = request_mem_region(config->reg.start,
+ resource_size(&config->reg),
+ "davinci-cp-intc");
+ if (!req) {
+ pr_err("%s: register range busy\n", __func__);
+ return;
+ }
+
+ davinci_aintc_base = ioremap(config->reg.start,
+ resource_size(&config->reg));
+ if (!davinci_aintc_base) {
+ pr_err("%s: unable to ioremap register range\n", __func__);
+ return;
+ }
+
+ /* Clear all interrupt requests */
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
+
+ /* Disable all interrupts */
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG0);
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG1);
+
+ /* Interrupts disabled immediately, IRQ entry reflects all */
+ davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_INCTL_REG);
+
+ /* we don't use the hardware vector table, just its entry addresses */
+ davinci_aintc_writel(0, DAVINCI_AINTC_IRQ_EABASE_REG);
+
+ /* Clear all interrupt requests */
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
+ davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
+
+ prios = config->prios;
+ for (reg_off = DAVINCI_AINTC_IRQ_INTPRI0_REG;
+ reg_off <= DAVINCI_AINTC_IRQ_INTPRI7_REG; reg_off += 4) {
+ for (shift = 0, prio = 0; shift < 32; shift += 4, prios++)
+ prio |= (*prios & 0x07) << shift;
+ davinci_aintc_writel(prio, reg_off);
+ }
+
+ irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ if (irq_base < 0) {
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n",
+ __func__, irq_base);
+ return;
+ }
+
+ davinci_aintc_irq_domain = irq_domain_add_legacy(NULL,
+ config->num_irqs, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!davinci_aintc_irq_domain) {
+ pr_err("%s: unable to create interrupt domain\n", __func__);
+ return;
+ }
+
+ ret = irq_alloc_domain_generic_chips(davinci_aintc_irq_domain, 32, 1,
+ "AINTC", handle_edge_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0, 0);
+ if (ret) {
+ pr_err("%s: unable to allocate generic irq chips for domain\n",
+ __func__);
+ return;
+ }
+
+ for (irq_off = 0, reg_off = 0;
+ irq_off < config->num_irqs;
+ irq_off += 32, reg_off += 0x04)
+ davinci_aintc_setup_gc(davinci_aintc_base + reg_off,
+ irq_base + irq_off, 32);
+
+ set_handle_irq(davinci_aintc_handle_irq);
+}
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
new file mode 100644
index 000000000000..276da2772e7f
--- /dev/null
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Author: Steve Chen <schen@mvista.com>
+// Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+// Copyright (C) 2019, Texas Instruments
+//
+// TI Common Platform Interrupt Controller (cp_intc) driver
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-davinci-cp-intc.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/exception.h>
+
+#define DAVINCI_CP_INTC_CTRL 0x04
+#define DAVINCI_CP_INTC_HOST_CTRL 0x0c
+#define DAVINCI_CP_INTC_GLOBAL_ENABLE 0x10
+#define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR 0x24
+#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET 0x28
+#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR 0x2c
+#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET 0x34
+#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR 0x38
+#define DAVINCI_CP_INTC_PRIO_IDX 0x80
+#define DAVINCI_CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2))
+#define DAVINCI_CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_POLARITY(n) (0x0d00 + (n << 2))
+#define DAVINCI_CP_INTC_SYS_TYPE(n) (0x0d80 + (n << 2))
+#define DAVINCI_CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2))
+#define DAVINCI_CP_INTC_PRI_INDX_MASK GENMASK(9, 0)
+#define DAVINCI_CP_INTC_GPIR_NONE BIT(31)
+
+static void __iomem *davinci_cp_intc_base;
+static struct irq_domain *davinci_cp_intc_irq_domain;
+
+static inline unsigned int davinci_cp_intc_read(unsigned int offset)
+{
+ return readl_relaxed(davinci_cp_intc_base + offset);
+}
+
+static inline void davinci_cp_intc_write(unsigned long value,
+ unsigned int offset)
+{
+ writel_relaxed(value, davinci_cp_intc_base + offset);
+}
+
+static void davinci_cp_intc_ack_irq(struct irq_data *d)
+{
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR);
+}
+
+static void davinci_cp_intc_mask_irq(struct irq_data *d)
+{
+ /* XXX don't know why we need to disable nIRQ here... */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR);
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR);
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+}
+
+static void davinci_cp_intc_unmask_irq(struct irq_data *d)
+{
+ davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET);
+}
+
+static int davinci_cp_intc_set_irq_type(struct irq_data *d,
+ unsigned int flow_type)
+{
+ unsigned int reg, mask, polarity, type;
+
+ reg = BIT_WORD(d->hwirq);
+ mask = BIT_MASK(d->hwirq);
+ polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg));
+ type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg));
+
+ switch (flow_type) {
+ case IRQ_TYPE_EDGE_RISING:
+ polarity |= mask;
+ type |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ polarity &= ~mask;
+ type |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ polarity |= mask;
+ type &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ polarity &= ~mask;
+ type &= ~mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg));
+ davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg));
+
+ return 0;
+}
+
+static struct irq_chip davinci_cp_intc_irq_chip = {
+ .name = "cp_intc",
+ .irq_ack = davinci_cp_intc_ack_irq,
+ .irq_mask = davinci_cp_intc_mask_irq,
+ .irq_unmask = davinci_cp_intc_unmask_irq,
+ .irq_set_type = davinci_cp_intc_set_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static asmlinkage void __exception_irq_entry
+davinci_cp_intc_handle_irq(struct pt_regs *regs)
+{
+ int gpir, irqnr, none;
+
+ /*
+ * The interrupt number is in the first ten bits. The NONE field set to 1
+ * indicates a spurious irq.
+ */
+
+ gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX);
+ irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK;
+ none = gpir & DAVINCI_CP_INTC_GPIR_NONE;
+
+ if (unlikely(none)) {
+ pr_err_once("%s: spurious irq!\n", __func__);
+ return;
+ }
+
+ handle_domain_irq(davinci_cp_intc_irq_domain, irqnr, regs);
+}
+
+static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
+
+ irq_set_chip(virq, &davinci_cp_intc_irq_chip);
+ irq_set_probe(virq);
+ irq_set_handler(virq, handle_edge_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
+ .map = davinci_cp_intc_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init
+davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
+ struct device_node *node)
+{
+ unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
+ int offset, irq_base;
+ void __iomem *req;
+
+ req = request_mem_region(config->reg.start,
+ resource_size(&config->reg),
+ "davinci-cp-intc");
+ if (!req) {
+ pr_err("%s: register range busy\n", __func__);
+ return -EBUSY;
+ }
+
+ davinci_cp_intc_base = ioremap(config->reg.start,
+ resource_size(&config->reg));
+ if (!davinci_cp_intc_base) {
+ pr_err("%s: unable to ioremap register range\n", __func__);
+ return -EINVAL;
+ }
+
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE);
+
+ /* Disable all host interrupts */
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0));
+
+ /* Disable system interrupts */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(~0,
+ DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));
+
+ /* Set to normal mode, no nesting, no priority hold */
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
+ davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL);
+
+ /* Clear system interrupt status */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(~0,
+ DAVINCI_CP_INTC_SYS_STAT_CLR(offset));
+
+ /* Enable nIRQ (what about nFIQ?) */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
+
+ /* Default all priorities to channel 7. */
+ num_regs = (config->num_irqs + 3) >> 2; /* 4 channels per register */
+ for (offset = 0; offset < num_regs; offset++)
+ davinci_cp_intc_write(0x07070707,
+ DAVINCI_CP_INTC_CHAN_MAP(offset));
+
+ irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
+ if (irq_base < 0) {
+ pr_err("%s: unable to allocate interrupt descriptors: %d\n",
+ __func__, irq_base);
+ return irq_base;
+ }
+
+ davinci_cp_intc_irq_domain = irq_domain_add_legacy(
+ node, config->num_irqs, irq_base, 0,
+ &davinci_cp_intc_irq_domain_ops, NULL);
+
+ if (!davinci_cp_intc_irq_domain) {
+ pr_err("%s: unable to create an interrupt domain\n", __func__);
+ return -EINVAL;
+ }
+
+ set_handle_irq(davinci_cp_intc_handle_irq);
+
+ /* Enable global interrupt */
+ davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE);
+
+ return 0;
+}
+
+int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
+{
+ return davinci_cp_intc_do_init(config, NULL);
+}
+
+static int __init davinci_cp_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct davinci_cp_intc_config config = { };
+ int ret;
+
+ ret = of_address_to_resource(node, 0, &config.reg);
+ if (ret) {
+ pr_err("%s: unable to get the register range from device-tree\n",
+ __func__);
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
+ if (ret) {
+ pr_err("%s: unable to read the 'ti,intc-size' property\n",
+ __func__);
+ return ret;
+ }
+
+ return davinci_cp_intc_do_init(&config, node);
+}
+IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c3aba3fc818d..2dd1ff0cf558 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1746,6 +1746,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
u64 type = GITS_BASER_TYPE(val);
u64 baser_phys, tmp;
u32 alloc_pages;
+ struct page *page;
void *base;
retry_alloc_baser:
@@ -1758,10 +1759,11 @@ retry_alloc_baser:
order = get_order(GITS_BASER_PAGES_MAX * psz);
}
- base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!base)
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
return -ENOMEM;
+ base = (void *)page_address(page);
baser_phys = virt_to_phys(base);
/* Check if the physical address of the memory is above 48bits */
@@ -1955,6 +1957,8 @@ static int its_alloc_tables(struct its_node *its)
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
its->device_ids);
+ break;
+
case GITS_BASER_TYPE_VCPU:
indirect = its_parse_indirect_baser(its, baser,
psz, &order,
@@ -2292,7 +2296,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
return NULL;
}
-static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
+static bool its_alloc_table_entry(struct its_node *its,
+ struct its_baser *baser, u32 id)
{
struct page *page;
u32 esz, idx;
@@ -2312,7 +2317,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
/* Allocate memory for 2nd level table */
if (!table[idx]) {
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(baser->psz));
if (!page)
return false;
@@ -2343,7 +2349,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
if (!baser)
return (ilog2(dev_id) < its->device_ids);
- return its_alloc_table_entry(baser, dev_id);
+ return its_alloc_table_entry(its, baser, dev_id);
}
static bool its_alloc_vpe_table(u32 vpe_id)
@@ -2367,7 +2373,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
if (!baser)
return false;
- if (!its_alloc_table_entry(baser, vpe_id))
+ if (!its_alloc_table_entry(its, baser, vpe_id))
return false;
}
@@ -2401,7 +2407,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc(sz, GFP_KERNEL);
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -3543,6 +3549,7 @@ static int __init its_probe_one(struct resource *res,
void __iomem *its_base;
u32 val, ctlr;
u64 baser, tmp, typer;
+ struct page *page;
int err;
its_base = ioremap(res->start, resource_size(res));
@@ -3599,12 +3606,13 @@ static int __init its_probe_one(struct resource *res,
its->numa_node = numa_node;
- its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- get_order(ITS_CMD_QUEUE_SZ));
- if (!its->cmd_base) {
+ page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
+ if (!page) {
err = -ENOMEM;
goto out_free_its;
}
+ its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
its->fwnode_handle = handle;
its->get_msi_base = its_irq_get_msi_base;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b0d4aab1a58c..d000870d9b6b 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -225,14 +225,6 @@ static struct syscore_ops i8259_syscore_ops = {
.shutdown = i8259A_shutdown,
};
-static int __init i8259A_init_sysfs(void)
-{
- register_syscore_ops(&i8259_syscore_ops);
- return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
static void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -332,6 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
panic("Failed to add i8259 IRQ domain");
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ register_syscore_ops(&i8259_syscore_ops);
return domain;
}
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 5b3f1d735685..d1098f4da6a4 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,10 +10,11 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#define CTRL_STRIDE_OFF(_t, _r) (_t * 8 * _r)
+#define CTRL_STRIDE_OFF(_t, _r) (_t * 4 * _r)
#define CHANCTRL 0x0
#define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
@@ -21,12 +22,15 @@
#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
+#define CHAN_MAX_OUTPUT_INT 0x8
+
struct irqsteer_data {
void __iomem *regs;
struct clk *ipg_clk;
- int irq;
+ int irq[CHAN_MAX_OUTPUT_INT];
+ int irq_count;
raw_spinlock_t lock;
- int irq_groups;
+ int reg_num;
int channel;
struct irq_domain *domain;
u32 *saved_reg;
@@ -35,7 +39,7 @@ struct irqsteer_data {
static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
unsigned long irqnum)
{
- return (data->irq_groups * 2 - irqnum / 32 - 1);
+ return (data->reg_num - irqnum / 32 - 1);
}
static void imx_irqsteer_irq_unmask(struct irq_data *d)
@@ -46,9 +50,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d)
u32 val;
raw_spin_lock_irqsave(&data->lock, flags);
- val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
val |= BIT(d->hwirq % 32);
- writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
raw_spin_unlock_irqrestore(&data->lock, flags);
}
@@ -60,9 +64,9 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
u32 val;
raw_spin_lock_irqsave(&data->lock, flags);
- val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
val &= ~BIT(d->hwirq % 32);
- writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
raw_spin_unlock_irqrestore(&data->lock, flags);
}
@@ -87,23 +91,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
+static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
+{
+ int i;
+
+ for (i = 0; i < data->irq_count; i++) {
+ if (data->irq[i] == irq)
+ return i * 64;
+ }
+
+ return -EINVAL;
+}
+
static void imx_irqsteer_irq_handler(struct irq_desc *desc)
{
struct irqsteer_data *data = irq_desc_get_handler_data(desc);
- int i;
+ int hwirq;
+ int irq, i;
chained_irq_enter(irq_desc_get_chip(desc), desc);
- for (i = 0; i < data->irq_groups * 64; i += 32) {
- int idx = imx_irqsteer_get_reg_index(data, i);
+ irq = irq_desc_get_irq(desc);
+ hwirq = imx_irqsteer_get_hwirq_base(data, irq);
+ if (hwirq < 0) {
+ pr_warn("%s: unable to get hwirq base for irq %d\n",
+ __func__, irq);
+ return;
+ }
+
+ for (i = 0; i < 2; i++, hwirq += 32) {
+ int idx = imx_irqsteer_get_reg_index(data, hwirq);
unsigned long irqmap;
int pos, virq;
+ if (hwirq >= data->reg_num * 32)
+ break;
+
irqmap = readl_relaxed(data->regs +
- CHANSTATUS(idx, data->irq_groups));
+ CHANSTATUS(idx, data->reg_num));
for_each_set_bit(pos, &irqmap, 32) {
- virq = irq_find_mapping(data->domain, pos + i);
+ virq = irq_find_mapping(data->domain, pos + hwirq);
if (virq)
generic_handle_irq(virq);
}
@@ -117,7 +145,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct irqsteer_data *data;
struct resource *res;
- int ret;
+ u32 irqs_num;
+ int i, ret;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -130,12 +159,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
return PTR_ERR(data->regs);
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq <= 0) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
- }
-
data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(data->ipg_clk)) {
ret = PTR_ERR(data->ipg_clk);
@@ -146,12 +169,19 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
raw_spin_lock_init(&data->lock);
- of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups);
+ of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
of_property_read_u32(np, "fsl,channel", &data->channel);
+ /*
+ * There is one output irq for each group of 64 inputs.
+ * One register bit map can represent 32 input interrupts.
+ */
+ data->irq_count = DIV_ROUND_UP(irqs_num, 64);
+ data->reg_num = irqs_num / 32;
+
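For example, a node with fsl,num-irqs = 512 yields DIV_ROUND_UP(512, 64) = 8 output interrupts to be parsed and mapped from the device tree and 512 / 32 = 16 mask/status registers per channel.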
if (IS_ENABLED(CONFIG_PM_SLEEP)) {
data->saved_reg = devm_kzalloc(&pdev->dev,
- sizeof(u32) * data->irq_groups * 2,
+ sizeof(u32) * data->reg_num,
GFP_KERNEL);
if (!data->saved_reg)
return -ENOMEM;
@@ -166,27 +196,48 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
/* steer all IRQs into configured channel */
writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
- data->domain = irq_domain_add_linear(np, data->irq_groups * 64,
+ data->domain = irq_domain_add_linear(np, data->reg_num * 32,
&imx_irqsteer_domain_ops, data);
if (!data->domain) {
dev_err(&pdev->dev, "failed to create IRQ domain\n");
- clk_disable_unprepare(data->ipg_clk);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
+ ret = -EINVAL;
+ goto out;
}
- irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler,
- data);
+ for (i = 0; i < data->irq_count; i++) {
+ data->irq[i] = irq_of_parse_and_map(np, i);
+ if (!data->irq[i]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ imx_irqsteer_irq_handler,
+ data);
+ }
platform_set_drvdata(pdev, data);
return 0;
+out:
+ clk_disable_unprepare(data->ipg_clk);
+ return ret;
}
static int imx_irqsteer_remove(struct platform_device *pdev)
{
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < irqsteer_data->irq_count; i++)
+ irq_set_chained_handler_and_data(irqsteer_data->irq[i],
+ NULL, NULL);
- irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
irq_domain_remove(irqsteer_data->domain);
clk_disable_unprepare(irqsteer_data->ipg_clk);
@@ -199,9 +250,9 @@ static void imx_irqsteer_save_regs(struct irqsteer_data *data)
{
int i;
- for (i = 0; i < data->irq_groups * 2; i++)
+ for (i = 0; i < data->reg_num; i++)
data->saved_reg[i] = readl_relaxed(data->regs +
- CHANMASK(i, data->irq_groups));
+ CHANMASK(i, data->reg_num));
}
static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
@@ -209,9 +260,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
int i;
writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
- for (i = 0; i < data->irq_groups * 2; i++)
+ for (i = 0; i < data->reg_num; i++)
writel_relaxed(data->saved_reg[i],
- data->regs + CHANMASK(i, data->irq_groups));
+ data->regs + CHANMASK(i, data->reg_num));
}
static int imx_irqsteer_suspend(struct device *dev)
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
new file mode 100644
index 000000000000..86b72fbd3b45
--- /dev/null
+++ b/drivers/irqchip/irq-ls1x.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ * Loongson-1 platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define LS_REG_INTC_STATUS 0x00
+#define LS_REG_INTC_EN 0x04
+#define LS_REG_INTC_SET 0x08
+#define LS_REG_INTC_CLR 0x0c
+#define LS_REG_INTC_POL 0x10
+#define LS_REG_INTC_EDGE 0x14
+
+/**
+ * struct ls1x_intc_priv - private ls1x-intc data.
+ * @domain: IRQ domain.
+ * @intc_base: IO Base of intc registers.
+ */
+
+struct ls1x_intc_priv {
+ struct irq_domain *domain;
+ void __iomem *intc_base;
+};
+
+
+static void ls1x_chained_handle_irq(struct irq_desc *desc)
+{
+ struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ pending = readl(priv->intc_base + LS_REG_INTC_STATUS) &
+ readl(priv->intc_base + LS_REG_INTC_EN);
+
+ if (!pending)
+ spurious_interrupt();
+
+ while (pending) {
+ int bit = __ffs(pending);
+
+ generic_handle_irq(irq_find_mapping(priv->domain, bit));
+ pending &= ~BIT(bit);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void ls_intc_set_bit(struct irq_chip_generic *gc,
+ unsigned int offset,
+ u32 mask, bool set)
+{
+ if (set)
+ writel(readl(gc->reg_base + offset) | mask,
+ gc->reg_base + offset);
+ else
+ writel(readl(gc->reg_base + offset) & ~mask,
+ gc->reg_base + offset);
+}
+
+static int ls_intc_set_type(struct irq_data *data, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ u32 mask = data->mask;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+ ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(data, type);
+ return irq_setup_alt_chip(data, type);
+}
+
+
+static int __init ls1x_intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ struct ls1x_intc_priv *priv;
+ int parent_irq, err = 0;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->intc_base = of_iomap(node, 0);
+ if (!priv->intc_base) {
+ err = -ENODEV;
+ goto out_free_priv;
+ }
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("ls1x-irq: unable to get parent irq\n");
+ err = -ENODEV;
+ goto out_iounmap;
+ }
+
+ /* Set up an IRQ domain */
+ priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
+ NULL);
+ if (!priv->domain) {
+ pr_err("ls1x-irq: cannot add IRQ domain\n");
+ goto out_iounmap;
+ }
+
+ err = irq_alloc_domain_generic_chips(priv->domain, 32, 2,
+ node->full_name, handle_level_irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0,
+ IRQ_GC_INIT_MASK_CACHE);
+ if (err) {
+ pr_err("ls1x-irq: unable to register IRQ domain\n");
+ goto out_free_domain;
+ }
+
+ /* Mask all irqs */
+ writel(0x0, priv->intc_base + LS_REG_INTC_EN);
+
+ /* Ack all irqs */
+ writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR);
+
+ /* Set all irqs to high level triggered */
+ writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL);
+
+ gc = irq_get_domain_generic_chip(priv->domain, 0);
+
+ gc->reg_base = priv->intc_base;
+
+ ct = gc->chip_types;
+ ct[0].type = IRQ_TYPE_LEVEL_MASK;
+ ct[0].regs.mask = LS_REG_INTC_EN;
+ ct[0].regs.ack = LS_REG_INTC_CLR;
+ ct[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ ct[0].chip.irq_mask = irq_gc_mask_clr_bit;
+ ct[0].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[0].chip.irq_set_type = ls_intc_set_type;
+ ct[0].handler = handle_level_irq;
+
+ ct[1].type = IRQ_TYPE_EDGE_BOTH;
+ ct[1].regs.mask = LS_REG_INTC_EN;
+ ct[1].regs.ack = LS_REG_INTC_CLR;
+ ct[1].chip.irq_unmask = irq_gc_mask_set_bit;
+ ct[1].chip.irq_mask = irq_gc_mask_clr_bit;
+ ct[1].chip.irq_ack = irq_gc_ack_set_bit;
+ ct[1].chip.irq_set_type = ls_intc_set_type;
+ ct[1].handler = handle_edge_irq;
+
+ irq_set_chained_handler_and_data(parent_irq,
+ ls1x_chained_handle_irq, priv);
+
+ return 0;
+
+out_free_domain:
+ irq_domain_remove(priv->domain);
+out_iounmap:
+ iounmap(priv->intc_base);
+out_free_priv:
+ kfree(priv);
+
+ return err;
+}
+
+IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..cf755964f2f8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,62 +59,83 @@ static void __iomem *plic_regs;
struct plic_handler {
bool present;
- int ctxid;
+ void __iomem *hart_base;
+ /*
+ * Protect mask operations on the registers given that we can't
+ * assume atomic memory operations work on them.
+ */
+ raw_spinlock_t enable_lock;
+ void __iomem *enable_base;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
- return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
- return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+ int hwirq, int enable)
{
- u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+ u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
u32 hwirq_mask = 1 << (hwirq % 32);
- raw_spin_lock(&plic_toggle_lock);
+ raw_spin_lock(&handler->enable_lock);
if (enable)
writel(readl(reg) | hwirq_mask, reg);
else
writel(readl(reg) & ~hwirq_mask, reg);
- raw_spin_unlock(&plic_toggle_lock);
+ raw_spin_unlock(&handler->enable_lock);
}
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+ int hwirq, int enable)
{
int cpu;
- writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
- for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+ writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+ for_each_cpu(cpu, mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present)
- plic_toggle(handler->ctxid, d->hwirq, enable);
+ plic_toggle(handler, hwirq, enable);
}
}
static void plic_irq_enable(struct irq_data *d)
{
- plic_irq_toggle(d, 1);
+ unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+ cpu_online_mask);
+ if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+ return;
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
}
static void plic_irq_disable(struct irq_data *d)
{
- plic_irq_toggle(d, 0);
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
}
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ unsigned int cpu;
+
+ if (force)
+ cpu = cpumask_first(mask_val);
+ else
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ if (!irqd_irq_disabled(d)) {
+ plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+ plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
static struct irq_chip plic_chip = {
.name = "SiFive PLIC",
/*
@@ -123,6 +144,9 @@ static struct irq_chip plic_chip = {
*/
.irq_enable = plic_irq_enable,
.irq_disable = plic_irq_disable,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = plic_set_affinity,
+#endif
};
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -150,7 +174,7 @@ static struct irq_domain *plic_irqdomain;
static void plic_handle_irq(struct pt_regs *regs)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
irq_hw_number_t hwirq;
WARN_ON_ONCE(!handler->present);
@@ -186,7 +210,7 @@ static int plic_find_hart_id(struct device_node *node)
static int __init plic_init(struct device_node *node,
struct device_node *parent)
{
- int error = 0, nr_handlers, nr_mapped = 0, i;
+ int error = 0, nr_contexts, nr_handlers = 0, i;
u32 nr_irqs;
if (plic_regs) {
@@ -203,10 +227,10 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!nr_irqs))
goto out_iounmap;
- nr_handlers = of_irq_count(node);
- if (WARN_ON(!nr_handlers))
+ nr_contexts = of_irq_count(node);
+ if (WARN_ON(!nr_contexts))
goto out_iounmap;
- if (WARN_ON(nr_handlers < num_possible_cpus()))
+ if (WARN_ON(nr_contexts < num_possible_cpus()))
goto out_iounmap;
error = -ENOMEM;
@@ -215,7 +239,7 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!plic_irqdomain))
goto out_iounmap;
- for (i = 0; i < nr_handlers; i++) {
+ for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
@@ -237,19 +261,33 @@ static int __init plic_init(struct device_node *node,
}
cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("Invalid cpuid for context %d\n", i);
+ continue;
+ }
+
handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (handler->present) {
+ pr_warn("handler already present for context %d.\n", i);
+ continue;
+ }
+
handler->present = true;
- handler->ctxid = i;
+ handler->hart_base =
+ plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+ raw_spin_lock_init(&handler->enable_lock);
+ handler->enable_base =
+ plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
/* priority must be > threshold to trigger an interrupt */
- writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+ writel(0, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- plic_toggle(i, hwirq, 0);
- nr_mapped++;
+ plic_toggle(handler, hwirq, 0);
+ nr_handlers++;
}
- pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
- nr_irqs, nr_mapped, nr_handlers);
+ pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+ nr_irqs, nr_handlers, nr_contexts);
set_handle_irq(plic_handle_irq);
return 0;
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 12980a4ad460..ee6fb6af655e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
+obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
cros_ec_core-objs := cros_ec.o
diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c
new file mode 100644
index 000000000000..42fe67f1538e
--- /dev/null
+++ b/drivers/mfd/bcm2835-pm.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PM MFD driver for Broadcom BCM2835
+ *
+ * This driver binds to the PM block and creates the MFD device for
+ * the WDT and power drivers.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+static const struct mfd_cell bcm2835_pm_devs[] = {
+ { .name = "bcm2835-wdt" },
+};
+
+static const struct mfd_cell bcm2835_power_devs[] = {
+ { .name = "bcm2835-power" },
+};
+
+static int bcm2835_pm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct bcm2835_pm *pm;
+ int ret;
+
+ pm = devm_kzalloc(dev, sizeof(*pm), GFP_KERNEL);
+ if (!pm)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, pm);
+
+ pm->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ ret = devm_mfd_add_devices(dev, -1,
+ bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ /* We'll use the presence of the AXI ASB regs in the
+ * bcm2835-pm binding as the key for whether we can reference
+ * the full PM register range and support power domains.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ pm->asb = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pm->asb))
+ return PTR_ERR(pm->asb);
+
+ ret = devm_mfd_add_devices(dev, -1,
+ bcm2835_power_devs,
+ ARRAY_SIZE(bcm2835_power_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_pm_of_match[] = {
+ { .compatible = "brcm,bcm2835-pm-wdt", },
+ { .compatible = "brcm,bcm2835-pm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match);
+
+static struct platform_driver bcm2835_pm_driver = {
+ .probe = bcm2835_pm_probe,
+ .driver = {
+ .name = "bcm2835-pm",
+ .of_match_table = bcm2835_pm_of_match,
+ },
+};
+module_platform_driver(bcm2835_pm_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 02bf20f51349..2ba49e959c3f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2468,9 +2468,14 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
+ queue.flc.stash_control = 1;
+ queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
+ /* 01 01 00 - data, annotation, flow context */
+ queue.flc.value |= 0x14;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid,
- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
+ DPNI_QUEUE_OPT_FLC,
&queue);
if (err) {
dev_err(dev, "dpni_set_queue(RX) failed\n");
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index bb126be1eb72..8b21b40a9fe5 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -49,10 +49,11 @@ config TI_DAVINCI_CPDMA
will be called davinci_cpdma. This is recommended.
config TI_CPSW_PHY_SEL
- bool
+ bool "TI CPSW Phy mode Selection (DEPRECATED)"
+ default n
---help---
This driver supports configuring of the phy mode connected to
- the CPSW.
+ the CPSW. DEPRECATED: use PHY_TI_GMII_SEL.
config TI_CPSW_ALE
tristate "TI CPSW ALE Support"
@@ -64,7 +65,6 @@ config TI_CPSW
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
- select TI_CPSW_PHY_SEL
select TI_CPSW_ALE
select MFD_SYSCON
select REGMAP
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index cf111db3dc27..907e05fc22e4 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -21,7 +21,13 @@
((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+#if IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL)
void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+#else
+static inline
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{}
+#endif
int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
#endif /* __CPSW_H__ */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7fee665ec45e..e905861186e3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2041,53 +2041,52 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
return ret;
}
-/* irq_queues covers admin queue */
-static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
+/*
+ * nirqs is the number of interrupts available for write and read
+ * queues. The core already reserved an interrupt for the admin queue.
+ */
+static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
{
- unsigned int this_w_queues = write_queues;
-
- WARN_ON(!irq_queues);
-
- /*
- * Setup read/write queue split, assign admin queue one independent
- * irq vector if irq_queues is > 1.
- */
- if (irq_queues <= 2) {
- dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
- dev->io_queues[HCTX_TYPE_READ] = 0;
- return;
- }
-
- /*
- * If 'write_queues' is set, ensure it leaves room for at least
- * one read queue and one admin queue
- */
- if (this_w_queues >= irq_queues)
- this_w_queues = irq_queues - 2;
+ struct nvme_dev *dev = affd->priv;
+ unsigned int nr_read_queues;
/*
- * If 'write_queues' is set to zero, reads and writes will share
- * a queue set.
+ * If there is no interrupt available for queues, ensure that
+ * the default queue is set to 1. The affinity set size is
+ * also set to one, but the irq core ignores it for this case.
+ *
+ * If only one interrupt is available or 'write_queues' == 0, combine
+ * write and read queues.
+ *
+ * If 'write_queues' > 0, ensure it leaves room for at least one read
+ * queue.
*/
- if (!this_w_queues) {
- dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
- dev->io_queues[HCTX_TYPE_READ] = 0;
+ if (!nrirqs) {
+ nrirqs = 1;
+ nr_read_queues = 0;
+ } else if (nrirqs == 1 || !write_queues) {
+ nr_read_queues = 0;
+ } else if (write_queues >= nrirqs) {
+ nr_read_queues = 1;
} else {
- dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
- dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
+ nr_read_queues = nrirqs - write_queues;
}
+
+ dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+ affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+ dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+ affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
+ affd->nr_sets = nr_read_queues ? 2 : 1;
}
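
To make the split above concrete, here is a standalone sketch mirroring the same branch logic for a few made-up (nrirqs, write_queues) pairs; in the driver the values come from the write_queues module parameter and from however many vectors PCI actually grants:

#include <stdio.h>

/* Mirror of the split logic in nvme_calc_irq_sets() (illustrative only). */
static void split(unsigned int nrirqs, unsigned int write_queues)
{
	unsigned int nr_read_queues;

	if (!nrirqs) {
		nrirqs = 1;
		nr_read_queues = 0;
	} else if (nrirqs == 1 || !write_queues) {
		nr_read_queues = 0;
	} else if (write_queues >= nrirqs) {
		nr_read_queues = 1;
	} else {
		nr_read_queues = nrirqs - write_queues;
	}

	printf("nrirqs=%u write_queues=%u -> default=%u read=%u sets=%u\n",
	       nrirqs, write_queues, nrirqs - nr_read_queues,
	       nr_read_queues, nr_read_queues ? 2u : 1u);
}

int main(void)
{
	split(0, 0);	/* legacy IRQ case -> default=1 read=0 sets=1 */
	split(8, 0);	/* shared queues   -> default=8 read=0 sets=1 */
	split(8, 2);	/* split           -> default=2 read=6 sets=2 */
	split(2, 4);	/* clamp writes    -> default=1 read=1 sets=2 */
	return 0;
}
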
static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int irq_sets[2];
struct irq_affinity affd = {
- .pre_vectors = 1,
- .nr_sets = ARRAY_SIZE(irq_sets),
- .sets = irq_sets,
+ .pre_vectors = 1,
+ .calc_sets = nvme_calc_irq_sets,
+ .priv = dev,
};
- int result = 0;
unsigned int irq_queues, this_p_queues;
/*
@@ -2103,51 +2102,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
}
dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
- /*
- * For irq sets, we have to ask for minvec == maxvec. This passes
- * any reduction back to us, so we can adjust our queue counts and
- * IRQ vector needs.
- */
- do {
- nvme_calc_io_queues(dev, irq_queues);
- irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
- irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
- if (!irq_sets[1])
- affd.nr_sets = 1;
-
- /*
- * If we got a failure and we're down to asking for just
- * 1 + 1 queues, just ask for a single vector. We'll share
- * that between the single IO queue and the admin queue.
- * Otherwise, we assign one independent vector to admin queue.
- */
- if (irq_queues > 1)
- irq_queues = irq_sets[0] + irq_sets[1] + 1;
+ /* Initialize for the single interrupt case */
+ dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
+ dev->io_queues[HCTX_TYPE_READ] = 0;
- result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
- irq_queues,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
-
- /*
- * Need to reduce our vec counts. If we get ENOSPC, the
- * platform should support mulitple vecs, we just need
- * to decrease our ask. If we get EINVAL, the platform
- * likely does not. Back down to ask for just one vector.
- */
- if (result == -ENOSPC) {
- irq_queues--;
- if (!irq_queues)
- return result;
- continue;
- } else if (result == -EINVAL) {
- irq_queues = 1;
- continue;
- } else if (result <= 0)
- return -EIO;
- break;
- } while (1);
-
- return result;
+ return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
static void nvme_disable_io_queues(struct nvme_dev *dev)
@@ -3024,6 +2984,7 @@ static struct pci_driver nvme_driver = {
static int __init nvme_init(void)
{
+ BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 0a7a470ee859..4ad846ceac7c 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -192,4 +192,14 @@ config SC27XX_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem-sc27xx-efuse.
+config NVMEM_ZYNQMP
+ bool "Xilinx ZYNQMP SoC nvmem firmware support"
+ depends on ARCH_ZYNQMP
+ help
+ This is a driver to access hardware-related data like the
+ SoC revision and IDCODE through the firmware
+ interface.
+
+ If sure, say yes. If unsure, say no.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 4e8c61628f1a..2ece8ffffdda 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -41,3 +41,5 @@ obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644
index 000000000000..490c8fcaec80
--- /dev/null
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+ int idcode, version;
+ struct zynqmp_nvmem_data *priv = context;
+
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->get_chipid)
+ return -ENXIO;
+
+ ret = eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "zynqmp-nvmem",
+ .owner = THIS_MODULE,
+ .word_size = 1,
+ .size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+ { .compatible = "xlnx,zynqmp-nvmem-fw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_nvmem_data *priv;
+
+ priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ econfig.dev = dev;
+ econfig.reg_read = zynqmp_nvmem_read;
+ econfig.priv = priv;
+
+ priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+ .probe = zynqmp_nvmem_probe,
+ .driver = {
+ .name = "zynqmp-nvmem",
+ .of_match_table = zynqmp_nvmem_match,
+ },
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>, Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 18f1639dbc4a..e06a0ab05ad6 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -131,6 +131,24 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
+ * @opp: opp for which the level value has to be returned
+ *
+ * Return: level read from device tree corresponding to the opp, else
+ * return 0.
+ */
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ return opp->level;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
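
A minimal consumer sketch for the new helper; the caller and its error handling are hypothetical, but the lookup/put pattern is the usual OPP API:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

/* Hypothetical usage: fetch the performance level of the OPP selected
 * for a target frequency. */
static int example_get_opp_level(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int level;

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	level = dev_pm_opp_get_level(opp);
	dev_pm_opp_put(opp);

	return level;	/* 0 means no opp-level property was present */
}
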
+
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 06f0f632ec47..1779f2c93291 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -594,6 +594,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
new_opp->rate = (unsigned long)rate;
}
+ of_property_read_u32(np, "opp-level", &new_opp->level);
+
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index e24d81497375..4458175aa661 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -60,6 +60,7 @@ extern struct list_head opp_tables;
* @suspend: true if suspend OPP
* @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
+ * @level: Performance level
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
@@ -80,6 +81,7 @@ struct dev_pm_opp {
bool suspend;
unsigned int pstate;
unsigned long rate;
+ unsigned int level;
struct dev_pm_opp_supply *supplies;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 4c0b47867258..73986825d221 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -532,7 +532,7 @@ error_attrs:
}
static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
+msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
{
struct irq_affinity_desc *masks = NULL;
struct msi_desc *entry;
@@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* which could have been allocated.
*/
static int msi_capability_init(struct pci_dev *dev, int nvec,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
@@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
@@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev,
* requested MSI-X entries with allocated irqs or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, const struct irq_affinity *affd)
+ int nvec, struct irq_affinity *affd)
{
int ret;
u16 control;
@@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, const struct irq_affinity *affd)
+ int nvec, struct irq_affinity *affd)
{
int nr_entries;
int i, j;
@@ -1018,7 +1018,7 @@ int pci_msi_enabled(void)
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
int nvec;
int rc;
@@ -1035,13 +1035,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (maxvec < minvec)
return -ERANGE;
- /*
- * If the caller is passing in sets, we can't support a range of
- * vectors. The caller needs to handle that.
- */
- if (affd && affd->nr_sets && minvec != maxvec)
- return -EINVAL;
-
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
@@ -1086,20 +1079,13 @@ EXPORT_SYMBOL(pci_enable_msi);
static int __pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec,
- int maxvec, const struct irq_affinity *affd)
+ int maxvec, struct irq_affinity *affd)
{
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
- /*
- * If the caller is passing in sets, we can't support a range of
- * supported vectors. The caller needs to handle that.
- */
- if (affd && affd->nr_sets && minvec != maxvec)
- return -EINVAL;
-
if (WARN_ON_ONCE(dev->msix_enabled))
return -EINVAL;
@@ -1165,9 +1151,9 @@ EXPORT_SYMBOL(pci_enable_msix_range);
*/
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
- const struct irq_affinity *affd)
+ struct irq_affinity *affd)
{
- static const struct irq_affinity msi_default_affd;
+ struct irq_affinity msi_default_affd = {0};
int msix_vecs = -ENOSPC;
int msi_vecs = -ENOSPC;
@@ -1196,6 +1182,13 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
/* use legacy irq if allowed */
if (flags & PCI_IRQ_LEGACY) {
if (min_vecs == 1 && dev->irq) {
+ /*
+ * Invoke the affinity spreading logic to ensure that
+ * the device driver can adjust queue configuration
+ * for the single interrupt case.
+ */
+ if (affd)
+ irq_create_affinity_masks(1, affd);
pci_intx(dev, 1);
return 1;
}
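
For context, a driver-side sketch of the calc_sets mechanism this relies on, modelled on the NVMe conversion above; my_dev, my_calc_sets and the 50/50 split are hypothetical, only the irq_affinity fields and the pci_alloc_irq_vectors_affinity() call match the patch:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev {
	unsigned int nr_write_queues;
	unsigned int nr_read_queues;
};

/* Called back by the IRQ core with however many vectors were granted,
 * including the single legacy-IRQ case handled above. */
static void my_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct my_dev *md = affd->priv;

	if (!nvecs)
		nvecs = 1;	/* legacy IRQ: queues still need a sane split */

	md->nr_read_queues  = nvecs > 1 ? nvecs / 2 : 0;
	md->nr_write_queues = nvecs - md->nr_read_queues;

	affd->nr_sets = md->nr_read_queues ? 2 : 1;
	affd->set_size[0] = md->nr_write_queues;
	affd->set_size[1] = md->nr_read_queues;
}

static int my_setup_irqs(struct pci_dev *pdev, struct my_dev *md,
			 unsigned int max_vecs)
{
	struct irq_affinity affd = {
		.pre_vectors = 1,		/* e.g. an admin interrupt */
		.calc_sets = my_calc_sets,
		.priv = md,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 1, max_vecs,
			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
}
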
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 1bfeb160c5b1..bfd03e023308 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1327,15 +1327,6 @@ static int cci_pmu_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- /* We have no filtering of any kind */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
/*
* Following the example set by other "uncore" PMUs, we accept any CPU
* and rewrite its affinity dynamically rather than having perf core
@@ -1433,6 +1424,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
.stop = cci_pmu_stop,
.read = pmu_read,
.attr_groups = pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
cci_pmu->plat_device = pdev;
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 7dd850e02f19..2ae76026e947 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -741,10 +741,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- if (has_branch_stack(event) || event->attr.exclude_user ||
- event->attr.exclude_kernel || event->attr.exclude_hv ||
- event->attr.exclude_idle || event->attr.exclude_host ||
- event->attr.exclude_guest) {
+ if (has_branch_stack(event)) {
dev_dbg(ccn->dev, "Can't exclude execution levels!\n");
return -EINVAL;
}
@@ -1290,6 +1287,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
.read = arm_ccn_pmu_event_read,
.pmu_enable = arm_ccn_pmu_enable,
.pmu_disable = arm_ccn_pmu_disable,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
/* No overflow interrupt? Have to use a timer instead. */
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 660cb8ac886a..5851de56bbd0 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -562,13 +562,7 @@ static int dsu_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
- if (has_branch_stack(event) ||
- event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest) {
+ if (has_branch_stack(event)) {
dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
return -EINVAL;
}
@@ -735,6 +729,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
.read = dsu_pmu_read,
.attr_groups = dsu_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d0b7dd8fb184..eec75b97e7ea 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -357,13 +357,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
}
static int
-event_requires_mode_exclusion(struct perf_event_attr *attr)
-{
- return attr->exclude_idle || attr->exclude_user ||
- attr->exclude_kernel || attr->exclude_hv;
-}
-
-static int
__hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
@@ -393,9 +386,8 @@ __hw_perf_event_init(struct perf_event *event)
/*
* Check whether we need to exclude the counter from certain modes.
*/
- if ((!armpmu->set_event_filter ||
- armpmu->set_event_filter(hwc, &event->attr)) &&
- event_requires_mode_exclusion(&event->attr)) {
+ if (armpmu->set_event_filter &&
+ armpmu->set_event_filter(hwc, &event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
return -EOPNOTSUPP;
@@ -867,6 +859,9 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
return ret;
+ if (!pmu->set_event_filter)
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
+
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
goto out_destroy;
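
These conversions delegate the exclude_user/exclude_kernel/... checks to the perf core: a PMU that sets PERF_PMU_CAP_NO_EXCLUDE (or, for arm_pmu, has no set_event_filter) should now have such requests rejected centrally. A hypothetical user-space sketch of an open that would be refused with EINVAL on these PMUs:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* pmu_type and config are hypothetical; a real caller would read the
 * dynamic PMU type from sysfs. */
static int open_counter(int pmu_type, unsigned long long config)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;
	attr.config = config;
	attr.exclude_kernel = 1;	/* rejected for NO_EXCLUDE PMUs */

	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}
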
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8e46a9dad2fa..7cb766dafe85 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event)
{
}
-static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
- bool snapshot)
+static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
- int i;
+ int i, cpu = event->cpu;
struct page **pglist;
struct arm_spe_pmu_buf *buf;
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 69372e2bc93c..0eba947c2ee9 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -396,6 +396,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
.attr_groups = hisi_ddrc_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 443906e0aff3..2553a844ebf6 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -407,6 +407,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
.attr_groups = hisi_hha_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 0bde5d919b2e..cf1cc34f402a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -397,6 +397,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
.stop = hisi_uncore_pmu_stop,
.read = hisi_uncore_pmu_read,
.attr_groups = hisi_l3c_pmu_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 9efd2413240c..f028cbc3443c 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -142,15 +142,6 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- /* counters do not have these bits */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_host ||
- event->attr.exclude_guest ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle)
- return -EINVAL;
-
/*
* The uncore counters not specific to any CPU, so cannot
* support per-task
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 842135cf35a3..091b4d7d32c4 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -509,14 +509,6 @@ static int l2_cache_event_init(struct perf_event *event)
return -EOPNOTSUPP;
}
- /* We cannot filter accurately so we just don't allow it. */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_hv || event->attr.exclude_idle) {
- dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
- "Can't exclude execution levels\n");
- return -EOPNOTSUPP;
- }
-
if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
((event->attr.config & ~L2_EVT_MASK) != 0)) &&
(event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
@@ -982,6 +974,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev)
.stop = l2_cache_event_stop,
.read = l2_cache_event_read,
.attr_groups = l2_cache_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
l2cache_pmu->num_counters = get_num_counters();
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 2dc63d61f2ea..5d70646da8c7 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -495,13 +495,6 @@ static int qcom_l3_cache__event_init(struct perf_event *event)
return -ENOENT;
/*
- * There are no per-counter mode filters in the PMU.
- */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_hv || event->attr.exclude_idle)
- return -EINVAL;
-
- /*
* Sampling not supported since these events are not core-attributable.
*/
if (hwc->sample_period)
@@ -777,6 +770,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
.read = qcom_l3_cache__event_read,
.attr_groups = qcom_l3_cache_pmu_attr_grps,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index c9a1701d3e54..43d76c85da56 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -424,15 +424,6 @@ static int tx2_uncore_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- /* We have no filtering of any kind */
- if (event->attr.exclude_user ||
- event->attr.exclude_kernel ||
- event->attr.exclude_hv ||
- event->attr.exclude_idle ||
- event->attr.exclude_host ||
- event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
@@ -572,6 +563,7 @@ static int tx2_uncore_pmu_register(
.start = tx2_uncore_event_start,
.stop = tx2_uncore_event_stop,
.read = tx2_uncore_event_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0dc9ff0f8894..d4ec04868d59 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -917,11 +917,6 @@ static int xgene_perf_event_init(struct perf_event *event)
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- /* SOC counters do not have usr/os/guest/host bits */
- if (event->attr.exclude_user || event->attr.exclude_kernel ||
- event->attr.exclude_host || event->attr.exclude_guest)
- return -EINVAL;
-
if (event->cpu < 0)
return -EINVAL;
/*
@@ -1136,6 +1131,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
.start = xgene_perf_start,
.stop = xgene_perf_stop,
.read = xgene_perf_read,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
/* Hardware counter init */
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 48f3594a7458..79bd102c9bbc 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -124,7 +124,7 @@ static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
return err;
}
-static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx)
+static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock_info *ops;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 2e01bd833ffd..2c8c23db92fb 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -40,6 +40,14 @@ config RESET_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
+config RESET_BRCMSTB
+ tristate "Broadcom STB reset controller"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ This enables the reset controller driver for Broadcom STB SoCs using
+ a SUN_TOP_CTRL_SW_INIT style controller.
+
config RESET_HSDK
bool "Synopsys HSDK Reset Driver"
depends on HAS_IOMEM
@@ -48,9 +56,9 @@ config RESET_HSDK
This enables the reset controller driver for HSDK board.
config RESET_IMX7
- bool "i.MX7 Reset Driver" if COMPILE_TEST
+ bool "i.MX7/8 Reset Driver" if COMPILE_TEST
depends on HAS_IOMEM
- default SOC_IMX7D
+ default SOC_IMX7D || (ARM64 && ARCH_MXC)
select MFD_SYSCON
help
This enables the reset controller driver for i.MX7 SoCs.
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index dc7874df78d9..61456b8f659c 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
+obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
new file mode 100644
index 000000000000..a608f445dad6
--- /dev/null
+++ b/drivers/reset/reset-brcmstb.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB generic reset controller for SW_INIT style reset controller
+ *
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ * Copyright (C) 2018 Broadcom
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+struct brcmstb_reset {
+ void __iomem *base;
+ struct reset_controller_dev rcdev;
+};
+
+#define SW_INIT_SET 0x00
+#define SW_INIT_CLEAR 0x04
+#define SW_INIT_STATUS 0x08
+
+#define SW_INIT_BIT(id) BIT((id) & 0x1f)
+#define SW_INIT_BANK(id) ((id) >> 5)
+
+/* A full bank contains extra registers that we are not utilizing but still
+ * qualify as a single bank.
+ */
+#define SW_INIT_BANK_SIZE 0x18
+
+static inline
+struct brcmstb_reset *to_brcmstb(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct brcmstb_reset, rcdev);
+}
+
+static int brcmstb_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_SET);
+
+ return 0;
+}
+
+static int brcmstb_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_CLEAR);
+ /* Maximum reset delay after de-asserting a line and seeing block
+ * operation is typically 14us for the worst case, so build in some
+ * slack here.
+ */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int brcmstb_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ return readl_relaxed(priv->base + off + SW_INIT_STATUS) &
+ SW_INIT_BIT(id);
+}
+
+static const struct reset_control_ops brcmstb_reset_ops = {
+ .assert = brcmstb_reset_assert,
+ .deassert = brcmstb_reset_deassert,
+ .status = brcmstb_reset_status,
+};
+
+static int brcmstb_reset_probe(struct platform_device *pdev)
+{
+ struct device *kdev = &pdev->dev;
+ struct brcmstb_reset *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
+ !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
+ dev_err(kdev, "incorrect register range\n");
+ return -EINVAL;
+ }
+
+ priv->base = devm_ioremap_resource(kdev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dev_set_drvdata(kdev, priv);
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = DIV_ROUND_DOWN_ULL(resource_size(res),
+ SW_INIT_BANK_SIZE) * 32;
+ priv->rcdev.ops = &brcmstb_reset_ops;
+ priv->rcdev.of_node = kdev->of_node;
+ /* Use defaults: 1 cell and simple xlate function */
+
+ return devm_reset_controller_register(kdev, &priv->rcdev);
+}
+
+static const struct of_device_id brcmstb_reset_of_match[] = {
+ { .compatible = "brcm,brcmstb-reset" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver brcmstb_reset_driver = {
+ .probe = brcmstb_reset_probe,
+ .driver = {
+ .name = "brcmstb-reset",
+ .of_match_table = brcmstb_reset_of_match,
+ },
+};
+module_platform_driver(brcmstb_reset_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB reset controller");
+MODULE_LICENSE("GPL");
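
A quick standalone check of the SW_INIT bank/bit arithmetic used by this driver, with a hypothetical reset id (BIT() replaced by a plain shift so the sketch builds in user space):

#include <stdio.h>

#define SW_INIT_BIT(id)		(1u << ((id) & 0x1f))
#define SW_INIT_BANK(id)	((id) >> 5)
#define SW_INIT_BANK_SIZE	0x18

int main(void)
{
	unsigned long id = 40;	/* hypothetical reset line */

	/* id 40 -> bank 1, bit 8 (mask 0x100); that bank's SET register
	 * sits at base + 1 * 0x18 + 0x00. */
	printf("bank %lu, bit mask 0x%x, bank offset 0x%lx\n",
	       SW_INIT_BANK(id), SW_INIT_BIT(id),
	       SW_INIT_BANK(id) * SW_INIT_BANK_SIZE);
	return 0;
}
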
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 77911fa8f31d..aed76e33a0a9 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -17,14 +17,27 @@
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
#include <dt-bindings/reset/imx7-reset.h>
+#include <dt-bindings/reset/imx8mq-reset.h>
+
+struct imx7_src_signal {
+ unsigned int offset, bit;
+};
+
+struct imx7_src_variant {
+ const struct imx7_src_signal *signals;
+ unsigned int signals_num;
+ struct reset_control_ops ops;
+};
struct imx7_src {
struct reset_controller_dev rcdev;
struct regmap *regmap;
+ const struct imx7_src_signal *signals;
};
enum imx7_src_registers {
@@ -39,9 +52,14 @@ enum imx7_src_registers {
SRC_DDRC_RCR = 0x1000,
};
-struct imx7_src_signal {
- unsigned int offset, bit;
-};
+static int imx7_reset_update(struct imx7_src *imx7src,
+ unsigned long id, unsigned int value)
+{
+ const struct imx7_src_signal *signal = &imx7src->signals[id];
+
+ return regmap_update_bits(imx7src->regmap,
+ signal->offset, signal->bit, value);
+}
static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) },
@@ -81,8 +99,8 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
- const struct imx7_src_signal *signal = &imx7_src_signals[id];
- unsigned int value = assert ? signal->bit : 0;
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
switch (id) {
case IMX7_RESET_PCIEPHY:
@@ -95,12 +113,11 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX7_RESET_PCIE_CTRL_APPS_EN:
- value = (assert) ? 0 : signal->bit;
+ value = assert ? 0 : bit;
break;
}
- return regmap_update_bits(imx7src->regmap,
- signal->offset, signal->bit, value);
+ return imx7_reset_update(imx7src, id, value);
}
static int imx7_reset_assert(struct reset_controller_dev *rcdev,
@@ -115,9 +132,133 @@ static int imx7_reset_deassert(struct reset_controller_dev *rcdev,
return imx7_reset_set(rcdev, id, false);
}
-static const struct reset_control_ops imx7_reset_ops = {
- .assert = imx7_reset_assert,
- .deassert = imx7_reset_deassert,
+static const struct imx7_src_variant variant_imx7 = {
+ .signals = imx7_src_signals,
+ .signals_num = ARRAY_SIZE(imx7_src_signals),
+ .ops = {
+ .assert = imx7_reset_assert,
+ .deassert = imx7_reset_deassert,
+ },
+};
+
+enum imx8mq_src_registers {
+ SRC_A53RCR0 = 0x0004,
+ SRC_HDMI_RCR = 0x0030,
+ SRC_DISP_RCR = 0x0034,
+ SRC_GPU_RCR = 0x0040,
+ SRC_VPU_RCR = 0x0044,
+ SRC_PCIE2_RCR = 0x0048,
+ SRC_MIPIPHY1_RCR = 0x004c,
+ SRC_MIPIPHY2_RCR = 0x0050,
+ SRC_DDRC2_RCR = 0x1004,
+};
+
+static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
+ [IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
+ [IMX8MQ_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
+ [IMX8MQ_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
+ [IMX8MQ_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
+ [IMX8MQ_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
+ [IMX8MQ_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
+ [IMX8MQ_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
+ [IMX8MQ_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
+ [IMX8MQ_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
+ [IMX8MQ_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
+ [IMX8MQ_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
+ [IMX8MQ_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
+ [IMX8MQ_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
+ [IMX8MQ_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
+ [IMX8MQ_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
+ [IMX8MQ_RESET_SW_NON_SCLR_M4C_RST] = { SRC_M4RCR, BIT(0) },
+ [IMX8MQ_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
+ [IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) },
+ [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
+ [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
+ [IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR,
+ BIT(2) | BIT(1) },
+ [IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
+ [IMX8MQ_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
+ [IMX8MQ_RESET_DISP_RESET] = { SRC_DISP_RCR, BIT(0) },
+ [IMX8MQ_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
+ [IMX8MQ_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
+ [IMX8MQ_RESET_PCIEPHY2] = { SRC_PCIE2_RCR,
+ BIT(2) | BIT(1) },
+ [IMX8MQ_RESET_PCIEPHY2_PERST] = { SRC_PCIE2_RCR, BIT(3) },
+ [IMX8MQ_RESET_PCIE2_CTRL_APPS_EN] = { SRC_PCIE2_RCR, BIT(6) },
+ [IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF] = { SRC_PCIE2_RCR, BIT(11) },
+ [IMX8MQ_RESET_MIPI_CSI1_CORE_RESET] = { SRC_MIPIPHY1_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET] = { SRC_MIPIPHY1_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_CSI1_ESC_RESET] = { SRC_MIPIPHY1_RCR, BIT(2) },
+ [IMX8MQ_RESET_MIPI_CSI2_CORE_RESET] = { SRC_MIPIPHY2_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET] = { SRC_MIPIPHY2_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_CSI2_ESC_RESET] = { SRC_MIPIPHY2_RCR, BIT(2) },
+ [IMX8MQ_RESET_DDRC1_PRST] = { SRC_DDRC_RCR, BIT(0) },
+ [IMX8MQ_RESET_DDRC1_CORE_RESET] = { SRC_DDRC_RCR, BIT(1) },
+ [IMX8MQ_RESET_DDRC1_PHY_RESET] = { SRC_DDRC_RCR, BIT(2) },
+ [IMX8MQ_RESET_DDRC2_PHY_RESET] = { SRC_DDRC2_RCR, BIT(0) },
+ [IMX8MQ_RESET_DDRC2_CORE_RESET] = { SRC_DDRC2_RCR, BIT(1) },
+ [IMX8MQ_RESET_DDRC2_PRST] = { SRC_DDRC2_RCR, BIT(2) },
+};
+
+static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct imx7_src *imx7src = to_imx7_src(rcdev);
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
+
+ switch (id) {
+ case IMX8MQ_RESET_PCIEPHY:
+ case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */
+ /*
+ * wait for more than 10us to release phy g_rst and
+ * btnrst
+ */
+ if (!assert)
+ udelay(10);
+ break;
+
+ case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
+ value = assert ? 0 : bit;
+ break;
+ }
+
+ return imx7_reset_update(imx7src, id, value);
+}
+
+static int imx8mq_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mq_reset_set(rcdev, id, true);
+}
+
+static int imx8mq_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mq_reset_set(rcdev, id, false);
+}
+
+static const struct imx7_src_variant variant_imx8mq = {
+ .signals = imx8mq_src_signals,
+ .signals_num = ARRAY_SIZE(imx8mq_src_signals),
+ .ops = {
+ .assert = imx8mq_reset_assert,
+ .deassert = imx8mq_reset_deassert,
+ },
};
static int imx7_reset_probe(struct platform_device *pdev)
@@ -125,11 +266,13 @@ static int imx7_reset_probe(struct platform_device *pdev)
struct imx7_src *imx7src;
struct device *dev = &pdev->dev;
struct regmap_config config = { .name = "src" };
+ const struct imx7_src_variant *variant = of_device_get_match_data(dev);
imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL);
if (!imx7src)
return -ENOMEM;
+ imx7src->signals = variant->signals;
imx7src->regmap = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(imx7src->regmap)) {
dev_err(dev, "Unable to get imx7-src regmap");
@@ -138,15 +281,16 @@ static int imx7_reset_probe(struct platform_device *pdev)
regmap_attach_dev(dev, imx7src->regmap, &config);
imx7src->rcdev.owner = THIS_MODULE;
- imx7src->rcdev.nr_resets = IMX7_RESET_NUM;
- imx7src->rcdev.ops = &imx7_reset_ops;
+ imx7src->rcdev.nr_resets = variant->signals_num;
+ imx7src->rcdev.ops = &variant->ops;
imx7src->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &imx7src->rcdev);
}
static const struct of_device_id imx7_reset_dt_ids[] = {
- { .compatible = "fsl,imx7d-src", },
+ { .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
+ { .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
{ /* sentinel */ },
};
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 318cfc51c441..96953992c2bb 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/socfpga.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -18,7 +19,6 @@
#include "reset-simple.h"
#define SOCFPGA_NR_BANKS 8
-void __init socfpga_reset_init(void);
static int a10_reset_init(struct device_node *np)
{
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index db9a1a75523f..b06d724d8f21 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/sunxi.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
new file mode 100644
index 000000000000..2ef1f13aa47b
--- /dev/null
+++ b/drivers/reset/reset-zynqmp.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START)
+#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START
+
+struct zynqmp_reset_data {
+ struct reset_controller_dev rcdev;
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+static inline struct zynqmp_reset_data *
+to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct zynqmp_reset_data, rcdev);
+}
+
+static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_ASSERT);
+}
+
+static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_RELEASE);
+}
+
+static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+ int val, err;
+
+ err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_PULSE);
+}
+
+static struct reset_control_ops zynqmp_reset_ops = {
+ .reset = zynqmp_reset_reset,
+ .assert = zynqmp_reset_assert,
+ .deassert = zynqmp_reset_deassert,
+ .status = zynqmp_reset_status,
+};
+
+static int zynqmp_reset_probe(struct platform_device *pdev)
+{
+ struct zynqmp_reset_data *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (!priv->eemi_ops)
+ return -ENXIO;
+
+ priv->rcdev.ops = &zynqmp_reset_ops;
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.of_node = pdev->dev.of_node;
+ priv->rcdev.nr_resets = ZYNQMP_NR_RESETS;
+
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id zynqmp_reset_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-reset", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver zynqmp_reset_driver = {
+ .probe = zynqmp_reset_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = zynqmp_reset_dt_ids,
+ },
+};
+
+static int __init zynqmp_reset_init(void)
+{
+ return platform_driver_register(&zynqmp_reset_driver);
+}
+
+arch_initcall(zynqmp_reset_init);
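
A consumer-side sketch (hypothetical peripheral driver, standard reset API) of how these firmware-backed resets get exercised; reset_control_reset() ends up in zynqmp_reset_reset() and thus in an EEMI PM_RESET_ACTION_PULSE request:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical consumer: pulse the reset line referenced by this
 * device's "resets" property. */
static int example_pulse_reset(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	return reset_control_reset(rst);
}
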
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 74e260027c7d..76e49d902609 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
/* if eqid_count == 1 fall back to INTX */
if (enable_msix && nvec > 1) {
- const struct irq_affinity desc = { .post_vectors = 1 };
+ struct irq_affinity desc = { .post_vectors = 1 };
if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index fce33ca76bb6..be95a37c3fec 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -51,16 +51,30 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
{
struct device_node *canvas_node;
struct platform_device *canvas_pdev;
+ struct meson_canvas *canvas;
canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0);
if (!canvas_node)
return ERR_PTR(-ENODEV);
canvas_pdev = of_find_device_by_node(canvas_node);
- if (!canvas_pdev)
+ if (!canvas_pdev) {
+ of_node_put(canvas_node);
return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(canvas_node);
+
+ /*
+ * If priv is NULL, it's probably because the canvas hasn't been
+ * properly initialized. Bail out with -EINVAL because, in the
+ * current state, this driver probe cannot return -EPROBE_DEFER.
+ */
+ canvas = dev_get_drvdata(&canvas_pdev->dev);
+ if (!canvas)
+ return ERR_PTR(-EINVAL);
- return dev_get_drvdata(&canvas_pdev->dev);
+ return canvas;
}
EXPORT_SYMBOL_GPL(meson_canvas_get);
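
From a consumer's point of view (hypothetical caller), the fix above means -EPROBE_DEFER still signals that the provider is not bound yet, while a bound but uninitialized provider now fails hard with -EINVAL:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/amlogic/meson-canvas.h>

static int example_bind_canvas(struct device *dev,
			       struct meson_canvas **out)
{
	struct meson_canvas *canvas = meson_canvas_get(dev);

	/* -EPROBE_DEFER: retry later; -EINVAL: hard failure. */
	if (IS_ERR(canvas))
		return PTR_ERR(canvas);

	*out = canvas;
	return 0;
}
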
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index daea191a66fa..19d4cbc93a17 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -165,6 +165,194 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(82, "ge2d"),
};
+static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "a53_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(5, "gp1_pll"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(23, "mmc_clk"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(40, "mod_eth_tx_clk"),
+ CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmm_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(66, "audio_slv_lrclk_c"),
+ CLK_MSR_ID(67, "audio_slv_lrclk_b"),
+ CLK_MSR_ID(68, "audio_slv_lrclk_a"),
+ CLK_MSR_ID(69, "audio_slv_sclk_c"),
+ CLK_MSR_ID(70, "audio_slv_sclk_b"),
+ CLK_MSR_ID(71, "audio_slv_sclk_a"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(74, "wifi_beacon"),
+ CLK_MSR_ID(75, "tdmin_lb_lrcl"),
+ CLK_MSR_ID(76, "tdmin_lb_sclk"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(84, "audio_resample"),
+ CLK_MSR_ID(85, "audio_pdm_sys"),
+ CLK_MSR_ID(86, "audio_spdifout"),
+ CLK_MSR_ID(87, "audio_spdifin"),
+ CLK_MSR_ID(88, "audio_lrclk_f"),
+ CLK_MSR_ID(89, "audio_lrclk_e"),
+ CLK_MSR_ID(90, "audio_lrclk_d"),
+ CLK_MSR_ID(91, "audio_lrclk_c"),
+ CLK_MSR_ID(92, "audio_lrclk_b"),
+ CLK_MSR_ID(93, "audio_lrclk_a"),
+ CLK_MSR_ID(94, "audio_sclk_f"),
+ CLK_MSR_ID(95, "audio_sclk_e"),
+ CLK_MSR_ID(96, "audio_sclk_d"),
+ CLK_MSR_ID(97, "audio_sclk_c"),
+ CLK_MSR_ID(98, "audio_sclk_b"),
+ CLK_MSR_ID(99, "audio_sclk_a"),
+ CLK_MSR_ID(100, "audio_mclk_f"),
+ CLK_MSR_ID(101, "audio_mclk_e"),
+ CLK_MSR_ID(102, "audio_mclk_d"),
+ CLK_MSR_ID(103, "audio_mclk_c"),
+ CLK_MSR_ID(104, "audio_mclk_b"),
+ CLK_MSR_ID(105, "audio_mclk_a"),
+ CLK_MSR_ID(106, "pcie_refclk_n"),
+ CLK_MSR_ID(107, "pcie_refclk_p"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+};
+
+static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "sys_cpu_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "eth_tx"),
+ CLK_MSR_ID(12, "hifi_pll"),
+ CLK_MSR_ID(13, "mod_tcon"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "lcd_an_ph2"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "lcd_an_ph3"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "mpll_50m"),
+ CLK_MSR_ID(24, "eth_125m"),
+ CLK_MSR_ID(25, "eth_rmii"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(27, "in_mac"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(29, "pcie_inp"),
+ CLK_MSR_ID(30, "pcie_inn"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(33, "sys_cpu_ring_osc_1"),
+ CLK_MSR_ID(34, "eth_mpll_50m"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "cdac"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "bt656"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(54, "vpu_clkc"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "wave420l_a"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevcb"),
+ CLK_MSR_ID(63, "dsi_meas"),
+ CLK_MSR_ID(64, "spicc_1"),
+ CLK_MSR_ID(65, "spicc_0"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(67, "dsi_phy"),
+ CLK_MSR_ID(68, "hdcp22_esm"),
+ CLK_MSR_ID(69, "hdcp22_skp"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(75, "hevcf"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(83, "co_rx"),
+ CLK_MSR_ID(84, "co_tx"),
+ CLK_MSR_ID(89, "hdmi_todig"),
+ CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(94, "eth_phy_rx"),
+ CLK_MSR_ID(95, "eth_phy_pll"),
+ CLK_MSR_ID(96, "vpu_b"),
+ CLK_MSR_ID(97, "cpu_b_tmp"),
+ CLK_MSR_ID(98, "ts"),
+ CLK_MSR_ID(99, "ring_osc_out_ee_3"),
+ CLK_MSR_ID(100, "ring_osc_out_ee_4"),
+ CLK_MSR_ID(101, "ring_osc_out_ee_5"),
+ CLK_MSR_ID(102, "ring_osc_out_ee_6"),
+ CLK_MSR_ID(103, "ring_osc_out_ee_7"),
+ CLK_MSR_ID(104, "ring_osc_out_ee_8"),
+ CLK_MSR_ID(105, "ring_osc_out_ee_9"),
+ CLK_MSR_ID(106, "ephy_test"),
+ CLK_MSR_ID(107, "au_dac_g128x"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+ CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(117, "audio_resample"),
+ CLK_MSR_ID(118, "audio_pdm_sys"),
+ CLK_MSR_ID(119, "audio_spdifout_b"),
+ CLK_MSR_ID(120, "audio_spdifout"),
+ CLK_MSR_ID(121, "audio_spdifin"),
+ CLK_MSR_ID(122, "audio_pdm_dclk"),
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
unsigned int duration)
{
@@ -337,6 +525,14 @@ static const struct of_device_id meson_msr_match_table[] = {
.compatible = "amlogic,meson8b-clk-measure",
.data = (void *)clk_msr_m8,
},
+ {
+ .compatible = "amlogic,meson-axg-clk-measure",
+ .data = (void *)clk_msr_axg,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-clk-measure",
+ .data = (void *)clk_msr_g12a,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 055a845ed979..03fa91fbe2da 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -1,5 +1,17 @@
menu "Broadcom SoC drivers"
+config BCM2835_POWER
+ bool "BCM2835 power domain driver"
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+ default y if ARCH_BCM2835
+ select PM_GENERIC_DOMAINS if PM
+ select RESET_CONTROLLER
+ help
+ This enables support for the BCM2835 power domains and reset
+ controller. Any power domain that the Raspberry Pi firmware also
+ uses must be driven from Linux through the RASPBERRYPI_POWER
+ driver instead.
+
config RASPBERRYPI_POWER
bool "Raspberry Pi power domain driver"
depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index dc4fced72d21..c81df4b2403c 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1,2 +1,3 @@
+obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
new file mode 100644
index 000000000000..9351349cf0a9
--- /dev/null
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Power domain driver for Broadcom BCM2835
+ *
+ * Copyright (C) 2018 Broadcom
+ */
+
+#include <dt-bindings/soc/bcm2835-pm.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define PM_GNRIC 0x00
+#define PM_AUDIO 0x04
+#define PM_STATUS 0x18
+#define PM_RSTC 0x1c
+#define PM_RSTS 0x20
+#define PM_WDOG 0x24
+#define PM_PADS0 0x28
+#define PM_PADS2 0x2c
+#define PM_PADS3 0x30
+#define PM_PADS4 0x34
+#define PM_PADS5 0x38
+#define PM_PADS6 0x3c
+#define PM_CAM0 0x44
+#define PM_CAM0_LDOHPEN BIT(2)
+#define PM_CAM0_LDOLPEN BIT(1)
+#define PM_CAM0_CTRLEN BIT(0)
+
+#define PM_CAM1 0x48
+#define PM_CAM1_LDOHPEN BIT(2)
+#define PM_CAM1_LDOLPEN BIT(1)
+#define PM_CAM1_CTRLEN BIT(0)
+
+#define PM_CCP2TX 0x4c
+#define PM_CCP2TX_LDOEN BIT(1)
+#define PM_CCP2TX_CTRLEN BIT(0)
+
+#define PM_DSI0 0x50
+#define PM_DSI0_LDOHPEN BIT(2)
+#define PM_DSI0_LDOLPEN BIT(1)
+#define PM_DSI0_CTRLEN BIT(0)
+
+#define PM_DSI1 0x54
+#define PM_DSI1_LDOHPEN BIT(2)
+#define PM_DSI1_LDOLPEN BIT(1)
+#define PM_DSI1_CTRLEN BIT(0)
+
+#define PM_HDMI 0x58
+#define PM_HDMI_RSTDR BIT(19)
+#define PM_HDMI_LDOPD BIT(1)
+#define PM_HDMI_CTRLEN BIT(0)
+
+#define PM_USB 0x5c
+/* The power gates must be enabled with this bit before enabling the LDO in the
+ * USB block.
+ */
+#define PM_USB_CTRLEN BIT(0)
+
+#define PM_PXLDO 0x60
+#define PM_PXBG 0x64
+#define PM_DFT 0x68
+#define PM_SMPS 0x6c
+#define PM_XOSC 0x70
+#define PM_SPAREW 0x74
+#define PM_SPARER 0x78
+#define PM_AVS_RSTDR 0x7c
+#define PM_AVS_STAT 0x80
+#define PM_AVS_EVENT 0x84
+#define PM_AVS_INTEN 0x88
+#define PM_DUMMY 0xfc
+
+#define PM_IMAGE 0x108
+#define PM_GRAFX 0x10c
+#define PM_PROC 0x110
+#define PM_ENAB BIT(12)
+#define PM_ISPRSTN BIT(8)
+#define PM_H264RSTN BIT(7)
+#define PM_PERIRSTN BIT(6)
+#define PM_V3DRSTN BIT(6)
+#define PM_ISFUNC BIT(5)
+#define PM_MRDONE BIT(4)
+#define PM_MEMREP BIT(3)
+#define PM_ISPOW BIT(2)
+#define PM_POWOK BIT(1)
+#define PM_POWUP BIT(0)
+#define PM_INRUSH_SHIFT 13
+#define PM_INRUSH_3_5_MA 0
+#define PM_INRUSH_5_MA 1
+#define PM_INRUSH_10_MA 2
+#define PM_INRUSH_20_MA 3
+#define PM_INRUSH_MASK (3 << PM_INRUSH_SHIFT)
+
+#define PM_PASSWORD 0x5a000000
+
+#define PM_WDOG_TIME_SET 0x000fffff
+#define PM_RSTC_WRCFG_CLR 0xffffffcf
+#define PM_RSTS_HADWRH_SET 0x00000040
+#define PM_RSTC_WRCFG_SET 0x00000030
+#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
+#define PM_RSTC_RESET 0x00000102
+
+#define PM_READ(reg) readl(power->base + (reg))
+#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg))
+
+#define ASB_BRDG_VERSION 0x00
+#define ASB_CPR_CTRL 0x04
+
+#define ASB_V3D_S_CTRL 0x08
+#define ASB_V3D_M_CTRL 0x0c
+#define ASB_ISP_S_CTRL 0x10
+#define ASB_ISP_M_CTRL 0x14
+#define ASB_H264_S_CTRL 0x18
+#define ASB_H264_M_CTRL 0x1c
+
+#define ASB_REQ_STOP BIT(0)
+#define ASB_ACK BIT(1)
+#define ASB_EMPTY BIT(2)
+#define ASB_FULL BIT(3)
+
+#define ASB_AXI_BRDG_ID 0x20
+
+#define ASB_READ(reg) readl(power->asb + (reg))
+#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg))
+
+struct bcm2835_power_domain {
+ struct generic_pm_domain base;
+ struct bcm2835_power *power;
+ u32 domain;
+ struct clk *clk;
+};
+
+struct bcm2835_power {
+ struct device *dev;
+ /* PM registers. */
+ void __iomem *base;
+ /* AXI Async bridge registers. */
+ void __iomem *asb;
+
+ struct genpd_onecell_data pd_xlate;
+ struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
+ struct reset_controller_dev reset;
+};
+
+static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
+{
+ u64 start = ktime_get_ns();
+
+ /* Enable the module's async AXI bridges. */
+ ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
+ while (ASB_READ(reg) & ASB_ACK) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+{
+ u64 start = ktime_get_ns();
+
+ /* Disable the module's async AXI bridges. */
+ ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
+ while (!(ASB_READ(reg) & ASB_ACK)) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+ struct bcm2835_power *power = pd->power;
+
+ /* Enable functional isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
+
+ /* Enable electrical isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+
+ /* Open the power switches. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP);
+
+ return 0;
+}
+
+static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+ struct bcm2835_power *power = pd->power;
+ struct device *dev = power->dev;
+ u64 start;
+ int ret;
+ int inrush;
+ bool powok;
+
+ /* If it was already powered on by the fw, leave it that way. */
+ if (PM_READ(pm_reg) & PM_POWUP)
+ return 0;
+
+ /* Enable power. Allowing too much current at once may result
+ * in POWOK never getting set, so start low and ramp it up as
+ * necessary to succeed.
+ */
+ powok = false;
+ for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) {
+ PM_WRITE(pm_reg,
+ (PM_READ(pm_reg) & ~PM_INRUSH_MASK) |
+ (inrush << PM_INRUSH_SHIFT) |
+ PM_POWUP);
+
+ start = ktime_get_ns();
+ while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 3000)
+ break;
+ }
+ }
+ if (!powok) {
+ dev_err(dev, "Timeout waiting for %s power OK\n",
+ pd->base.name);
+ ret = -ETIMEDOUT;
+ goto err_disable_powup;
+ }
+
+ /* Disable electrical isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW);
+
+ /* Repair memory */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP);
+ start = ktime_get_ns();
+ while (!(PM_READ(pm_reg) & PM_MRDONE)) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000) {
+ dev_err(dev, "Timeout waiting for %s memory repair\n",
+ pd->base.name);
+ ret = -ETIMEDOUT;
+ goto err_disable_ispow;
+ }
+ }
+
+ /* Disable functional isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC);
+
+ return 0;
+
+err_disable_ispow:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+err_disable_powup:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK));
+ return ret;
+}
+
+static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd,
+ u32 pm_reg,
+ u32 asb_m_reg,
+ u32 asb_s_reg,
+ u32 reset_flags)
+{
+ struct bcm2835_power *power = pd->power;
+ int ret;
+
+ ret = clk_prepare_enable(pd->clk);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable clock for %s\n",
+ pd->base.name);
+ return ret;
+ }
+
+ /* Wait 32 clocks for the reset to propagate; 1 us will be enough */
+ udelay(1);
+
+ clk_disable_unprepare(pd->clk);
+
+ /* Deassert the resets. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags);
+
+ ret = clk_prepare_enable(pd->clk);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable clock for %s\n",
+ pd->base.name);
+ goto err_enable_resets;
+ }
+
+ ret = bcm2835_asb_enable(power, asb_m_reg);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable ASB master for %s\n",
+ pd->base.name);
+ goto err_disable_clk;
+ }
+ ret = bcm2835_asb_enable(power, asb_s_reg);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable ASB slave for %s\n",
+ pd->base.name);
+ goto err_disable_asb_master;
+ }
+
+ return 0;
+
+err_disable_asb_master:
+ bcm2835_asb_disable(power, asb_m_reg);
+err_disable_clk:
+ clk_disable_unprepare(pd->clk);
+err_enable_resets:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+ return ret;
+}
+
+static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd,
+ u32 pm_reg,
+ u32 asb_m_reg,
+ u32 asb_s_reg,
+ u32 reset_flags)
+{
+ struct bcm2835_power *power = pd->power;
+ int ret;
+
+ ret = bcm2835_asb_disable(power, asb_s_reg);
+ if (ret) {
+ dev_warn(power->dev, "Failed to disable ASB slave for %s\n",
+ pd->base.name);
+ return ret;
+ }
+ ret = bcm2835_asb_disable(power, asb_m_reg);
+ if (ret) {
+ dev_warn(power->dev, "Failed to disable ASB master for %s\n",
+ pd->base.name);
+ bcm2835_asb_enable(power, asb_s_reg);
+ return ret;
+ }
+
+ clk_disable_unprepare(pd->clk);
+
+ /* Assert the resets. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+
+ return 0;
+}
+
+static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct bcm2835_power_domain *pd =
+ container_of(domain, struct bcm2835_power_domain, base);
+ struct bcm2835_power *power = pd->power;
+
+ switch (pd->domain) {
+ case BCM2835_POWER_DOMAIN_GRAFX:
+ return bcm2835_power_power_on(pd, PM_GRAFX);
+
+ case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+ return bcm2835_asb_power_on(pd, PM_GRAFX,
+ ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+ PM_V3DRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE:
+ return bcm2835_power_power_on(pd, PM_IMAGE);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ 0, 0,
+ PM_PERIRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+ PM_ISPRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_H264:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+ PM_H264RSTN);
+
+ case BCM2835_POWER_DOMAIN_USB:
+ PM_WRITE(PM_USB, PM_USB_CTRLEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI0:
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI1:
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_CCP2TX:
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_HDMI:
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD);
+ usleep_range(100, 200);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR);
+ return 0;
+
+ default:
+ dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+ return -EINVAL;
+ }
+}
+
+static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct bcm2835_power_domain *pd =
+ container_of(domain, struct bcm2835_power_domain, base);
+ struct bcm2835_power *power = pd->power;
+
+ switch (pd->domain) {
+ case BCM2835_POWER_DOMAIN_GRAFX:
+ return bcm2835_power_power_off(pd, PM_GRAFX);
+
+ case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+ return bcm2835_asb_power_off(pd, PM_GRAFX,
+ ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+ PM_V3DRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE:
+ return bcm2835_power_power_off(pd, PM_IMAGE);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ 0, 0,
+ PM_PERIRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+ PM_ISPRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_H264:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+ PM_H264RSTN);
+
+ case BCM2835_POWER_DOMAIN_USB:
+ PM_WRITE(PM_USB, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI0:
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+ PM_WRITE(PM_DSI0, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI1:
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+ PM_WRITE(PM_DSI1, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_CCP2TX:
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+ PM_WRITE(PM_CCP2TX, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_HDMI:
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN);
+ return 0;
+
+ default:
+ dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+ return -EINVAL;
+ }
+}
+
+static void
+bcm2835_init_power_domain(struct bcm2835_power *power,
+ int pd_xlate_index, const char *name)
+{
+ struct device *dev = power->dev;
+ struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
+
+ dom->clk = devm_clk_get(dev->parent, name);
+
+ dom->base.name = name;
+ dom->base.power_on = bcm2835_power_pd_power_on;
+ dom->base.power_off = bcm2835_power_pd_power_off;
+
+ dom->domain = pd_xlate_index;
+ dom->power = power;
+
+ /* XXX: on/off at boot? */
+ pm_genpd_init(&dom->base, NULL, true);
+
+ power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+}
+
+/**
+ * bcm2835_reset_reset - Resets a block that has a reset line in the
+ * PM block.
+ *
+ * The consumer of the reset controller must have the power domain up
+ * -- there's no reset ability with the power domain down. To reset
+ * the sub-block, we just disable its access to memory through the
+ * ASB, reset, and re-enable.
+ */
+static int bcm2835_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+ reset);
+ struct bcm2835_power_domain *pd;
+ int ret;
+
+ switch (id) {
+ case BCM2835_RESET_V3D:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D];
+ break;
+ case BCM2835_RESET_H264:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264];
+ break;
+ case BCM2835_RESET_ISP:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP];
+ break;
+ default:
+ dev_err(power->dev, "Bad reset id %ld\n", id);
+ return -EINVAL;
+ }
+
+ ret = bcm2835_power_pd_power_off(&pd->base);
+ if (ret)
+ return ret;
+
+ return bcm2835_power_pd_power_on(&pd->base);
+}
+
+static int bcm2835_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+ reset);
+
+ switch (id) {
+ case BCM2835_RESET_V3D:
+ return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
+ case BCM2835_RESET_H264:
+ return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
+ case BCM2835_RESET_ISP:
+ return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct reset_control_ops bcm2835_reset_ops = {
+ .reset = bcm2835_reset_reset,
+ .status = bcm2835_reset_status,
+};
+
+static const char *const power_domain_names[] = {
+ [BCM2835_POWER_DOMAIN_GRAFX] = "grafx",
+ [BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d",
+
+ [BCM2835_POWER_DOMAIN_IMAGE] = "image",
+ [BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image",
+ [BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264",
+ [BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp",
+
+ [BCM2835_POWER_DOMAIN_USB] = "usb",
+ [BCM2835_POWER_DOMAIN_DSI0] = "dsi0",
+ [BCM2835_POWER_DOMAIN_DSI1] = "dsi1",
+ [BCM2835_POWER_DOMAIN_CAM0] = "cam0",
+ [BCM2835_POWER_DOMAIN_CAM1] = "cam1",
+ [BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx",
+ [BCM2835_POWER_DOMAIN_HDMI] = "hdmi",
+};
+
+static int bcm2835_power_probe(struct platform_device *pdev)
+{
+ struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct bcm2835_power *power;
+ static const struct {
+ int parent, child;
+ } domain_deps[] = {
+ { BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
+ };
+ int ret, i;
+ u32 id;
+
+ power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, power);
+
+ power->dev = dev;
+ power->base = pm->base;
+ power->asb = pm->asb;
+
+ id = ASB_READ(ASB_AXI_BRDG_ID);
+ if (id != 0x62726467 /* "BRDG" */) {
+ dev_err(dev, "ASB register ID returned 0x%08x\n", id);
+ return -ENODEV;
+ }
+
+ power->pd_xlate.domains = devm_kcalloc(dev,
+ ARRAY_SIZE(power_domain_names),
+ sizeof(*power->pd_xlate.domains),
+ GFP_KERNEL);
+ if (!power->pd_xlate.domains)
+ return -ENOMEM;
+
+ power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
+
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
+ bcm2835_init_power_domain(power, i, power_domain_names[i]);
+
+ for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
+ pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
+ &power->domains[domain_deps[i].child].base);
+ }
+
+ power->reset.owner = THIS_MODULE;
+ power->reset.nr_resets = BCM2835_RESET_COUNT;
+ power->reset.ops = &bcm2835_reset_ops;
+ power->reset.of_node = dev->parent->of_node;
+
+ ret = devm_reset_controller_register(dev, &power->reset);
+ if (ret)
+ return ret;
+
+ of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
+
+ dev_info(dev, "Broadcom BCM2835 power domains driver");
+ return 0;
+}
+
+static int bcm2835_power_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver bcm2835_power_driver = {
+ .probe = bcm2835_power_probe,
+ .remove = bcm2835_power_remove,
+ .driver = {
+ .name = "bcm2835-power",
+ },
+};
+module_platform_driver(bcm2835_power_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset");
+MODULE_LICENSE("GPL");
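For context, a minimal consumer sketch (not part of this patch): the driver above registers both a genpd provider and a reset controller on the parent PM node, so a client such as the V3D block can pick up its reset line through the standard reset framework. The device name and its DT wiring (resets = <&pm BCM2835_RESET_V3D>) are assumptions here; only the reset framework calls are the stock kernel API.

#include <linux/platform_device.h>
#include <linux/reset.h>

static int v3d_example_probe(struct platform_device *pdev)
{
	struct reset_control *rstc;

	/* Looks up the reset specifier from the consumer's DT node. */
	rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* Ends up in bcm2835_reset_reset(), which power-cycles the block. */
	return reset_control_reset(rstc);
}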
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 8f80e8bbf29e..61f8e1433d0a 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -22,6 +22,7 @@ config FSL_GUTS
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
+ select SOC_BUS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index 5814d2f395a4..e13fd3ac1939 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -26,6 +26,7 @@
#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
struct dpio_cmd_open {
__le32 dpio_id;
@@ -47,4 +48,8 @@ struct dpio_rsp_get_attr {
__le32 qbman_version;
};
+struct dpio_stashing_dest {
+ u8 sdest;
+};
+
#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 2d4af32a0dec..c0cdc8946031 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
@@ -32,6 +33,46 @@ struct dpio_priv {
static cpumask_var_t cpus_unused_mask;
+static const struct soc_device_attribute ls1088a_soc[] = {
+ {.family = "QorIQ LS1088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2080a_soc[] = {
+ {.family = "QorIQ LS2080A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2088a_soc[] = {
+ {.family = "QorIQ LS2088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute lx2160a_soc[] = {
+ {.family = "QorIQ LX2160A"},
+ { /* sentinel */ }
+};
+
+static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ int cluster_base, cluster_size;
+
+ if (soc_device_match(ls1088a_soc)) {
+ cluster_base = 2;
+ cluster_size = 4;
+ } else if (soc_device_match(ls2080a_soc) ||
+ soc_device_match(ls2088a_soc) ||
+ soc_device_match(lx2160a_soc)) {
+ cluster_base = 0;
+ cluster_size = 2;
+ } else {
+ dev_err(&dpio_dev->dev, "unknown SoC version\n");
+ return -1;
+ }
+
+ return cluster_base + cpu / cluster_size;
+}
+
static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
{
struct device *dev = (struct device *)arg;
@@ -89,6 +130,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
int err = -ENOMEM;
struct device *dev = &dpio_dev->dev;
int possible_next_cpu;
+ int sdest;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -145,6 +187,16 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
desc.cpu = possible_next_cpu;
cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
+ sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
+ if (sdest >= 0) {
+ err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
+ dpio_dev->mc_handle,
+ sdest);
+ if (err)
+ dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
+ desc.cpu);
+ }
+
/*
* Set the CENA regs to be the cache inhibited area of the portal to
* avoid coherency issues if a user migrates to another core.
@@ -220,12 +272,12 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
dev = &dpio_dev->dev;
priv = dev_get_drvdata(dev);
+ cpu = dpaa2_io_get_cpu(priv->io);
dpaa2_io_down(priv->io);
dpio_teardown_irqs(dpio_dev);
- cpu = dpaa2_io_get_cpu(priv->io);
cpumask_set_cpu(cpu, cpus_unused_mask);
err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
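As a worked example of dpaa2_dpio_get_cluster_sdest() above: on an LS1088A (cluster_base 2, cluster_size 4) the DPIO affined to CPU 5 gets stashing destination 2 + 5/4 = 3, while on an LS2088A (cluster_base 0, cluster_size 2) the same CPU maps to 0 + 5/2 = 2.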
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index bc801934602a..b9539ef2c3cd 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -473,7 +473,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
* Return 0 for success, and negative error code for failure.
*/
int dpaa2_io_service_release(struct dpaa2_io *d,
- u32 bpid,
+ u16 bpid,
const u64 *buffers,
unsigned int num_buffers)
{
@@ -502,7 +502,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
* Eg. if the buffer pool is empty, this will return zero.
*/
int dpaa2_io_service_acquire(struct dpaa2_io *d,
- u32 bpid,
+ u16 bpid,
u64 *buffers,
unsigned int num_buffers)
{
@@ -630,6 +630,7 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
ret = NULL;
} else {
+ prefetch(&s->vaddr[s->idx]);
*is_last = 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index 521bc6946317..af74c597a675 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -166,6 +166,22 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 sdest)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_stashing_dest *dpio_cmd;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags, token);
+ dpio_cmd = (struct dpio_stashing_dest *)cmd.params;
+ dpio_cmd->sdest = sdest;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpio_get_api_version - Get Data Path I/O API version
* @mc_io: Pointer to MC portal's DPIO object
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index b2ac4ba4fb8e..da06f7258098 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -75,6 +75,11 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpio_attr *attr);
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 dest);
+
int dpio_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 0bddb85c0ae5..d02013556a1b 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -169,9 +169,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
3, /* RPM: Valid bit mode, RCR in array mode */
2, /* DCM: Discrete consumption ack mode */
3, /* EPM: Valid bit mode, EQCR in array mode */
- 0, /* mem stashing drop enable == FALSE */
+ 1, /* mem stashing drop enable == TRUE */
1, /* mem stashing priority == TRUE */
- 0, /* mem stashing enable == FALSE */
+ 1, /* mem stashing enable == TRUE */
1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */
@@ -180,6 +180,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
if (!reg) {
pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
return NULL;
}
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 302e0c8d69d9..63f6df86f9e5 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -32,6 +32,7 @@ struct fsl_soc_die_attr {
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
+static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -114,7 +115,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
return NULL;
}
-u32 fsl_guts_get_svr(void)
+static u32 fsl_guts_get_svr(void)
{
u32 svr = 0;
@@ -128,11 +129,10 @@ u32 fsl_guts_get_svr(void)
return svr;
}
-EXPORT_SYMBOL(fsl_guts_get_svr);
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *root, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct resource *res;
const struct fsl_soc_die_attr *soc_die;
@@ -155,9 +155,8 @@ static int fsl_guts_probe(struct platform_device *pdev)
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- of_node_put(root);
if (machine)
- soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ soc_dev_attr.machine = machine;
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -192,6 +191,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
+ of_node_put(root);
return 0;
}
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 2112d18dbb7b..d80f899d22f9 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -2,7 +2,7 @@ menu "i.MX SoC drivers"
config IMX_GPCV2_PM_DOMAINS
bool "i.MX GPCv2 PM domains"
- depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF)
+ depends on ARCH_MXC || (COMPILE_TEST && OF)
depends on PM
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 8b4f48a2ca57..176f473127b6 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -8,6 +8,7 @@
* Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
*/
+#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -65,6 +66,12 @@
#define GPC_M4_PU_PDN_FLG 0x1bc
+#define GPC_PU_PWRHSK 0x1fc
+
+#define IMX8M_GPU_HSK_PWRDNREQN BIT(6)
+#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
+#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
+
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -92,16 +99,21 @@
#define GPC_PGC_CTRL_PCR BIT(0)
+#define GPC_CLK_MAX 6
+
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
struct regulator *regulator;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
unsigned int pgc;
const struct {
u32 pxx;
u32 map;
+ u32 hsk;
} bits;
const int voltage;
@@ -125,7 +137,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
const bool enable_power_control = !on;
const bool has_regulator = !IS_ERR(domain->regulator);
unsigned long deadline;
- int ret = 0;
+ int i, ret = 0;
regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
domain->bits.map, domain->bits.map);
@@ -138,10 +150,18 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
}
}
+ /* Enable reset clocks for all devices in the domain */
+ for (i = 0; i < domain->num_clks; i++)
+ clk_prepare_enable(domain->clk[i]);
+
if (enable_power_control)
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ if (domain->bits.hsk)
+ regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
+ domain->bits.hsk, on ? domain->bits.hsk : 0);
+
regmap_update_bits(domain->regmap, offset,
domain->bits.pxx, domain->bits.pxx);
@@ -179,6 +199,10 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
GPC_PGC_CTRL_PCR, 0);
+ /* Disable reset clocks for all devices in the domain */
+ for (i = 0; i < domain->num_clks; i++)
+ clk_disable_unprepare(domain->clk[i]);
+
if (has_regulator && !on) {
int err;
@@ -328,6 +352,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_GPU_SW_Pxx_REQ,
.map = IMX8M_GPU_A53_DOMAIN,
+ .hsk = IMX8M_GPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_GPU,
},
@@ -339,6 +364,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_VPU_SW_Pxx_REQ,
.map = IMX8M_VPU_A53_DOMAIN,
+ .hsk = IMX8M_VPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_VPU,
},
@@ -350,6 +376,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_DISP_SW_Pxx_REQ,
.map = IMX8M_DISP_A53_DOMAIN,
+ .hsk = IMX8M_DISP_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_DISP,
},
@@ -390,7 +417,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
static const struct regmap_range imx8m_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
- GPC_M4_PU_PDN_FLG),
+ GPC_PU_PWRHSK),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI),
GPC_PGC_SR(IMX8M_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1),
@@ -426,6 +453,41 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.reg_access_table = &imx8m_access_table,
};
+static int imx_pgc_get_clocks(struct imx_pgc_domain *domain)
+{
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ struct clk *clk = of_clk_get(domain->dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ if (i >= GPC_CLK_MAX) {
+ dev_err(domain->dev, "more than %d clocks\n",
+ GPC_CLK_MAX);
+ ret = -EINVAL;
+ goto clk_err;
+ }
+ domain->clk[i] = clk;
+ }
+ domain->num_clks = i;
+
+ return 0;
+
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
+
+ return ret;
+}
+
+static void imx_pgc_put_clocks(struct imx_pgc_domain *domain)
+{
+ int i;
+
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
+
static int imx_pgc_domain_probe(struct platform_device *pdev)
{
struct imx_pgc_domain *domain = pdev->dev.platform_data;
@@ -445,9 +507,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
domain->voltage, domain->voltage);
}
+ ret = imx_pgc_get_clocks(domain);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(domain->dev, "Failed to get domain's clocks\n");
+ return ret;
+ }
+
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err(domain->dev, "Failed to init power domain\n");
+ imx_pgc_put_clocks(domain);
return ret;
}
@@ -456,6 +526,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
if (ret) {
dev_err(domain->dev, "Failed to add genpd provider\n");
pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
}
return ret;
@@ -467,6 +538,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
of_genpd_del_provider(domain->dev->of_node);
pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
return 0;
}
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index fcbf8a2e4080..1ee298f6bf17 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -98,6 +98,24 @@ config QCOM_RPMH
of hardware components aggregate requests for these resources and
help apply the aggregated state on the resource.
+config QCOM_RPMHPD
+ bool "Qualcomm RPMh Power domain driver"
+ depends on QCOM_RPMH && QCOM_COMMAND_DB
+ help
+ QCOM RPMh Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPMh, which then translates it into the corresponding
+ voltage for the voltage rail.
+
+config QCOM_RPMPD
+ bool "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM=y
+ help
+ QCOM RPM Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPM, which then translates it into the corresponding
+ voltage for the voltage rail.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f25b54cd6cf8..ffe519b0cb66 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
index 2e1e4f0a5db8..86600d97c36d 100644
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -71,6 +71,11 @@ static struct llcc_slice_config sdm845_data[] = {
SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
};
+static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
+{
+ return qcom_llcc_remove(pdev);
+}
+
static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
{
return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
@@ -87,6 +92,7 @@ static struct platform_driver sdm845_qcom_llcc_driver = {
.of_match_table = sdm845_qcom_llcc_of_match,
},
.probe = sdm845_qcom_llcc_probe,
+ .remove = sdm845_qcom_llcc_remove,
};
module_platform_driver(sdm845_qcom_llcc_driver);
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 80667f7be52c..9090ea12eaf3 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -46,7 +46,7 @@
#define BANK_OFFSET_STRIDE 0x80000
-static struct llcc_drv_data *drv_data;
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
static const struct regmap_config llcc_regmap_config = {
.reg_bits = 32,
@@ -68,6 +68,9 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid)
struct llcc_slice_desc *desc;
u32 sz, count;
+ if (IS_ERR(drv_data))
+ return ERR_CAST(drv_data);
+
cfg = drv_data->cfg;
sz = drv_data->cfg_size;
@@ -108,6 +111,9 @@ static int llcc_update_act_ctrl(u32 sid,
u32 slice_status;
int ret;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
status_reg = LLCC_TRP_STATUSn(sid);
@@ -143,6 +149,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc)
int ret;
u32 act_ctrl_val;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
if (IS_ERR_OR_NULL(desc))
return -EINVAL;
@@ -180,6 +189,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc)
u32 act_ctrl_val;
int ret;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
if (IS_ERR_OR_NULL(desc))
return -EINVAL;
@@ -289,46 +301,62 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
return ret;
}
+int qcom_llcc_remove(struct platform_device *pdev)
+{
+ /* Set the global pointer to an error code to avoid referencing it */
+ drv_data = ERR_PTR(-ENODEV);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_remove);
+
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
+}
+
int qcom_llcc_probe(struct platform_device *pdev,
const struct llcc_slice_config *llcc_cfg, u32 sz)
{
u32 num_banks;
struct device *dev = &pdev->dev;
- struct resource *llcc_banks_res, *llcc_bcast_res;
- void __iomem *llcc_banks_base, *llcc_bcast_base;
int ret, i;
struct platform_device *llcc_edac;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data)
- return -ENOMEM;
-
- llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "llcc_base");
- llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res);
- if (IS_ERR(llcc_banks_base))
- return PTR_ERR(llcc_banks_base);
-
- drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base,
- &llcc_regmap_config);
- if (IS_ERR(drv_data->regmap))
- return PTR_ERR(drv_data->regmap);
-
- llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "llcc_broadcast_base");
- llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res);
- if (IS_ERR(llcc_bcast_base))
- return PTR_ERR(llcc_bcast_base);
-
- drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base,
- &llcc_regmap_config);
- if (IS_ERR(drv_data->bcast_regmap))
- return PTR_ERR(drv_data->bcast_regmap);
+ if (!drv_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
+ if (IS_ERR(drv_data->regmap)) {
+ ret = PTR_ERR(drv_data->regmap);
+ goto err;
+ }
+
+ drv_data->bcast_regmap =
+ qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
+ if (IS_ERR(drv_data->bcast_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
&num_banks);
if (ret)
- return ret;
+ goto err;
num_banks &= LLCC_LB_CNT_MASK;
num_banks >>= LLCC_LB_CNT_SHIFT;
@@ -340,8 +368,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
GFP_KERNEL);
- if (!drv_data->offsets)
- return -ENOMEM;
+ if (!drv_data->offsets) {
+ ret = -ENOMEM;
+ goto err;
+ }
for (i = 0; i < num_banks; i++)
drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
@@ -349,8 +379,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
drv_data->bitmap = devm_kcalloc(dev,
BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
GFP_KERNEL);
- if (!drv_data->bitmap)
- return -ENOMEM;
+ if (!drv_data->bitmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
@@ -359,7 +391,7 @@ int qcom_llcc_probe(struct platform_device *pdev,
ret = qcom_llcc_cfg_program(pdev);
if (ret)
- return ret;
+ goto err;
drv_data->ecc_irq = platform_get_irq(pdev, 0);
if (drv_data->ecc_irq >= 0) {
@@ -370,6 +402,9 @@ int qcom_llcc_probe(struct platform_device *pdev,
dev_err(dev, "Failed to register llcc edac driver\n");
}
+ return 0;
+err:
+ drv_data = ERR_PTR(-ENODEV);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_llcc_probe);
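A minimal client sketch (not part of this patch) showing why drv_data is primed with -EPROBE_DEFER: a slice user that runs before the LLCC driver has probed now gets a deferral instead of dereferencing a NULL pointer. The LLCC_GPU usage ID is only an example value.

#include <linux/soc/qcom/llcc-qcom.h>

static int example_llcc_client_init(void)
{
	struct llcc_slice_desc *desc;

	/* Returns ERR_PTR(-EPROBE_DEFER) until qcom_llcc_probe() has run. */
	desc = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	return llcc_slice_activate(desc);
}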
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 09c669e70d63..038abc377fdb 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
- int i;
+ int i, ret;
u32 mask, gsbi_num;
const struct crci_config *config = NULL;
@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gsbi);
- return of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
}
static int gsbi_remove(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 97bb5989aa21..7200d762a951 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
char *buf);
-static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
@@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
+static struct class rmtfs_class = {
+ .owner = THIS_MODULE,
+ .name = "rmtfs",
+};
+
static const struct file_operations qcom_rmtfs_mem_fops = {
.owner = THIS_MODULE,
.open = qcom_rmtfs_mem_open,
@@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.class = &rmtfs_class;
rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
@@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = {
},
};
-static int qcom_rmtfs_mem_init(void)
+static int __init qcom_rmtfs_mem_init(void)
{
int ret;
+ ret = class_register(&rmtfs_class);
+ if (ret)
+ return ret;
+
ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
- return ret;
+ goto unregister_class;
}
ret = platform_driver_register(&qcom_rmtfs_mem_driver);
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
- unregister_chrdev_region(qcom_rmtfs_mem_major,
- QCOM_RMTFS_MEM_DEV_MAX);
+ goto unregister_chrdev;
}
+ return 0;
+
+unregister_chrdev:
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+ class_unregister(&rmtfs_class);
return ret;
}
module_init(qcom_rmtfs_mem_init);
-static void qcom_rmtfs_mem_exit(void)
+static void __exit qcom_rmtfs_mem_exit(void)
{
platform_driver_unregister(&qcom_rmtfs_mem_driver);
unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+ class_unregister(&rmtfs_class);
}
module_exit(qcom_rmtfs_mem_exit);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index c7beb6841289..035091fd44b8 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
msg);
struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
rpm_msg->err = r;
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
complete(compl);
exit:
- if (rpm_msg->needs_free)
+ if (free)
kfree(rpm_msg);
}
@@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
- ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
- &rpm_msg->msg);
/* Clean up our call by spoofing tx_done */
+ ret = 0;
rpmh_tx_done(&rpm_msg->msg, ret);
}
@@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
{
struct batch_cache_req *req;
struct rpmh_request *rpm_msgs;
- DECLARE_COMPLETION_ONSTACK(compl);
+ struct completion *compls;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
unsigned long time_left;
int count = 0;
- int ret, i, j;
+ int ret, i;
+ void *ptr;
if (!cmd || !n)
return -EINVAL;
@@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
if (!count)
return -EINVAL;
- req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
GFP_ATOMIC);
- if (!req)
+ if (!ptr)
return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
req->count = count;
rpm_msgs = req->rpm_msgs;
@@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
for (i = 0; i < count; i++) {
- rpm_msgs[i].completion = &compl;
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
if (ret) {
pr_err("Error(%d) sending RPMH message addr=%#x\n",
ret, rpm_msgs[i].msg.cmds[0].addr);
- for (j = i; j < count; j++)
- rpmh_tx_done(&rpm_msgs[j].msg, ret);
break;
}
}
time_left = RPMH_TIMEOUT_MS;
- for (i = 0; i < count; i++) {
- time_left = wait_for_completion_timeout(&compl, time_left);
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
if (!time_left) {
/*
* Better hope they never finish because they'll signal
- * the completion on our stack and that's bad once
- * we've returned from the function.
+ * the completion that we're going to free once
+ * we've returned from this function.
*/
WARN_ON(1);
ret = -ETIMEDOUT;
@@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
exit:
- kfree(req);
+ kfree(ptr);
return ret;
}
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
new file mode 100644
index 000000000000..5741ec3fa814
--- /dev/null
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
+
+#define RPMH_ARC_MAX_LEVELS 16
+
+/**
+ * struct rpmhpd - top level RPMh power domain resource data structure
+ * @dev: rpmh power domain controller device
+ * @pd: generic_pm_domain corresponding to the power domain
+ * @peer: A peer power domain in case Active only Voting is
+ * supported
+ * @active_only: True if it represents an Active only peer
+ * @level: An array of level (vlvl) to corner (hlvl) mappings
+ * derived from cmd-db
+ * @level_count: Number of levels supported by the power domain. max
+ * being 16 (0 - 15)
+ * @enabled: true if the power domain is enabled
+ * @res_name: Resource name used for cmd-db lookup
+ * @addr: Resource address as looked up using the resource name from
+ * cmd-db
+ */
+struct rpmhpd {
+ struct device *dev;
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct rpmhpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ unsigned int active_corner;
+ u32 level[RPMH_ARC_MAX_LEVELS];
+ size_t level_count;
+ bool enabled;
+ const char *res_name;
+ u32 addr;
+};
+
+struct rpmhpd_desc {
+ struct rpmhpd **rpmhpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmhpd_lock);
+
+/* SDM845 RPMH powerdomains */
+
+static struct rpmhpd sdm845_ebi = {
+ .pd = { .name = "ebi", },
+ .res_name = "ebi.lvl",
+};
+
+static struct rpmhpd sdm845_lmx = {
+ .pd = { .name = "lmx", },
+ .res_name = "lmx.lvl",
+};
+
+static struct rpmhpd sdm845_lcx = {
+ .pd = { .name = "lcx", },
+ .res_name = "lcx.lvl",
+};
+
+static struct rpmhpd sdm845_gfx = {
+ .pd = { .name = "gfx", },
+ .res_name = "gfx.lvl",
+};
+
+static struct rpmhpd sdm845_mss = {
+ .pd = { .name = "mss", },
+ .res_name = "mss.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao;
+static struct rpmhpd sdm845_mx = {
+ .pd = { .name = "mx", },
+ .peer = &sdm845_mx_ao,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &sdm845_mx,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao;
+static struct rpmhpd sdm845_cx = {
+ .pd = { .name = "cx", },
+ .peer = &sdm845_cx_ao,
+ .parent = &sdm845_mx.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &sdm845_cx,
+ .parent = &sdm845_mx_ao.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd *sdm845_rpmhpds[] = {
+ [SDM845_EBI] = &sdm845_ebi,
+ [SDM845_MX] = &sdm845_mx,
+ [SDM845_MX_AO] = &sdm845_mx_ao,
+ [SDM845_CX] = &sdm845_cx,
+ [SDM845_CX_AO] = &sdm845_cx_ao,
+ [SDM845_LMX] = &sdm845_lmx,
+ [SDM845_LCX] = &sdm845_lcx,
+ [SDM845_GFX] = &sdm845_gfx,
+ [SDM845_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sdm845_desc = {
+ .rpmhpds = sdm845_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
+};
+
+static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { }
+};
+
+static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
+ unsigned int corner, bool sync)
+{
+ struct tcs_cmd cmd = {
+ .addr = pd->addr,
+ .data = corner,
+ };
+
+ /*
+ * Wait for an ack only when we are increasing the
+ * perf state of the power domain
+ */
+ if (sync)
+ return rpmh_write(pd->dev, state, &cmd, 1);
+ else
+ return rpmh_write_async(pd->dev, state, &cmd, 1);
+}
+
+static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * This function aggregates the votes across the active-only
+ * resources and their peers. The aggregated votes are sent to RPMh as
+ * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
+ * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
+ * on system sleep).
+ * We send ACTIVE_ONLY votes for resources without any peers. For others,
+ * which have an active-only peer, all three votes are sent.
+ */
+static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+{
+ int ret;
+ struct rpmhpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
+ active_corner > pd->active_corner);
+ if (ret)
+ return ret;
+
+ pd->active_corner = active_corner;
+
+ if (peer) {
+ peer->active_corner = active_corner;
+
+ ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
+ active_corner, false);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
+ false);
+ }
+
+ return ret;
+}
+
+static int rpmhpd_power_on(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ if (pd->corner)
+ ret = rpmhpd_aggregate_corner(pd, pd->corner);
+
+ if (!ret)
+ pd->enabled = true;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_power_off(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
+
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int level)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0, i;
+
+ mutex_lock(&rpmhpd_lock);
+
+ for (i = 0; i < pd->level_count; i++)
+ if (level <= pd->level[i])
+ break;
+
+ /*
+ * If the level requested is more than that supported by the
+ * max corner, just set it to max anyway.
+ */
+ if (i == pd->level_count)
+ i--;
+
+ if (pd->enabled) {
+ ret = rpmhpd_aggregate_corner(pd, i);
+ if (ret)
+ goto out;
+ }
+
+ pd->corner = i;
+out:
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+{
+ int i;
+ const u16 *buf;
+
+ buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* 2 bytes used for each command DB aux data entry */
+ rpmhpd->level_count >>= 1;
+
+ if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
+ rpmhpd->level[i] = buf[i];
+
+ /*
+ * The AUX data may be zero padded. These 0 valued entries at
+ * the end of the map must be ignored.
+ */
+ if (i > 0 && rpmhpd->level[i] == 0) {
+ rpmhpd->level_count = i;
+ break;
+ }
+ pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
+ rpmhpd->level[i]);
+ }
+
+ return 0;
+}
+
+static int rpmhpd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ size_t num_pds;
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *data;
+ struct rpmhpd **rpmhpds;
+ const struct rpmhpd_desc *desc;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmhpds = desc->rpmhpds;
+ num_pds = desc->num_pds;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num_pds;
+
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i]) {
+ dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+ continue;
+ }
+
+ rpmhpds[i]->dev = dev;
+ rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
+ if (!rpmhpds[i]->addr) {
+ dev_err(dev, "Could not find RPMh address for resource %s\n",
+ rpmhpds[i]->res_name);
+ return -ENODEV;
+ }
+
+ ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
+ if (ret != CMD_DB_HW_ARC) {
+ dev_err(dev, "RPMh slave ID mismatch\n");
+ return -EINVAL;
+ }
+
+ ret = rpmhpd_update_level_mapping(rpmhpds[i]);
+ if (ret)
+ return ret;
+
+ rpmhpds[i]->pd.power_off = rpmhpd_power_off;
+ rpmhpds[i]->pd.power_on = rpmhpd_power_on;
+ rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
+ rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
+ pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmhpds[i]->pd;
+ }
+
+ /* Add subdomains */
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i])
+ continue;
+ if (rpmhpds[i]->parent)
+ pm_genpd_add_subdomain(rpmhpds[i]->parent,
+ &rpmhpds[i]->pd);
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmhpd_driver = {
+ .driver = {
+ .name = "qcom-rpmhpd",
+ .of_match_table = rpmhpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmhpd_probe,
+};
+
+static int __init rpmhpd_init(void)
+{
+ return platform_driver_register(&rpmhpd_driver);
+}
+core_initcall(rpmhpd_init);
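A hypothetical consumer sketch (not part of this patch): once a device is attached to one of these domains through a power-domains phandle, it can request a performance level with the generic genpd call below; rpmhpd_set_performance_state() then maps the requested vlvl onto the nearest ARC corner read from cmd-db. The RPMH_REGULATOR_LEVEL_NOM macro is assumed to come from the dt-bindings header included above.

#include <linux/pm_domain.h>
#include <dt-bindings/power/qcom-rpmpd.h>

static int example_raise_cx(struct device *dev)
{
	/* Request the nominal level; the driver rounds it to a corner. */
	return dev_pm_genpd_set_performance_state(dev,
						   RPMH_REGULATOR_LEVEL_NOM);
}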
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
new file mode 100644
index 000000000000..005326050c23
--- /dev/null
+++ b/drivers/soc/qcom/rpmpd.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+
+/* Resource types */
+#define RPMPD_SMPA 0x61706d73
+#define RPMPD_LDOA 0x616f646c
+
+/* Operation Keys */
+#define KEY_CORNER 0x6e726f63 /* corn */
+#define KEY_ENABLE 0x6e657773 /* swen */
+#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+
+#define MAX_RPMPD_STATE 6
+
+#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \
+ static struct rpmpd _platform##_##_active; \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .peer = &_platform##_##_active, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }; \
+ static struct rpmpd _platform##_##_active = { \
+ .pd = { .name = #_active, }, \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_LDOA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+
+#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+
+struct rpmpd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpmpd {
+ struct generic_pm_domain pd;
+ struct rpmpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ bool enabled;
+ const char *res_name;
+ const int res_type;
+ const int res_id;
+ struct qcom_smd_rpm *rpm;
+ __le32 key;
+};
+
+struct rpmpd_desc {
+ struct rpmpd **rpmpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmpd_lock);
+
+/* msm8996 RPM Power domains */
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
+DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+
+DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
+DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+
+static struct rpmpd *msm8996_rpmpds[] = {
+ [MSM8996_VDDCX] = &msm8996_vddcx,
+ [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao,
+ [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc,
+ [MSM8996_VDDMX] = &msm8996_vddmx,
+ [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao,
+ [MSM8996_VDDSSCX] = &msm8996_vddsscx,
+ [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+};
+
+static const struct rpmpd_desc msm8996_desc = {
+ .rpmpds = msm8996_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8996_rpmpds),
+};
+
+static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { }
+};
+
+static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
+{
+ struct rpmpd_req req = {
+ .key = KEY_ENABLE,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(enable),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ pd->res_type, pd->res_id, &req, sizeof(req));
+}
+
+static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
+{
+ struct rpmpd_req req = {
+ .key = pd->key,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(corner),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+ &req, sizeof(req));
+}
+
+static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int rpmpd_aggregate_corner(struct rpmpd *pd)
+{
+ int ret;
+ struct rpmpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
+}
+
+static int rpmpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, true);
+ if (ret)
+ goto out;
+
+ pd->enabled = true;
+
+ if (pd->corner)
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, false);
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_set_performance(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ int ret = 0;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+	mutex_lock(&rpmpd_lock);
+
+	if (state > MAX_RPMPD_STATE)
+		goto out;
+
+ pd->corner = state;
+
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+ goto out;
+
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmpd_probe(struct platform_device *pdev)
+{
+ int i;
+ size_t num;
+ struct genpd_onecell_data *data;
+ struct qcom_smd_rpm *rpm;
+ struct rpmpd **rpmpds;
+ const struct rpmpd_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmpds = desc->rpmpds;
+ num = desc->num_pds;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+	data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
+				     GFP_KERNEL);
+	if (!data->domains)
+		return -ENOMEM;
+
+	data->num_domains = num;
+
+ for (i = 0; i < num; i++) {
+ if (!rpmpds[i]) {
+ dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
+ i);
+ continue;
+ }
+
+ rpmpds[i]->rpm = rpm;
+ rpmpds[i]->pd.power_off = rpmpd_power_off;
+ rpmpds[i]->pd.power_on = rpmpd_power_on;
+ rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
+ pm_genpd_init(&rpmpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmpds[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmpd_driver = {
+ .driver = {
+ .name = "qcom-rpmpd",
+ .of_match_table = rpmpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmpd_probe,
+};
+
+static int __init rpmpd_init(void)
+{
+ return platform_driver_register(&rpmpd_driver);
+}
+core_initcall(rpmpd_init);
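The corner aggregation above splits every vote into an active-set and a sleep-set value: an active-only (_ao) domain votes 0 for the sleep set, and the value actually sent to the RPM is the maximum of the domain's own vote and its peer's. Below is a small standalone sketch of that arithmetic, with made-up corner numbers, assuming nothing beyond what to_active_sleep() and rpmpd_aggregate_corner() do.

/* Standalone model of the active/sleep aggregation; corners are invented. */
#include <stdbool.h>
#include <stdio.h>

struct pd {
	const char *name;
	bool active_only;
	bool enabled;
	unsigned int corner;
	struct pd *peer;
};

static void to_active_sleep(const struct pd *pd, unsigned int corner,
			    unsigned int *active, unsigned int *sleep)
{
	*active = corner;
	*sleep = pd->active_only ? 0 : corner;	/* _ao domains drop to 0 in sleep */
}

static void aggregate(const struct pd *pd)
{
	unsigned int ta = 0, ts = 0, pa = 0, ps = 0;

	to_active_sleep(pd, pd->corner, &ta, &ts);
	if (pd->peer && pd->peer->enabled)
		to_active_sleep(pd->peer, pd->peer->corner, &pa, &ps);

	printf("%s: active vote=%u, sleep vote=%u\n", pd->name,
	       ta > pa ? ta : pa, ts > ps ? ts : ps);
}

int main(void)
{
	struct pd vddcx_ao = { "vddcx_ao", true, true, 5, NULL };
	struct pd vddcx = { "vddcx", false, true, 3, &vddcx_ao };

	vddcx_ao.peer = &vddcx;
	aggregate(&vddcx);	/* active = 5 (peer wins), sleep = 3 */
	aggregate(&vddcx_ao);	/* same aggregate, sent for the _ao resource */
	return 0;
}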
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b8e63724a49d..9956bb2c63f2 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index fe4481676da6..a0b03443d8c1 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -76,6 +76,7 @@ config ARCH_TEGRA_210_SOC
select PINCTRL_TEGRA210
select SOC_TEGRA_FLOWCTRL
select SOC_TEGRA_PMC
+ select TEGRA_TIMER
help
Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1,
the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a33ee8ef8b6b..51625703399e 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->phys = res->start;
fuse->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fuse->base))
- return PTR_ERR(fuse->base);
+ if (IS_ERR(fuse->base)) {
+ err = PTR_ERR(fuse->base);
+ fuse->base = base;
+ return err;
+ }
fuse->clk = devm_clk_get(&pdev->dev, "fuse");
if (IS_ERR(fuse->clk)) {
dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk));
+ fuse->base = base;
return PTR_ERR(fuse->clk);
}
@@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
- if (err < 0)
+ if (err < 0) {
+ fuse->base = base;
return err;
+ }
}
if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 5373f4c16b54..8ed35d9851f8 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -131,7 +131,7 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
- soc_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7ea3280279ff..0df258518693 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) "tegra-pmc: " fmt
-#include <linux/kernel.h>
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -30,16 +30,17 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reboot.h>
@@ -145,6 +146,11 @@
#define WAKE_AOWAKE_CTRL 0x4f4
#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
+/* for secure PMC */
+#define TEGRA_SMC_PMC 0xc2fffe00
+#define TEGRA_SMC_PMC_READ 0xaa
+#define TEGRA_SMC_PMC_WRITE 0xbb
+
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_pmc *pmc;
@@ -216,6 +222,7 @@ struct tegra_pmc_soc {
bool has_gpu_clamps;
bool needs_mbist_war;
bool has_impl_33v_pwr;
+ bool maybe_tz_only;
const struct tegra_io_pad_soc *io_pads;
unsigned int num_io_pads;
@@ -273,8 +280,12 @@ static const char * const tegra30_reset_sources[] = {
* struct tegra_pmc - NVIDIA Tegra PMC
* @dev: pointer to PMC device structure
* @base: pointer to I/O remapped register region
+ * @wake: pointer to I/O remapped region for WAKE registers
+ * @aotag: pointer to I/O remapped region for AOTAG registers
+ * @scratch: pointer to I/O remapped region for scratch registers
* @clk: pointer to pclk clock
* @soc: pointer to SoC data structure
+ * @tz_only: flag specifying if the PMC can only be accessed via TrustZone
* @debugfs: pointer to debugfs entry
* @rate: currently configured rate of pclk
* @suspend_mode: lowest suspend mode available
@@ -291,6 +302,9 @@ static const char * const tegra30_reset_sources[] = {
* @lp0_vec_size: size of the LP0 warm boot code
* @powergates_available: Bitmap of available power gates
* @powergates_lock: mutex for power gate register access
+ * @pctl_dev: pin controller exposed by the PMC
+ * @domain: IRQ domain provided by the PMC
+ * @irq: chip implementation for the IRQ domain
*/
struct tegra_pmc {
struct device *dev;
@@ -302,6 +316,7 @@ struct tegra_pmc {
struct dentry *debugfs;
const struct tegra_pmc_soc *soc;
+ bool tz_only;
unsigned long rate;
@@ -338,30 +353,85 @@ to_powergate(struct generic_pm_domain *domain)
return container_of(domain, struct tegra_powergate, genpd);
}
-static u32 tegra_pmc_readl(unsigned long offset)
+static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset)
{
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0,
+ 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+
+ return res.a1;
+ }
+
return readl(pmc->base + offset);
}
-static void tegra_pmc_writel(u32 value, unsigned long offset)
+static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
{
- writel(value, pmc->base + offset);
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset,
+ value, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+ } else {
+ writel(value, pmc->base + offset);
+ }
}
+static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset)
+{
+ if (pmc->tz_only)
+ return tegra_pmc_readl(pmc, offset);
+
+ return readl(pmc->scratch + offset);
+}
+
+static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
+{
+ if (pmc->tz_only)
+ tegra_pmc_writel(pmc, value, offset);
+ else
+ writel(value, pmc->scratch + offset);
+}
+
+/*
+ * TODO Figure out a way to call this with the struct tegra_pmc * passed in.
+ * This currently doesn't work because readx_poll_timeout() can only operate
+ * on functions that take a single argument.
+ */
static inline bool tegra_powergate_state(int id)
{
if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
- return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+ return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0;
else
- return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+ return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0;
}
-static inline bool tegra_powergate_is_valid(int id)
+static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id)
{
return (pmc->soc && pmc->soc->powergates[id]);
}
-static inline bool tegra_powergate_is_available(int id)
+static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id)
{
return test_bit(id, pmc->powergates_available);
}
@@ -374,7 +444,7 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return -EINVAL;
for (i = 0; i < pmc->soc->num_powergates; i++) {
- if (!tegra_powergate_is_valid(i))
+ if (!tegra_powergate_is_valid(pmc, i))
continue;
if (!strcmp(name, pmc->soc->powergates[i]))
@@ -386,10 +456,12 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
/**
* tegra_powergate_set() - set the state of a partition
+ * @pmc: power management controller
* @id: partition ID
* @new_state: new state of the partition
*/
-static int tegra_powergate_set(unsigned int id, bool new_state)
+static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
{
bool status;
int err;
@@ -404,7 +476,7 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
return 0;
}
- tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
err = readx_poll_timeout(tegra_powergate_state, id, status,
status == new_state, 10, 100000);
@@ -414,7 +486,8 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
return err;
}
-static int __tegra_powergate_remove_clamping(unsigned int id)
+static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc,
+ unsigned int id)
{
u32 mask;
@@ -426,7 +499,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
*/
if (id == TEGRA_POWERGATE_3D) {
if (pmc->soc->has_gpu_clamps) {
- tegra_pmc_writel(0, GPU_RG_CNTRL);
+ tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL);
goto out;
}
}
@@ -442,7 +515,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
else
mask = (1 << id);
- tegra_pmc_writel(mask, REMOVE_CLAMPING);
+ tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING);
out:
mutex_unlock(&pmc->powergates_lock);
@@ -494,7 +567,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
- err = tegra_powergate_set(pg->id, true);
+ err = tegra_powergate_set(pg->pmc, pg->id, true);
if (err < 0)
return err;
@@ -506,7 +579,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
- err = __tegra_powergate_remove_clamping(pg->id);
+ err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
if (err)
goto disable_clks;
@@ -533,7 +606,7 @@ disable_clks:
usleep_range(10, 20);
powergate_off:
- tegra_powergate_set(pg->id, false);
+ tegra_powergate_set(pg->pmc, pg->id, false);
return err;
}
@@ -558,7 +631,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
usleep_range(10, 20);
- err = tegra_powergate_set(pg->id, false);
+ err = tegra_powergate_set(pg->pmc, pg->id, false);
if (err)
goto assert_resets;
@@ -579,12 +652,13 @@ disable_clks:
static int tegra_genpd_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
int err;
err = tegra_powergate_power_up(pg, true);
if (err)
- pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
- err);
+ dev_err(dev, "failed to turn on PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -592,12 +666,13 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
static int tegra_genpd_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
int err;
err = tegra_powergate_power_down(pg);
if (err)
- pr_err("failed to turn off PM domain %s: %d\n",
- pg->genpd.name, err);
+ dev_err(dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -608,10 +683,10 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
*/
int tegra_powergate_power_on(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return tegra_powergate_set(id, true);
+ return tegra_powergate_set(pmc, id, true);
}
/**
@@ -620,20 +695,21 @@ int tegra_powergate_power_on(unsigned int id)
*/
int tegra_powergate_power_off(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return tegra_powergate_set(id, false);
+ return tegra_powergate_set(pmc, id, false);
}
EXPORT_SYMBOL(tegra_powergate_power_off);
/**
* tegra_powergate_is_powered() - check if partition is powered
+ * @pmc: power management controller
* @id: partition ID
*/
-int tegra_powergate_is_powered(unsigned int id)
+static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id)
{
- if (!tegra_powergate_is_valid(id))
+ if (!tegra_powergate_is_valid(pmc, id))
return -EINVAL;
return tegra_powergate_state(id);
@@ -645,10 +721,10 @@ int tegra_powergate_is_powered(unsigned int id)
*/
int tegra_powergate_remove_clamping(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return __tegra_powergate_remove_clamping(id);
+ return __tegra_powergate_remove_clamping(pmc, id);
}
EXPORT_SYMBOL(tegra_powergate_remove_clamping);
@@ -666,7 +742,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct tegra_powergate *pg;
int err;
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
@@ -681,7 +757,8 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
err = tegra_powergate_power_up(pg, false);
if (err)
- pr_err("failed to turn on partition %d: %d\n", id, err);
+ dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
+ err);
kfree(pg);
@@ -691,12 +768,14 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
/**
* tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @pmc: power management controller
* @cpuid: CPU partition ID
*
* Returns the partition ID corresponding to the CPU partition ID or a
* negative error code on failure.
*/
-static int tegra_get_cpu_powergate_id(unsigned int cpuid)
+static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc,
+ unsigned int cpuid)
{
if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
return pmc->soc->cpu_powergates[cpuid];
@@ -712,11 +791,11 @@ bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return false;
- return tegra_powergate_is_powered(id);
+ return tegra_powergate_is_powered(pmc, id);
}
/**
@@ -727,11 +806,11 @@ int tegra_pmc_cpu_power_on(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return id;
- return tegra_powergate_set(id, true);
+ return tegra_powergate_set(pmc, id, true);
}
/**
@@ -742,7 +821,7 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return id;
@@ -755,7 +834,7 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
const char *cmd = data;
u32 value;
- value = readl(pmc->scratch + pmc->soc->regs->scratch0);
+ value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
value &= ~PMC_SCRATCH0_MODE_MASK;
if (cmd) {
@@ -769,12 +848,12 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
value |= PMC_SCRATCH0_MODE_RCM;
}
- writel(value, pmc->scratch + pmc->soc->regs->scratch0);
+ tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
/* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_MAIN_RST;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
return NOTIFY_DONE;
}
@@ -793,7 +872,7 @@ static int powergate_show(struct seq_file *s, void *data)
seq_printf(s, "------------------\n");
for (i = 0; i < pmc->soc->num_powergates; i++) {
- status = tegra_powergate_is_powered(i);
+ status = tegra_powergate_is_powered(pmc, i);
if (status < 0)
continue;
@@ -855,12 +934,13 @@ err:
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
struct device_node *np, bool off)
{
+ struct device *dev = pg->pmc->dev;
int err;
pg->reset = of_reset_control_array_get_exclusive(np);
if (IS_ERR(pg->reset)) {
err = PTR_ERR(pg->reset);
- pr_err("failed to get device resets: %d\n", err);
+ dev_err(dev, "failed to get device resets: %d\n", err);
return err;
}
@@ -877,6 +957,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
+ struct device *dev = pmc->dev;
struct tegra_powergate *pg;
int id, err;
bool off;
@@ -887,7 +968,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
+ dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
goto free_mem;
}
@@ -903,17 +984,17 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pg->genpd.power_on = tegra_genpd_power_on;
pg->pmc = pmc;
- off = !tegra_powergate_is_powered(pg->id);
+ off = !tegra_powergate_is_powered(pmc, pg->id);
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- pr_err("failed to get clocks for %pOFn: %d\n", np, err);
+ dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- pr_err("failed to get resets for %pOFn: %d\n", np, err);
+ dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
@@ -926,19 +1007,19 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = pm_genpd_init(&pg->genpd, NULL, off);
if (err < 0) {
- pr_err("failed to initialise PM domain %pOFn: %d\n", np,
+ dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
err);
goto remove_resets;
}
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- pr_err("failed to add PM domain provider for %pOFn: %d\n",
- np, err);
+ dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
goto remove_genpd;
}
- pr_debug("added PM domain %s\n", pg->genpd.name);
+ dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
return;
@@ -994,7 +1075,8 @@ tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
return NULL;
}
-static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
+static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc,
+ enum tegra_io_pad id,
unsigned long *request,
unsigned long *status,
u32 *mask)
@@ -1003,7 +1085,7 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
pad = tegra_io_pad_find(pmc, id);
if (!pad) {
- pr_err("invalid I/O pad ID %u\n", id);
+ dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
return -ENOENT;
}
@@ -1023,43 +1105,44 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
return 0;
}
-static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
- unsigned long *status, u32 *mask)
+static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ unsigned long *request, unsigned long *status,
+ u32 *mask)
{
unsigned long rate, value;
int err;
- err = tegra_io_pad_get_dpd_register_bit(id, request, status, mask);
+ err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask);
if (err)
return err;
if (pmc->clk) {
rate = clk_get_rate(pmc->clk);
if (!rate) {
- pr_err("failed to get clock rate\n");
+ dev_err(pmc->dev, "failed to get clock rate\n");
return -ENODEV;
}
- tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+ tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE);
/* must be at least 200 ns, in APB (PCLK) clock cycles */
value = DIV_ROUND_UP(1000000000, rate);
value = DIV_ROUND_UP(200, value);
- tegra_pmc_writel(value, SEL_DPD_TIM);
+ tegra_pmc_writel(pmc, value, SEL_DPD_TIM);
}
return 0;
}
-static int tegra_io_pad_poll(unsigned long offset, u32 mask,
- u32 val, unsigned long timeout)
+static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset,
+ u32 mask, u32 val, unsigned long timeout)
{
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_after(timeout, jiffies)) {
- value = tegra_pmc_readl(offset);
+ value = tegra_pmc_readl(pmc, offset);
if ((value & mask) == val)
return 0;
@@ -1069,10 +1152,10 @@ static int tegra_io_pad_poll(unsigned long offset, u32 mask,
return -ETIMEDOUT;
}
-static void tegra_io_pad_unprepare(void)
+static void tegra_io_pad_unprepare(struct tegra_pmc *pmc)
{
if (pmc->clk)
- tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+ tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
/**
@@ -1089,21 +1172,21 @@ int tegra_io_pad_power_enable(enum tegra_io_pad id)
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
if (err < 0) {
- pr_err("failed to prepare I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request);
- err = tegra_io_pad_poll(status, mask, 0, 250);
+ err = tegra_io_pad_poll(pmc, status, mask, 0, 250);
if (err < 0) {
- pr_err("failed to enable I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err);
goto unlock;
}
- tegra_io_pad_unprepare();
+ tegra_io_pad_unprepare(pmc);
unlock:
mutex_unlock(&pmc->powergates_lock);
@@ -1125,21 +1208,21 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id)
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
if (err < 0) {
- pr_err("failed to prepare I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request);
- err = tegra_io_pad_poll(status, mask, mask, 250);
+ err = tegra_io_pad_poll(pmc, status, mask, mask, 250);
if (err < 0) {
- pr_err("failed to disable I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err);
goto unlock;
}
- tegra_io_pad_unprepare();
+ tegra_io_pad_unprepare(pmc);
unlock:
mutex_unlock(&pmc->powergates_lock);
@@ -1147,22 +1230,24 @@ unlock:
}
EXPORT_SYMBOL(tegra_io_pad_power_disable);
-static int tegra_io_pad_is_powered(enum tegra_io_pad id)
+static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
unsigned long request, status;
u32 mask, value;
int err;
- err = tegra_io_pad_get_dpd_register_bit(id, &request, &status, &mask);
+ err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status,
+ &mask);
if (err)
return err;
- value = tegra_pmc_readl(status);
+ value = tegra_pmc_readl(pmc, status);
return !(value & mask);
}
-static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
+static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ int voltage)
{
const struct tegra_io_pad_soc *pad;
u32 value;
@@ -1177,29 +1262,29 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
mutex_lock(&pmc->powergates_lock);
if (pmc->soc->has_impl_33v_pwr) {
- value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
value &= ~BIT(pad->voltage);
else
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR);
+ tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR);
} else {
/* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
- value = tegra_pmc_readl(PMC_PWR_DET);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET);
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_PWR_DET);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET);
/* update I/O voltage */
- value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
value &= ~BIT(pad->voltage);
else
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE);
}
mutex_unlock(&pmc->powergates_lock);
@@ -1209,7 +1294,7 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
return 0;
}
-static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
const struct tegra_io_pad_soc *pad;
u32 value;
@@ -1222,9 +1307,9 @@ static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
return -ENOTSUPP;
if (pmc->soc->has_impl_33v_pwr)
- value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
else
- value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
if ((value & BIT(pad->voltage)) == 0)
return TEGRA_IO_PAD_VOLTAGE_1V8;
@@ -1296,21 +1381,21 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
wmb();
pmc->rate = rate;
}
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
value |= PMC_CNTRL_CPU_PWRREQ_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
#endif
@@ -1432,13 +1517,13 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux))
pinmux = 0;
- value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
value |= PMC_SENSOR_CTRL_SCRATCH_WRITE;
- tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) |
(reg_addr << PMC_SCRATCH54_ADDR_SHIFT);
- tegra_pmc_writel(value, PMC_SCRATCH54);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH54);
value = PMC_SCRATCH55_RESET_TEGRA;
value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT;
@@ -1456,11 +1541,11 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT;
- tegra_pmc_writel(value, PMC_SCRATCH55);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH55);
- value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
value |= PMC_SENSOR_CTRL_ENABLE_RST;
- tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
dev_info(pmc->dev, "emergency thermal reset enabled\n");
@@ -1470,12 +1555,16 @@ out:
static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
return pmc->soc->num_io_pads;
}
-static const char *tegra_io_pad_pinctrl_get_group_name(
- struct pinctrl_dev *pctl, unsigned int group)
+static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl,
+ unsigned int group)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl);
+
return pmc->soc->io_pads[group].name;
}
@@ -1484,8 +1573,11 @@ static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev,
const unsigned int **pins,
unsigned int *num_pins)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
*pins = &pmc->soc->io_pads[group].id;
*num_pins = 1;
+
return 0;
}
@@ -1500,27 +1592,33 @@ static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = {
static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
unsigned int pin, unsigned long *config)
{
- const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
enum pin_config_param param = pinconf_to_config_param(*config);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
int ret;
u32 arg;
+ pad = tegra_io_pad_find(pmc, pin);
if (!pad)
return -EINVAL;
switch (param) {
case PIN_CONFIG_POWER_SOURCE:
- ret = tegra_io_pad_get_voltage(pad->id);
+ ret = tegra_io_pad_get_voltage(pmc, pad->id);
if (ret < 0)
return ret;
+
arg = ret;
break;
+
case PIN_CONFIG_LOW_POWER_MODE:
- ret = tegra_io_pad_is_powered(pad->id);
+ ret = tegra_io_pad_is_powered(pmc, pad->id);
if (ret < 0)
return ret;
+
arg = !ret;
break;
+
default:
return -EINVAL;
}
@@ -1534,12 +1632,14 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
unsigned int pin, unsigned long *configs,
unsigned int num_configs)
{
- const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
enum pin_config_param param;
unsigned int i;
int err;
u32 arg;
+ pad = tegra_io_pad_find(pmc, pin);
if (!pad)
return -EINVAL;
@@ -1560,7 +1660,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 &&
arg != TEGRA_IO_PAD_VOLTAGE_3V3)
return -EINVAL;
- err = tegra_io_pad_set_voltage(pad->id, arg);
+ err = tegra_io_pad_set_voltage(pmc, pad->id, arg);
if (err)
return err;
break;
@@ -1585,7 +1685,7 @@ static struct pinctrl_desc tegra_pmc_pctl_desc = {
static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
{
- int err = 0;
+ int err;
if (!pmc->soc->num_pin_descs)
return 0;
@@ -1598,18 +1698,20 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
pmc);
if (IS_ERR(pmc->pctl_dev)) {
err = PTR_ERR(pmc->pctl_dev);
- dev_err(pmc->dev, "unable to register pinctrl, %d\n", err);
+ dev_err(pmc->dev, "failed to register pin controller: %d\n",
+ err);
+ return err;
}
- return err;
+ return 0;
}
static ssize_t reset_reason_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u32 value, rst_src;
- value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
rst_src = (value & pmc->soc->regs->rst_source_mask) >>
pmc->soc->regs->rst_source_shift;
@@ -1619,11 +1721,11 @@ static ssize_t reset_reason_show(struct device *dev,
static DEVICE_ATTR_RO(reset_reason);
static ssize_t reset_level_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u32 value, rst_lvl;
- value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
rst_lvl = (value & pmc->soc->regs->rst_level_mask) >>
pmc->soc->regs->rst_level_shift;
@@ -1641,16 +1743,16 @@ static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc)
err = device_create_file(dev, &dev_attr_reset_reason);
if (err < 0)
dev_warn(dev,
- "failed to create attr \"reset_reason\": %d\n",
- err);
+ "failed to create attr \"reset_reason\": %d\n",
+ err);
}
if (pmc->soc->reset_levels) {
err = device_create_file(dev, &dev_attr_reset_level);
if (err < 0)
dev_warn(dev,
- "failed to create attr \"reset_level\": %d\n",
- err);
+ "failed to create attr \"reset_level\": %d\n",
+ err);
}
}
@@ -1920,6 +2022,8 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->base = base;
mutex_unlock(&pmc->powergates_lock);
+ platform_set_drvdata(pdev, pmc);
+
return 0;
cleanup_restart_handler:
@@ -1932,14 +2036,18 @@ cleanup_debugfs:
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
static int tegra_pmc_suspend(struct device *dev)
{
- tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, virt_to_phys(tegra_resume), PMC_SCRATCH41);
return 0;
}
static int tegra_pmc_resume(struct device *dev)
{
- tegra_pmc_writel(0x0, PMC_SCRATCH41);
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41);
return 0;
}
@@ -1976,11 +2084,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
u32 value;
/* Always enable CPU power request */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_CPU_PWRREQ_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
if (pmc->sysclkreq_high)
value &= ~PMC_CNTRL_SYSCLK_POLARITY;
@@ -1988,12 +2096,12 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
value |= PMC_CNTRL_SYSCLK_POLARITY;
/* configure the output polarity while the request is tristated */
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
/* now enable the request */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_SYSCLK_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -2002,14 +2110,14 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
{
u32 value;
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
if (invert)
value |= PMC_CNTRL_INTR_POLARITY;
else
value &= ~PMC_CNTRL_INTR_POLARITY;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
static const struct tegra_pmc_soc tegra20_pmc_soc = {
@@ -2019,6 +2127,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2063,7 +2174,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.cpu_powergates = tegra30_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2112,7 +2225,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.cpu_powergates = tegra114_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2221,7 +2336,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.cpu_powergates = tegra124_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra124_io_pads),
.io_pads = tegra124_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra124_pin_descs),
@@ -2325,8 +2442,9 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.cpu_powergates = tegra210_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
- .has_impl_33v_pwr = false,
.needs_mbist_war = true,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = true,
.num_io_pads = ARRAY_SIZE(tegra210_io_pads),
.io_pads = tegra210_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra210_pin_descs),
@@ -2413,7 +2531,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
index = of_property_match_string(np, "reg-names", "wake");
if (index < 0) {
- pr_err("failed to find PMC wake registers\n");
+ dev_err(pmc->dev, "failed to find PMC wake registers\n");
return;
}
@@ -2421,7 +2539,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
wake = ioremap_nocache(regs.start, resource_size(&regs));
if (!wake) {
- pr_err("failed to map PMC wake registers\n");
+ dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
}
@@ -2438,7 +2556,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_wake_event tegra186_wake_events[] = {
- TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)),
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -2449,7 +2567,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra186_io_pads),
.io_pads = tegra186_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra186_pin_descs),
@@ -2527,6 +2647,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
.regs = &tegra186_pmc_regs,
@@ -2561,6 +2684,32 @@ static struct platform_driver tegra_pmc_driver = {
};
builtin_platform_driver(tegra_pmc_driver);
+static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc)
+{
+ u32 value, saved;
+
+ saved = readl(pmc->base + pmc->soc->regs->scratch0);
+ value = saved ^ 0xffffffff;
+
+ if (value == 0xffffffff)
+ value = 0xdeadbeef;
+
+ /* write pattern and read it back */
+ writel(value, pmc->base + pmc->soc->regs->scratch0);
+ value = readl(pmc->base + pmc->soc->regs->scratch0);
+
+ /* if we read all-zeroes, access is restricted to TZ only */
+ if (value == 0) {
+ pr_info("access to PMC is restricted to TZ\n");
+ return true;
+ }
+
+ /* restore original value */
+ writel(saved, pmc->base + pmc->soc->regs->scratch0);
+
+ return false;
+}
+
/*
* Early initialization to allow access to registers in the very early boot
* process.
@@ -2623,6 +2772,9 @@ static int __init tegra_pmc_early_init(void)
if (np) {
pmc->soc = match->data;
+ if (pmc->soc->maybe_tz_only)
+ pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
+
tegra_powergate_init(pmc, np);
/*
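On Tegra210 the PMC may be reachable only through the secure monitor, so the probe above writes an inverted test pattern to SCRATCH0 and reads it back; a read-back of all zeroes is taken to mean direct access is blocked, and every register access is then routed through the TEGRA_SMC_PMC call instead. The sketch below reproduces that write/read-back probe on its own, with the memory-mapped scratch register replaced by a plain variable and the lock-down state simulated.

/* Standalone illustration of the TZ-only detection pattern. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t scratch0;		/* stands in for the SCRATCH0 register */
static bool pmc_locked_down;		/* simulated firmware protection */

static void scratch_write(uint32_t v)
{
	if (!pmc_locked_down)
		scratch0 = v;
	/* a protected PMC silently drops the write in this model */
}

static uint32_t scratch_read(void)
{
	return pmc_locked_down ? 0 : scratch0;
}

static bool detect_tz_only(void)
{
	uint32_t saved = scratch_read();
	uint32_t value = saved ^ 0xffffffff;

	if (value == 0xffffffff)
		value = 0xdeadbeef;	/* avoid an all-ones test pattern */

	scratch_write(value);
	if (scratch_read() == 0)
		return true;		/* read-back failed: TZ-only access */

	scratch_write(saved);		/* restore the original contents */
	return false;
}

int main(void)
{
	pmc_locked_down = true;
	printf("TZ only: %s\n", detect_tz_only() ? "yes" : "no");
	return 0;
}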
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index e05ab16d9a9e..6285cd8efb21 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -598,7 +598,7 @@ static int pktdma_init_chan(struct knav_dma_device *dma,
INIT_LIST_HEAD(&chan->list);
chan->dma = dma;
- chan->direction = DMA_NONE;
+ chan->direction = DMA_TRANS_NONE;
atomic_set(&chan->ref_count, 0);
spin_lock_init(&chan->lock);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 687c8f3cd955..01e76b58dd78 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -17,4 +17,24 @@ config XILINX_VCU
To compile this driver as a module, choose M here: the
module will be called xlnx_vcu.
+config ZYNQMP_POWER
+ bool "Enable Xilinx Zynq MPSoC Power Management driver"
+ depends on PM && ARCH_ZYNQMP
+ default y
+ help
+	  Say yes to enable power management support for ZynqMP SoC.
+	  This driver uses the firmware driver as an interface for power
+	  management requests to the firmware. It registers an ISR to handle
+	  power management callbacks from the firmware.
+ If in doubt, say N.
+
+config ZYNQMP_PM_DOMAINS
+ bool "Enable Zynq MPSoC generic PM domains"
+ default y
+ depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
+ select PM_GENERIC_DOMAINS
+ help
+	  Say yes to enable device power management through PM domains.
+	  If in doubt, say N.
+
endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index dee8fd51e303..f66bfea5de17 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
+obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
new file mode 100644
index 000000000000..354d256e6e00
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Generic PM domain support
+ *
+ * Copyright (C) 2015-2018 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NUM_DOMAINS (100)
+/* Flag stating if PM nodes mapped to the PM domain have been requested */
+#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
+
+/**
+ * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
+ * @gpd: Generic power domain
+ * @node_id: PM node ID corresponding to device inside PM domain
+ * @flags: ZynqMP PM domain flags
+ */
+struct zynqmp_pm_domain {
+ struct generic_pm_domain gpd;
+ u32 node_id;
+ u8 flags;
+};
+
+/**
+ * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
+ * path
+ * @dev: Device to check for wakeup source path
+ * @not_used: Data member (not required)
+ *
+ * This function checks the device's child hierarchy to see whether any
+ * device is set up as a wakeup source.
+ *
+ * Return: 1 if device is in wakeup source path else 0
+ */
+static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
+{
+ int may_wakeup;
+
+ may_wakeup = device_may_wakeup(dev);
+ if (may_wakeup)
+ return may_wakeup;
+
+ return device_for_each_child(dev, NULL,
+ zynqmp_gpd_is_active_wakeup_path);
+}
+
+/**
+ * zynqmp_gpd_power_on() - Power on PM domain
+ * @domain: Generic PM domain
+ *
+ * This function is called before devices inside a PM domain are resumed, to
+ * power on PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_requirement)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+ ret = eemi_ops->set_requirement(pd->node_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ ZYNQMP_PM_MAX_QOS,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret) {
+ pr_err("%s() %s set requirement for node %d failed: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pr_debug("%s() Powered on %s domain\n", __func__, domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_power_off() - Power off PM domain
+ * @domain: Generic PM domain
+ *
+ * This function is called after devices inside a PM domain are suspended, to
+ * power off PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct pm_domain_data *pdd, *tmp;
+ struct zynqmp_pm_domain *pd;
+ u32 capabilities = 0;
+ bool may_wakeup;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_requirement)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If domain is already released there is nothing to be done */
+ if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) {
+ pr_debug("%s() %s domain is already released\n",
+ __func__, domain->name);
+ return 0;
+ }
+
+ list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
+ /* If device is in wakeup path, set capability to WAKEUP */
+ may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
+ if (may_wakeup) {
+ dev_dbg(pdd->dev, "device is in wakeup path in %s\n",
+ domain->name);
+ capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP;
+ break;
+ }
+ }
+
+ ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ZYNQMP_PM_REQUEST_ACK_NO);
+	/*
+	 * If powering down of any node inside this domain fails,
+	 * report and return the error.
+	 */
+ if (ret) {
+ pr_err("%s() %s set requirement for node %d failed: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pr_debug("%s() Powered off %s domain\n", __func__, domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_attach_dev() - Attach device to the PM domain
+ * @domain: Generic PM domain
+ * @dev: Device to attach
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->request_node)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If this is not the first device to attach there is nothing to do */
+ if (domain->device_count)
+ return 0;
+
+ ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ /* If requesting a node fails print and return the error */
+ if (ret) {
+ pr_err("%s() %s request failed for node %d: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED;
+
+ pr_debug("%s() %s attached to %s domain\n", __func__,
+ dev_name(dev), domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_detach_dev() - Detach device from the PM domain
+ * @domain: Generic PM domain
+ * @dev: Device to detach
+ */
+static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->release_node)
+ return;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If this is not the last device to detach there is nothing to do */
+ if (domain->device_count)
+ return;
+
+ ret = eemi_ops->release_node(pd->node_id);
+ /* If releasing a node fails print the error and return */
+ if (ret) {
+ pr_err("%s() %s release failed for node %d: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return;
+ }
+
+ pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED;
+
+ pr_debug("%s() %s detached from %s domain\n", __func__,
+ dev_name(dev), domain->name);
+}
+
+static struct generic_pm_domain *zynqmp_gpd_xlate
+ (struct of_phandle_args *genpdspec, void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int i, idx = genpdspec->args[0];
+ struct zynqmp_pm_domain *pd;
+
+ pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd);
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ /* Check for existing pm domains */
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+ if (pd[i].node_id == idx)
+ goto done;
+ }
+
+	/*
+	 * No existing power domain was found for this index, so store the
+	 * index in the first empty node_id slot of the power domain list.
+	 */
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+ if (pd[i].node_id == 0) {
+ pd[i].node_id = idx;
+ break;
+ }
+ }
+
+done:
+	if (i == ZYNQMP_NUM_DOMAINS || !genpd_data->domains[i])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[i];
+}
+
+static int zynqmp_gpd_probe(struct platform_device *pdev)
+{
+ int i;
+ struct genpd_onecell_data *zynqmp_pd_data;
+ struct generic_pm_domain **domains;
+ struct zynqmp_pm_domain *pd;
+ struct device *dev = &pdev->dev;
+
+ pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL);
+ if (!zynqmp_pd_data)
+ return -ENOMEM;
+
+ zynqmp_pd_data->xlate = zynqmp_gpd_xlate;
+
+ domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains),
+ GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
+ pd->node_id = 0;
+ pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
+ pd->gpd.power_off = zynqmp_gpd_power_off;
+ pd->gpd.power_on = zynqmp_gpd_power_on;
+ pd->gpd.attach_dev = zynqmp_gpd_attach_dev;
+ pd->gpd.detach_dev = zynqmp_gpd_detach_dev;
+
+ domains[i] = &pd->gpd;
+
+ /* Mark all PM domains as initially powered off */
+ pm_genpd_init(&pd->gpd, NULL, true);
+ }
+
+ zynqmp_pd_data->domains = domains;
+ zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS;
+ of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data);
+
+ return 0;
+}
+
+static int zynqmp_gpd_remove(struct platform_device *pdev)
+{
+ of_genpd_del_provider(pdev->dev.parent->of_node);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_power_domain_driver = {
+ .driver = {
+ .name = "zynqmp_power_controller",
+ },
+ .probe = zynqmp_gpd_probe,
+ .remove = zynqmp_gpd_remove,
+};
+module_platform_driver(zynqmp_power_domain_driver);
+
+MODULE_ALIAS("platform:zynqmp_power_controller");
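The xlate callback above maps the PM node ID carried in the device tree phandle argument onto one of the 100 pre-allocated generic domains: it first looks for a slot already bound to that node ID and otherwise claims the first slot whose node_id is still 0. A standalone sketch of that lookup/claim logic follows, with a smaller table and invented node IDs.

/* Standalone model of the slot lookup/claim used by the xlate callback. */
#include <stdio.h>

#define NUM_DOMAINS 8	/* the driver uses 100 */

static unsigned int node_id[NUM_DOMAINS];	/* 0 means "slot unused" */

static int xlate(unsigned int idx)
{
	int i;

	/* reuse a slot already bound to this PM node ID */
	for (i = 0; i < NUM_DOMAINS; i++)
		if (node_id[i] == idx)
			return i;

	/* otherwise claim the first unused slot */
	for (i = 0; i < NUM_DOMAINS; i++)
		if (node_id[i] == 0) {
			node_id[i] = idx;
			return i;
		}

	return -1;	/* no free slot: -ENOENT in the driver */
}

int main(void)
{
	printf("node 22 -> slot %d\n", xlate(22));	/* claims slot 0 */
	printf("node 35 -> slot %d\n", xlate(35));	/* claims slot 1 */
	printf("node 22 -> slot %d\n", xlate(22));	/* reuses slot 0 */
	return 0;
}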
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
new file mode 100644
index 000000000000..771cb59b9d22
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+enum pm_suspend_mode {
+ PM_SUSPEND_MODE_FIRST = 0,
+ PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST,
+ PM_SUSPEND_MODE_POWER_OFF,
+};
+
+#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD
+
+static const char *const suspend_modes[] = {
+ [PM_SUSPEND_MODE_STD] = "standard",
+ [PM_SUSPEND_MODE_POWER_OFF] = "power-off",
+};
+
+static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB,
+ PM_NOTIFY_CB,
+};
+
+static void zynqmp_pm_get_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t zynqmp_pm_isr(int irq, void *data)
+{
+ u32 payload[CB_PAYLOAD_SIZE];
+
+ zynqmp_pm_get_callback_data(payload);
+
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ switch (payload[1]) {
+ case SUSPEND_SYSTEM_SHUTDOWN:
+ orderly_poweroff(true);
+ break;
+ case SUSPEND_POWER_REQUEST:
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+ default:
+ pr_err("%s Unsupported InitSuspendCb reason "
+ "code %d\n", __func__, payload[1]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t suspend_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int md;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md]) {
+ if (md == suspend_mode)
+ s += sprintf(s, "[%s] ", suspend_modes[md]);
+ else
+ s += sprintf(s, "%s ", suspend_modes[md]);
+ }
+
+ /* Convert last space to newline */
+ if (s != buf)
+ *(s - 1) = '\n';
+ return (s - buf);
+}
+
+static ssize_t suspend_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int md, ret = -EINVAL;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_suspend_mode)
+ return ret;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md] &&
+ sysfs_streq(suspend_modes[md], buf)) {
+ ret = 0;
+ break;
+ }
+
+ if (!ret && md != suspend_mode) {
+ ret = eemi_ops->set_suspend_mode(md);
+ if (likely(!ret))
+ suspend_mode = md;
+ }
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(suspend_mode);
+
+static int zynqmp_pm_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ u32 pm_api_version;
+
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize)
+ return -ENXIO;
+
+ eemi_ops->init_finalize();
+ eemi_ops->get_api_version(&pm_api_version);
+
+ /* Check PM API version number */
+ if (pm_api_version < ZYNQMP_PM_VERSION)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed "
+ "with %d\n", irq, ret);
+ return ret;
+ }
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs interface\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_pm_remove(struct platform_device *pdev)
+{
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+
+ return 0;
+}
+
+static const struct of_device_id pm_of_match[] = {
+ { .compatible = "xlnx,zynqmp-power", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, pm_of_match);
+
+static struct platform_driver zynqmp_pm_platform_driver = {
+ .probe = zynqmp_pm_probe,
+ .remove = zynqmp_pm_remove,
+ .driver = {
+ .name = "zynqmp_power",
+ .of_match_table = pm_of_match,
+ },
+};
+module_platform_driver(zynqmp_pm_platform_driver);
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 48d262ae2f04..56263ae3b1d7 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -5,3 +5,4 @@ optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
optee-objs += shm_pool.o
+optee-objs += device.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 947f9b28de9e..0842b6e6af82 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -634,6 +634,10 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
+ rc = optee_enumerate_devices();
+ if (rc)
+ goto err;
+
pr_info("initialized driver\n");
return optee;
err:
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
new file mode 100644
index 000000000000..e3a148521ec1
--- /dev/null
+++ b/drivers/tee/optee/device.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "optee_private.h"
+
+/*
+ * Get device UUIDs
+ *
+ * [out] memref[0] Array of device UUIDs
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_SHORT_BUFFER - Output buffer size less than required
+ */
+#define PTA_CMD_GET_DEVICES 0x0
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int get_devices(struct tee_context *ctx, u32 session,
+ struct tee_shm *device_shm, u32 *shm_size)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke PTA_CMD_GET_DEVICES function */
+ inv_arg.func = PTA_CMD_GET_DEVICES;
+ inv_arg.session = session;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = device_shm;
+ param[0].u.memref.size = *shm_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(ctx, &inv_arg, param);
+ if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
+ (inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ *shm_size = param[0].u.memref.size;
+
+ return 0;
+}
+
+static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
+{
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+
+ optee_device = kzalloc(sizeof(*optee_device), GFP_KERNEL);
+ if (!optee_device)
+ return -ENOMEM;
+
+ optee_device->dev.bus = &tee_bus_type;
+ dev_set_name(&optee_device->dev, "optee-clnt%u", device_id);
+ uuid_copy(&optee_device->id.uuid, device_uuid);
+
+ rc = device_register(&optee_device->dev);
+ if (rc) {
+ pr_err("device registration failed, err: %d\n", rc);
+ kfree(optee_device);
+ }
+
+ return rc;
+}
+
+int optee_enumerate_devices(void)
+{
+ const uuid_t pta_uuid =
+ UUID_INIT(0x7011a688, 0xddde, 0x4053,
+ 0xa5, 0xa9, 0x7b, 0x3c, 0x4d, 0xdf, 0x13, 0xb8);
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *device_shm = NULL;
+ const uuid_t *device_uuid = NULL;
+ struct tee_context *ctx = NULL;
+ u32 shm_size = 0, idx, num_devices = 0;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with OP-TEE driver */
+ ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ if (IS_ERR(ctx))
+ return -ENODEV;
+
+ /* Open session with device enumeration pseudo TA */
+ memcpy(sess_arg.uuid, pta_uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != TEEC_SUCCESS)) {
+ /* Device enumeration pseudo TA not found */
+ rc = 0;
+ goto out_ctx;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size);
+ if (rc < 0 || !shm_size)
+ goto out_sess;
+
+ device_shm = tee_shm_alloc(ctx, shm_size,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(device_shm)) {
+ pr_err("tee_shm_alloc failed\n");
+ rc = PTR_ERR(device_shm);
+ goto out_sess;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size);
+ if (rc < 0)
+ goto out_shm;
+
+ device_uuid = tee_shm_get_va(device_shm, 0);
+ if (IS_ERR(device_uuid)) {
+ pr_err("tee_shm_get_va failed\n");
+ rc = PTR_ERR(device_uuid);
+ goto out_shm;
+ }
+
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+ rc = optee_register_device(&device_uuid[idx], idx);
+ if (rc)
+ goto out_shm;
+ }
+
+out_shm:
+ tee_shm_free(device_shm);
+out_sess:
+ tee_client_close_session(ctx, sess_arg.session);
+out_ctx:
+ tee_client_close_context(ctx);
+
+ return rc;
+}
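Each UUID returned by the pseudo TA becomes a struct tee_client_device registered on the TEE bus that tee_core.c adds further below, so a kernel driver can bind to it by UUID through a tee_client_device_id table. The sketch that follows is a minimal, hypothetical client driver showing that binding pattern; the UUID, names and the empty probe/remove bodies are placeholders, not part of this patch.

/* Hypothetical TEE bus client driver; the UUID below is only a placeholder. */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int example_tee_probe(struct device *dev)
{
	/* dev is one of the "optee-clnt%u" devices registered above. */
	return 0;
}

static int example_tee_remove(struct device *dev)
{
	return 0;
}

static const struct tee_client_device_id example_tee_id_table[] = {
	/* Placeholder UUID: a real driver lists the UUID of its TA here. */
	{ UUID_INIT(0x01020304, 0x0506, 0x0708,
		    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10) },
	{ }
};
MODULE_DEVICE_TABLE(tee, example_tee_id_table);

static struct tee_client_driver example_tee_driver = {
	.id_table = example_tee_id_table,
	.driver = {
		.name = "example-tee-client",
		.bus = &tee_bus_type,
		.probe = example_tee_probe,
		.remove = example_tee_remove,
	},
};

static int __init example_tee_init(void)
{
	return driver_register(&example_tee_driver.driver);
}
module_init(example_tee_init);

static void __exit example_tee_exit(void)
{
	driver_unregister(&example_tee_driver.driver);
}
module_exit(example_tee_exit);

MODULE_LICENSE("GPL");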
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 30504901be80..795bc19ae17a 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -1,28 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2015-2019, Linaro Limited
*/
#ifndef _OPTEE_MSG_H
#define _OPTEE_MSG_H
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 35e79386c556..a5e84afd5013 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -28,6 +28,7 @@
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -181,6 +182,8 @@ void optee_free_pages_list(void *array, size_t num_entries);
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
size_t page_offset);
+int optee_enumerate_devices(void);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index bbf0cf028c16..c72122d9c997 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -1,28 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2015-2019, Linaro Limited
*/
#ifndef OPTEE_SMC_H
#define OPTEE_SMC_H
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 43626e15703a..92f56b8645e3 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -88,10 +88,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
- struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+ struct optee_supp_req *req;
bool interruptable;
u32 ret;
+ /*
+ * Return early if no supplicant is available and the caller
+ * requested non-blocking behaviour.
+ */
+ if (!supp->ctx && ctx->supp_nowait)
+ return TEEC_ERROR_COMMUNICATION;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return TEEC_ERROR_OUT_OF_MEMORY;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 7b2bb4c50058..17c64fccbb10 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
-#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
@@ -106,6 +105,11 @@ static int tee_open(struct inode *inode, struct file *filp)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ /*
+ * By default, requests made through a user-space context wait
+ * for tee-supplicant if it is not yet available.
+ */
+ ctx->supp_nowait = false;
filp->private_data = ctx;
return 0;
}
@@ -982,6 +986,16 @@ tee_client_open_context(struct tee_context *start,
} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
put_device(put_dev);
+ /*
+ * By default, in-kernel clients do not wait for tee-supplicant
+ * if it is not available when a request is made through this
+ * context. A client that needs the blocking behaviour can
+ * clear this flag again before calling
+ * tee_client_open_session().
+ */
+ if (!IS_ERR(ctx))
+ ctx->supp_nowait = true;
+
return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
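Since in-kernel contexts come back with supp_nowait set, a client that genuinely depends on tee-supplicant has to clear the flag itself before opening its session. A minimal, hypothetical helper illustrating that is sketched here; the match callback and names are placeholders, not part of this patch.

/* Hypothetical helper: open a kernel TEE context that waits for supplicant. */
#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_ctx_match(struct tee_ioctl_version_data *ver,
			     const void *data)
{
	return ver->impl_id == TEE_IMPL_ID_OPTEE;
}

static struct tee_context *example_open_blocking_ctx(void)
{
	struct tee_context *ctx;

	ctx = tee_client_open_context(NULL, example_ctx_match, NULL, NULL);
	if (IS_ERR(ctx))
		return ctx;

	/*
	 * tee_client_open_context() sets supp_nowait for kernel clients;
	 * clear it again so requests block until tee-supplicant is up.
	 */
	ctx->supp_nowait = false;

	return ctx;
}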
@@ -1027,6 +1041,48 @@ int tee_client_invoke_func(struct tee_context *ctx,
}
EXPORT_SYMBOL_GPL(tee_client_invoke_func);
+int tee_client_cancel_req(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg *arg)
+{
+ if (!ctx->teedev->desc->ops->cancel_req)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
+ arg->session);
+}
+
+static int tee_client_device_match(struct device *dev,
+ struct device_driver *drv)
+{
+ const struct tee_client_device_id *id_table;
+ struct tee_client_device *tee_device;
+
+ id_table = to_tee_client_driver(drv)->id_table;
+ tee_device = to_tee_client_device(dev);
+
+ while (!uuid_is_null(&id_table->uuid)) {
+ if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
+ return 1;
+ id_table++;
+ }
+
+ return 0;
+}
+
+static int tee_client_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
+
+ return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
+}
+
+struct bus_type tee_bus_type = {
+ .name = "tee",
+ .match = tee_client_device_match,
+ .uevent = tee_client_device_uevent,
+};
+EXPORT_SYMBOL_GPL(tee_bus_type);
+
static int __init tee_init(void)
{
int rc;
@@ -1040,18 +1096,32 @@ static int __init tee_init(void)
rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
if (rc) {
pr_err("failed to allocate char dev region\n");
- class_destroy(tee_class);
- tee_class = NULL;
+ goto out_unreg_class;
+ }
+
+ rc = bus_register(&tee_bus_type);
+ if (rc) {
+ pr_err("failed to register tee bus\n");
+ goto out_unreg_chrdev;
}
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+out_unreg_class:
+ class_destroy(tee_class);
+ tee_class = NULL;
+
return rc;
}
static void __exit tee_exit(void)
{
+ bus_unregister(&tee_bus_type);
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
class_destroy(tee_class);
tee_class = NULL;
- unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
}
subsys_initcall(tee_init);
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index a55cbba40a5a..ca8a94f15ac0 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -40,6 +41,8 @@ struct da8xx_ohci_hcd {
struct regulator *vbus_reg;
struct notifier_block nb;
unsigned int reg_enabled;
+ struct gpio_desc *vbus_gpio;
+ struct gpio_desc *oc_gpio;
};
#define to_da8xx_ohci(hcd) (struct da8xx_ohci_hcd *)(hcd_to_ohci(hcd)->priv)
@@ -86,12 +89,13 @@ static void ohci_da8xx_disable(struct usb_hcd *hcd)
static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
+ struct device *dev = hcd->self.controller;
int ret;
- if (hub && hub->set_power)
- return hub->set_power(1, on);
+ if (da8xx_ohci->vbus_gpio) {
+ gpiod_set_value_cansleep(da8xx_ohci->vbus_gpio, on);
+ return 0;
+ }
if (!da8xx_ohci->vbus_reg)
return 0;
@@ -119,11 +123,9 @@ static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
- if (hub && hub->get_power)
- return hub->get_power(1);
+ if (da8xx_ohci->vbus_gpio)
+ return gpiod_get_value_cansleep(da8xx_ohci->vbus_gpio);
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
@@ -134,13 +136,11 @@ static int ohci_da8xx_get_power(struct usb_hcd *hcd)
static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
unsigned int flags;
int ret;
- if (hub && hub->get_oci)
- return hub->get_oci(1);
+ if (da8xx_ohci->oc_gpio)
+ return gpiod_get_value_cansleep(da8xx_ohci->oc_gpio);
if (!da8xx_ohci->vbus_reg)
return 0;
@@ -158,10 +158,8 @@ static int ohci_da8xx_get_oci(struct usb_hcd *hcd)
static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
- if (hub && hub->set_power)
+ if (da8xx_ohci->vbus_gpio)
return 1;
if (da8xx_ohci->vbus_reg)
@@ -173,10 +171,8 @@ static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
static int ohci_da8xx_has_oci(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
- if (hub && hub->get_oci)
+ if (da8xx_ohci->oc_gpio)
return 1;
if (da8xx_ohci->vbus_reg)
@@ -196,19 +192,6 @@ static int ohci_da8xx_has_potpgt(struct usb_hcd *hcd)
return 0;
}
-/*
- * Handle the port over-current indicator change.
- */
-static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
- unsigned port)
-{
- ocic_mask |= 1 << port;
-
- /* Once over-current is detected, the port needs to be powered down */
- if (hub->get_oci(port) > 0)
- hub->set_power(port, 0);
-}
-
static int ohci_da8xx_regulator_event(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -223,16 +206,23 @@ static int ohci_da8xx_regulator_event(struct notifier_block *nb,
return 0;
}
+static irqreturn_t ohci_da8xx_oc_handler(int irq, void *data)
+{
+ struct da8xx_ohci_hcd *da8xx_ohci = data;
+
+ if (gpiod_get_value(da8xx_ohci->oc_gpio))
+ gpiod_set_value(da8xx_ohci->vbus_gpio, 0);
+
+ return IRQ_HANDLED;
+}
+
static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
int ret = 0;
- if (hub && hub->ocic_notify) {
- ret = hub->ocic_notify(ohci_da8xx_ocic_handler);
- } else if (da8xx_ohci->vbus_reg) {
+ if (!da8xx_ohci->oc_gpio && da8xx_ohci->vbus_reg) {
da8xx_ohci->nb.notifier_call = ohci_da8xx_regulator_event;
ret = devm_regulator_register_notifier(da8xx_ohci->vbus_reg,
&da8xx_ohci->nb);
@@ -244,15 +234,6 @@ static int ohci_da8xx_register_notify(struct usb_hcd *hcd)
return ret;
}
-static void ohci_da8xx_unregister_notify(struct usb_hcd *hcd)
-{
- struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
-
- if (hub && hub->ocic_notify)
- hub->ocic_notify(NULL);
-}
-
static int ohci_da8xx_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
@@ -402,34 +383,35 @@ MODULE_DEVICE_TABLE(of, da8xx_ohci_ids);
static int ohci_da8xx_probe(struct platform_device *pdev)
{
struct da8xx_ohci_hcd *da8xx_ohci;
+ struct device *dev = &pdev->dev;
+ int error, hcd_irq, oc_irq;
struct usb_hcd *hcd;
struct resource *mem;
- int error, irq;
- hcd = usb_create_hcd(&ohci_da8xx_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
+
+ hcd = usb_create_hcd(&ohci_da8xx_hc_driver, dev, dev_name(dev));
if (!hcd)
return -ENOMEM;
da8xx_ohci = to_da8xx_ohci(hcd);
da8xx_ohci->hcd = hcd;
- da8xx_ohci->usb11_clk = devm_clk_get(&pdev->dev, NULL);
+ da8xx_ohci->usb11_clk = devm_clk_get(dev, NULL);
if (IS_ERR(da8xx_ohci->usb11_clk)) {
error = PTR_ERR(da8xx_ohci->usb11_clk);
if (error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get clock.\n");
+ dev_err(dev, "Failed to get clock.\n");
goto err;
}
- da8xx_ohci->usb11_phy = devm_phy_get(&pdev->dev, "usb-phy");
+ da8xx_ohci->usb11_phy = devm_phy_get(dev, "usb-phy");
if (IS_ERR(da8xx_ohci->usb11_phy)) {
error = PTR_ERR(da8xx_ohci->usb11_phy);
if (error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get phy.\n");
+ dev_err(dev, "Failed to get phy.\n");
goto err;
}
- da8xx_ohci->vbus_reg = devm_regulator_get_optional(&pdev->dev, "vbus");
+ da8xx_ohci->vbus_reg = devm_regulator_get_optional(dev, "vbus");
if (IS_ERR(da8xx_ohci->vbus_reg)) {
error = PTR_ERR(da8xx_ohci->vbus_reg);
if (error == -ENODEV) {
@@ -437,13 +419,34 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
} else if (error == -EPROBE_DEFER) {
goto err;
} else {
- dev_err(&pdev->dev, "Failed to get regulator\n");
+ dev_err(dev, "Failed to get regulator\n");
goto err;
}
}
+ da8xx_ohci->vbus_gpio = devm_gpiod_get_optional(dev, "vbus",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(da8xx_ohci->vbus_gpio))
+ goto err;
+
+ da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
+ if (IS_ERR(da8xx_ohci->oc_gpio))
+ goto err;
+
+ if (da8xx_ohci->oc_gpio) {
+ oc_irq = gpiod_to_irq(da8xx_ohci->oc_gpio);
+ if (oc_irq < 0)
+ goto err;
+
+ error = devm_request_irq(dev, oc_irq, ohci_da8xx_oc_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "OHCI over-current indicator", da8xx_ohci);
+ if (error)
+ goto err;
+ }
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, mem);
+ hcd->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(hcd->regs)) {
error = PTR_ERR(hcd->regs);
goto err;
@@ -451,13 +454,13 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
hcd->rsrc_start = mem->start;
hcd->rsrc_len = resource_size(mem);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
+ hcd_irq = platform_get_irq(pdev, 0);
+ if (hcd_irq < 0) {
error = -ENODEV;
goto err;
}
- error = usb_add_hcd(hcd, irq, 0);
+ error = usb_add_hcd(hcd, hcd_irq, 0);
if (error)
goto err;
@@ -480,7 +483,6 @@ static int ohci_da8xx_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- ohci_da8xx_unregister_notify(hcd);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index ed05514cc2dc..1834524ae373 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/types.h>
+#include <linux/mfd/bcm2835-pm.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/watchdog.h>
@@ -47,6 +48,8 @@ struct bcm2835_wdt {
spinlock_t lock;
};
+static struct bcm2835_wdt *bcm2835_power_off_wdt;
+
static unsigned int heartbeat;
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -148,10 +151,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
*/
static void bcm2835_power_off(void)
{
- struct device_node *np =
- of_find_compatible_node(NULL, NULL, "brcm,bcm2835-pm-wdt");
- struct platform_device *pdev = of_find_device_by_node(np);
- struct bcm2835_wdt *wdt = platform_get_drvdata(pdev);
+ struct bcm2835_wdt *wdt = bcm2835_power_off_wdt;
u32 val;
/*
@@ -169,7 +169,7 @@ static void bcm2835_power_off(void)
static int bcm2835_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct bcm2835_wdt *wdt;
int err;
@@ -181,10 +181,7 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(wdt->base))
- return PTR_ERR(wdt->base);
+ wdt->base = pm->base;
watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
@@ -211,8 +208,10 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
return err;
}
- if (pm_power_off == NULL)
+ if (pm_power_off == NULL) {
pm_power_off = bcm2835_power_off;
+ bcm2835_power_off_wdt = wdt;
+ }
dev_info(dev, "Broadcom BCM2835 watchdog timer");
return 0;
@@ -226,18 +225,11 @@ static int bcm2835_wdt_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bcm2835_wdt_of_match[] = {
- { .compatible = "brcm,bcm2835-pm-wdt", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm2835_wdt_of_match);
-
static struct platform_driver bcm2835_wdt_driver = {
.probe = bcm2835_wdt_probe,
.remove = bcm2835_wdt_remove,
.driver = {
.name = "bcm2835-wdt",
- .of_match_table = bcm2835_wdt_of_match,
},
};
module_platform_driver(bcm2835_wdt_driver);