commit 866147b8fa59530812fc769027a94468d89401e7
author:    Linus Torvalds <torvalds@linux-foundation.org>  2021-09-02 01:25:28 +0300
committer: Linus Torvalds <torvalds@linux-foundation.org>  2021-09-02 01:25:28 +0300
tree:      1d97cf66b7db6765570ba824c51bd3dc84b561fc /drivers
parent:    634135a07b887a8ad8904da8c147407650747a38
parent:    06779631d18ff2901af604eadea0d7b2193db7a1
download:  linux-866147b8fa59530812fc769027a94468d89401e7.tar.xz
Merge tag 'drivers-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
"These are updates for drivers that are tied to a particular SoC,
including the correspondig device tree bindings:
- A couple of reset controller changes for unisoc, uniphier, renesas
and zte platforms
- memory controller driver fixes for omap and tegra
- Rockchip io domain driver updates
- Lots of updates for qualcomm platforms, mostly touching their
firmware and power management drivers
- Tegra FUSE and firmware driver updates
- Support for virtio transports in the SCMI firmware framework
- cleanup of ixp4xx drivers, towards enabling multiplatform support
and bringing it up to date with modern platforms
- Minor updates for keystone, mediatek, omap, renesas"
* tag 'drivers-5.15' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (96 commits)
reset: simple: remove ZTE details in Kconfig help
soc: rockchip: io-domain: Remove unneeded semicolon
soc: rockchip: io-domain: add rk3568 support
dt-bindings: power: add rk3568-pmu-io-domain support
bus: ixp4xx: return on error in ixp4xx_exp_probe()
soc: renesas: Prefer memcpy() over strcpy()
firmware: tegra: Stop using seq_get_buf()
soc/tegra: fuse: Enable fuse clock on suspend for Tegra124
soc/tegra: fuse: Add runtime PM support
soc/tegra: fuse: Clear fuse->clk on driver probe failure
soc/tegra: pmc: Prevent racing with cpuidle driver
soc/tegra: bpmp: Remove unused including <linux/version.h>
dt-bindings: soc: ti: pruss: Add dma-coherent property
soc: ti: Remove pm_runtime_irq_safe() usage for smartreflex
soc: ti: pruss: Enable support for ICSSG subsystems on K3 AM64x SoCs
dt-bindings: soc: ti: pruss: Update bindings for K3 AM64x SoCs
firmware: arm_scmi: Use WARN_ON() to check configured transports
firmware: arm_scmi: Fix boolconv.cocci warnings
soc: mediatek: mmsys: Fix missing UFOE component in mt8173 table routing
soc: mediatek: mmsys: add MT8365 support
...
Diffstat (limited to 'drivers')
59 files changed, 3213 insertions, 703 deletions
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 5881d64af943..99c63087c8ae 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -13,45 +13,134 @@
  */
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/libata.h>
 #include <linux/irq.h>
 #include <linux/platform_device.h>
-#include <linux/platform_data/pata_ixp4xx_cf.h>
+#include <linux/regmap.h>
 #include <scsi/scsi_host.h>
 
 #define DRV_NAME	"pata_ixp4xx_cf"
-#define DRV_VERSION	"0.2"
+#define DRV_VERSION	"1.0"
 
-static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
+struct ixp4xx_pata {
+	struct ata_host *host;
+	struct regmap *rmap;
+	u32 cmd_csreg;
+	void __iomem *cmd;
+	void __iomem *ctl;
+};
+
+#define IXP4XX_EXP_TIMING_STRIDE	0x04
+/* The timings for the chipselect is in bits 29..16 */
+#define IXP4XX_EXP_T1_T5_MASK	GENMASK(29, 16)
+#define IXP4XX_EXP_PIO_0_8	0x0a470000
+#define IXP4XX_EXP_PIO_1_8	0x06430000
+#define IXP4XX_EXP_PIO_2_8	0x02410000
+#define IXP4XX_EXP_PIO_3_8	0x00820000
+#define IXP4XX_EXP_PIO_4_8	0x00400000
+#define IXP4XX_EXP_PIO_0_16	0x29640000
+#define IXP4XX_EXP_PIO_1_16	0x05030000
+#define IXP4XX_EXP_PIO_2_16	0x00b20000
+#define IXP4XX_EXP_PIO_3_16	0x00820000
+#define IXP4XX_EXP_PIO_4_16	0x00400000
+#define IXP4XX_EXP_BW_MASK	(BIT(6)|BIT(0))
+#define IXP4XX_EXP_BYTE_RD16	BIT(6) /* Byte reads on half-word devices */
+#define IXP4XX_EXP_BYTE_EN	BIT(0) /* Use 8bit data bus if set */
+
+static void ixp4xx_set_8bit_timing(struct ixp4xx_pata *ixpp, u8 pio_mode)
+{
+	switch (pio_mode) {
+	case XFER_PIO_0:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_0_8);
+		break;
+	case XFER_PIO_1:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_1_8);
+		break;
+	case XFER_PIO_2:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_2_8);
+		break;
+	case XFER_PIO_3:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_3_8);
+		break;
+	case XFER_PIO_4:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_4_8);
+		break;
+	default:
+		break;
+	}
+	regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+			   IXP4XX_EXP_BW_MASK, IXP4XX_EXP_BYTE_RD16|IXP4XX_EXP_BYTE_EN);
+}
+
+static void ixp4xx_set_16bit_timing(struct ixp4xx_pata *ixpp, u8 pio_mode)
 {
-	struct ata_device *dev;
-
-	ata_for_each_dev(dev, link, ENABLED) {
-		ata_dev_info(dev, "configured for PIO0\n");
-		dev->pio_mode = XFER_PIO_0;
-		dev->xfer_mode = XFER_PIO_0;
-		dev->xfer_shift = ATA_SHIFT_PIO;
-		dev->flags |= ATA_DFLAG_PIO;
+	switch (pio_mode) {
+	case XFER_PIO_0:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_0_16);
+		break;
+	case XFER_PIO_1:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_1_16);
+		break;
+	case XFER_PIO_2:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_2_16);
+		break;
+	case XFER_PIO_3:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_3_16);
+		break;
+	case XFER_PIO_4:
+		regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+				   IXP4XX_EXP_T1_T5_MASK, IXP4XX_EXP_PIO_4_16);
+		break;
+	default:
+		break;
 	}
-	return 0;
+	regmap_update_bits(ixpp->rmap, ixpp->cmd_csreg,
+			   IXP4XX_EXP_BW_MASK, IXP4XX_EXP_BYTE_RD16);
+}
+
+/* This sets up the timing on the chipselect CMD accordingly */
+static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+	struct ixp4xx_pata *ixpp = ap->host->private_data;
+
+	ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n",
+		       adev->pio_mode - XFER_PIO_0);
+	ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
 }
 
 static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
-				unsigned char *buf, unsigned int buflen, int rw)
+					  unsigned char *buf, unsigned int buflen, int rw)
 {
 	unsigned int i;
 	unsigned int words = buflen >> 1;
 	u16 *buf16 = (u16 *) buf;
+	struct ata_device *adev = qc->dev;
 	struct ata_port *ap = qc->dev->link->ap;
 	void __iomem *mmio = ap->ioaddr.data_addr;
-	struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev);
+	struct ixp4xx_pata *ixpp = ap->host->private_data;
+	unsigned long flags;
+
+	ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
+		       buflen);
+	spin_lock_irqsave(ap->lock, flags);
 
 	/* set the expansion bus in 16bit mode and restore
 	 * 8 bit mode after the transaction.
 	 */
-	*data->cs0_cfg &= ~(0x01);
-	udelay(100);
+	ixp4xx_set_16bit_timing(ixpp, adev->pio_mode);
+	udelay(5);
 
 	/* Transfer multiple of 2 bytes */
 	if (rw == READ)
@@ -76,8 +165,10 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
 		words++;
 	}
 
-	udelay(100);
-	*data->cs0_cfg |= 0x01;
+	ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
+	udelay(5);
+
+	spin_unlock_irqrestore(ap->lock, flags);
 
 	return words << 1;
 }
@@ -90,79 +181,98 @@ static struct ata_port_operations ixp4xx_port_ops = {
 	.inherits		= &ata_sff_port_ops,
 	.sff_data_xfer		= ixp4xx_mmio_data_xfer,
 	.cable_detect		= ata_cable_40wire,
-	.set_mode		= ixp4xx_set_mode,
+	.set_piomode		= ixp4xx_set_piomode,
+};
+
+static struct ata_port_info ixp4xx_port_info = {
+	.flags		= ATA_FLAG_NO_ATAPI,
+	.pio_mask	= ATA_PIO4,
+	.port_ops	= &ixp4xx_port_ops,
 };
 
 static void ixp4xx_setup_port(struct ata_port *ap,
-			      struct ixp4xx_pata_data *data,
-			      unsigned long raw_cs0, unsigned long raw_cs1)
+			      struct ixp4xx_pata *ixpp,
+			      unsigned long raw_cmd, unsigned long raw_ctl)
 {
 	struct ata_ioports *ioaddr = &ap->ioaddr;
-	unsigned long raw_cmd = raw_cs0;
-	unsigned long raw_ctl = raw_cs1 + 0x06;
 
-	ioaddr->cmd_addr	= data->cs0;
-	ioaddr->altstatus_addr	= data->cs1 + 0x06;
-	ioaddr->ctl_addr	= data->cs1 + 0x06;
+	raw_ctl += 0x06;
+	ioaddr->cmd_addr	= ixpp->cmd;
+	ioaddr->altstatus_addr	= ixpp->ctl + 0x06;
+	ioaddr->ctl_addr	= ixpp->ctl + 0x06;
 
 	ata_sff_std_ports(ioaddr);
 
-#ifndef __ARMEB__
-
-	/* adjust the addresses to handle the address swizzling of the
-	 * ixp4xx in little endian mode.
-	 */
-
-	*(unsigned long *)&ioaddr->data_addr		^= 0x02;
-	*(unsigned long *)&ioaddr->cmd_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->altstatus_addr	^= 0x03;
-	*(unsigned long *)&ioaddr->ctl_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->error_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->feature_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->nsect_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->lbal_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->lbam_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->lbah_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->device_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->status_addr		^= 0x03;
-	*(unsigned long *)&ioaddr->command_addr		^= 0x03;
-
-	raw_cmd ^= 0x03;
-	raw_ctl ^= 0x03;
-#endif
+	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+		/* adjust the addresses to handle the address swizzling of the
+		 * ixp4xx in little endian mode.
+		 */
+
+		*(unsigned long *)&ioaddr->data_addr		^= 0x02;
+		*(unsigned long *)&ioaddr->cmd_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->altstatus_addr	^= 0x03;
+		*(unsigned long *)&ioaddr->ctl_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->error_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->feature_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->nsect_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->lbal_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->lbam_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->lbah_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->device_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->status_addr		^= 0x03;
+		*(unsigned long *)&ioaddr->command_addr		^= 0x03;
+
+		raw_cmd ^= 0x03;
+		raw_ctl ^= 0x03;
+	}
 
 	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", raw_cmd, raw_ctl);
 }
 
 static int ixp4xx_pata_probe(struct platform_device *pdev)
 {
-	struct resource *cs0, *cs1;
-	struct ata_host *host;
-	struct ata_port *ap;
-	struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+	struct resource *cmd, *ctl;
+	struct ata_port_info pi = ixp4xx_port_info;
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct ixp4xx_pata *ixpp;
+	u32 csindex;
 	int ret;
 	int irq;
 
-	cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	cmd = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 
-	if (!cs0 || !cs1)
+	if (!cmd || !ctl)
 		return -EINVAL;
 
-	/* allocate host */
-	host = ata_host_alloc(&pdev->dev, 1);
-	if (!host)
+	ixpp = devm_kzalloc(dev, sizeof(*ixpp), GFP_KERNEL);
+	if (!ixpp)
 		return -ENOMEM;
 
-	/* acquire resources and fill host */
-	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	ixpp->rmap = syscon_node_to_regmap(np->parent);
+	if (IS_ERR(ixpp->rmap))
+		return dev_err_probe(dev, PTR_ERR(ixpp->rmap), "no regmap\n");
+
+	/* Inspect our address to figure out what chipselect the CMD is on */
+	ret = of_property_read_u32_index(np, "reg", 0, &csindex);
 	if (ret)
-		return ret;
+		return dev_err_probe(dev, ret, "can't inspect CMD address\n");
+	dev_info(dev, "using CS%d for PIO timing configuration\n", csindex);
+	ixpp->cmd_csreg = csindex * IXP4XX_EXP_TIMING_STRIDE;
 
-	data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
-	data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
+	ixpp->host = ata_host_alloc_pinfo(dev, ppi, 1);
+	if (!ixpp->host)
+		return -ENOMEM;
+	ixpp->host->private_data = ixpp;
 
-	if (!data->cs0 || !data->cs1)
+	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	ixpp->cmd = devm_ioremap_resource(dev, cmd);
+	ixpp->ctl = devm_ioremap_resource(dev, ctl);
+	if (IS_ERR(ixpp->cmd) || IS_ERR(ixpp->ctl))
 		return -ENOMEM;
 
 	irq = platform_get_irq(pdev, 0);
@@ -173,27 +283,23 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
 	else
 		return -EINVAL;
 
-	/* Setup expansion bus chip selects */
-	*data->cs0_cfg = data->cs0_bits;
-	*data->cs1_cfg = data->cs1_bits;
-
-	ap = host->ports[0];
-
-	ap->ops	= &ixp4xx_port_ops;
-	ap->pio_mask = ATA_PIO4;
-	ap->flags |= ATA_FLAG_NO_ATAPI;
+	/* Just one port to set up */
+	ixp4xx_setup_port(ixpp->host->ports[0], ixpp, cmd->start, ctl->start);
 
-	ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
+	ata_print_version_once(dev, DRV_VERSION);
 
-	ata_print_version_once(&pdev->dev, DRV_VERSION);
-
-	/* activate host */
-	return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
+	return ata_host_activate(ixpp->host, irq, ata_sff_interrupt, 0,
+				 &ixp4xx_sht);
 }
 
+static const struct of_device_id ixp4xx_pata_of_match[] = {
+	{ .compatible = "intel,ixp4xx-compact-flash", },
+	{ },
+};
+
 static struct platform_driver ixp4xx_pata_platform_driver = {
 	.driver	 = {
 		.name   = DRV_NAME,
+		.of_match_table = ixp4xx_pata_of_match,
 	},
 	.probe		= ixp4xx_pata_probe,
 	.remove		= ata_platform_remove_one,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index e7f7eee6ee9a..a5b96f3aad67 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -95,6 +95,17 @@ config IMX_WEIM
 	  The WEIM(Wireless External Interface Module) works like a bus.
 	  You can attach many different devices on it, such as NOR, onenand.
 
+config INTEL_IXP4XX_EB
+	bool "Intel IXP4xx expansion bus interface driver"
+	depends on HAS_IOMEM
+	depends on ARCH_IXP4XX || COMPILE_TEST
+	default ARCH_IXP4XX
+	select MFD_SYSCON
+	help
+	  Driver for the Intel IXP4xx expansion bus interface. The driver is
+	  needed to set up various chip select configuration parameters before
+	  devices on the expansion bus can be discovered.
+
 config MIPS_CDMM
 	bool "MIPS Common Device Memory Map (CDMM) Driver"
 	depends on CPU_MIPSR2 || CPU_MIPSR5
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 397e35392bff..1c29c5e8ffb8 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_FSL_MC_BUS)	+= fsl-mc/
 obj-$(CONFIG_BT1_APB)		+= bt1-apb.o
 obj-$(CONFIG_BT1_AXI)		+= bt1-axi.o
 obj-$(CONFIG_IMX_WEIM)		+= imx-weim.o
+obj-$(CONFIG_INTEL_IXP4XX_EB)	+= intel-ixp4xx-eb.o
 obj-$(CONFIG_MIPS_CDMM)		+= mips_cdmm.o
 obj-$(CONFIG_MVEBU_MBUS)	+= mvebu-mbus.o
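The PIO timing helpers in the pata driver above lean on regmap_update_bits(), which does a read-modify-write on the syscon register: only the bits inside the mask change, so the rest of the boot-time chipselect configuration survives. A minimal sketch of the same pattern; the register field and helper name here are illustrative, not taken from the driver:

	#include <linux/regmap.h>
	#include <linux/bits.h>

	/* Illustrative field: bus-width select bit in a CS config register */
	#define CS_CFG_BYTE_EN	BIT(0)

	/* Flip one chipselect between 8-bit and 16-bit bus mode */
	static int cs_set_bus_width(struct regmap *rmap, u32 cs_reg, bool eight_bit)
	{
		/* Read-modify-write: bits outside the mask keep their boot values */
		return regmap_update_bits(rmap, cs_reg, CS_CFG_BYTE_EN,
					  eight_bit ? CS_CFG_BYTE_EN : 0);
	}

This is also why the data-transfer path can briefly switch to 16-bit timing and back without reprogramming the whole timing word.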
diff --git a/drivers/bus/intel-ixp4xx-eb.c b/drivers/bus/intel-ixp4xx-eb.c
new file mode 100644
index 000000000000..a4388440aca7
--- /dev/null
+++ b/drivers/bus/intel-ixp4xx-eb.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx Expansion Bus Controller
+ * Copyright (C) 2021 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IXP4XX_EXP_NUM_CS		8
+
+#define IXP4XX_EXP_TIMING_CS0		0x00
+#define IXP4XX_EXP_TIMING_CS1		0x04
+#define IXP4XX_EXP_TIMING_CS2		0x08
+#define IXP4XX_EXP_TIMING_CS3		0x0c
+#define IXP4XX_EXP_TIMING_CS4		0x10
+#define IXP4XX_EXP_TIMING_CS5		0x14
+#define IXP4XX_EXP_TIMING_CS6		0x18
+#define IXP4XX_EXP_TIMING_CS7		0x1c
+
+/* Bits inside each CS timing register */
+#define IXP4XX_EXP_TIMING_STRIDE	0x04
+#define IXP4XX_EXP_CS_EN		BIT(31)
+#define IXP456_EXP_PAR_EN		BIT(30) /* Only on IXP45x and IXP46x */
+#define IXP4XX_EXP_T1_MASK		GENMASK(28, 27)
+#define IXP4XX_EXP_T1_SHIFT		28
+#define IXP4XX_EXP_T2_MASK		GENMASK(27, 26)
+#define IXP4XX_EXP_T2_SHIFT		26
+#define IXP4XX_EXP_T3_MASK		GENMASK(25, 22)
+#define IXP4XX_EXP_T3_SHIFT		22
+#define IXP4XX_EXP_T4_MASK		GENMASK(21, 20)
+#define IXP4XX_EXP_T4_SHIFT		20
+#define IXP4XX_EXP_T5_MASK		GENMASK(19, 16)
+#define IXP4XX_EXP_T5_SHIFT		16
+#define IXP4XX_EXP_CYC_TYPE_MASK	GENMASK(15, 14)
+#define IXP4XX_EXP_CYC_TYPE_SHIFT	14
+#define IXP4XX_EXP_SIZE_MASK		GENMASK(13, 10)
+#define IXP4XX_EXP_SIZE_SHIFT		10
+#define IXP4XX_EXP_CNFG_0		BIT(9) /* Always zero */
+#define IXP43X_EXP_SYNC_INTEL		BIT(8) /* Only on IXP43x */
+#define IXP43X_EXP_EXP_CHIP		BIT(7) /* Only on IXP43x */
+#define IXP4XX_EXP_BYTE_RD16		BIT(6)
+#define IXP4XX_EXP_HRDY_POL		BIT(5) /* Only on IXP42x */
+#define IXP4XX_EXP_MUX_EN		BIT(4)
+#define IXP4XX_EXP_SPLT_EN		BIT(3)
+#define IXP4XX_EXP_WORD			BIT(2) /* Always zero */
+#define IXP4XX_EXP_WR_EN		BIT(1)
+#define IXP4XX_EXP_BYTE_EN		BIT(0)
+#define IXP42X_RESERVED			(BIT(30)|IXP4XX_EXP_CNFG_0|BIT(8)|BIT(7)|IXP4XX_EXP_WORD)
+#define IXP43X_RESERVED			(BIT(30)|IXP4XX_EXP_CNFG_0|BIT(5)|IXP4XX_EXP_WORD)
+
+#define IXP4XX_EXP_CNFG0		0x20
+#define IXP4XX_EXP_CNFG0_MEM_MAP	BIT(31)
+#define IXP4XX_EXP_CNFG1		0x24
+
+#define IXP4XX_EXP_BOOT_BASE		0x00000000
+#define IXP4XX_EXP_NORMAL_BASE		0x50000000
+#define IXP4XX_EXP_STRIDE		0x01000000
+
+/* Fuses on the IXP43x */
+#define IXP43X_EXP_UNIT_FUSE_RESET	0x28
+#define IXP43x_EXP_FUSE_SPEED_MASK	GENMASK(23, 22)
+
+/* Number of device tree values in "reg" */
+#define IXP4XX_OF_REG_SIZE		3
+
+struct ixp4xx_eb {
+	struct device *dev;
+	struct regmap *rmap;
+	u32 bus_base;
+	bool is_42x;
+	bool is_43x;
+};
+
+struct ixp4xx_exp_tim_prop {
+	const char *prop;
+	u32 max;
+	u32 mask;
+	u16 shift;
+};
+
+static const struct ixp4xx_exp_tim_prop ixp4xx_exp_tim_props[] = {
+	{
+		.prop = "intel,ixp4xx-eb-t1",
+		.max = 3,
+		.mask = IXP4XX_EXP_T1_MASK,
+		.shift = IXP4XX_EXP_T1_SHIFT,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-t2",
+		.max = 3,
+		.mask = IXP4XX_EXP_T2_MASK,
+		.shift = IXP4XX_EXP_T2_SHIFT,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-t3",
+		.max = 15,
+		.mask = IXP4XX_EXP_T3_MASK,
+		.shift = IXP4XX_EXP_T3_SHIFT,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-t4",
+		.max = 3,
+		.mask = IXP4XX_EXP_T4_MASK,
+		.shift = IXP4XX_EXP_T4_SHIFT,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-t5",
+		.max = 15,
+		.mask = IXP4XX_EXP_T5_MASK,
+		.shift = IXP4XX_EXP_T5_SHIFT,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-byte-access-on-halfword",
+		.max = 1,
+		.mask = IXP4XX_EXP_BYTE_RD16,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-hpi-hrdy-pol-high",
+		.max = 1,
+		.mask = IXP4XX_EXP_HRDY_POL,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-mux-address-and-data",
+		.max = 1,
+		.mask = IXP4XX_EXP_MUX_EN,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-ahb-split-transfers",
+		.max = 1,
+		.mask = IXP4XX_EXP_SPLT_EN,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-write-enable",
+		.max = 1,
+		.mask = IXP4XX_EXP_WR_EN,
+	},
+	{
+		.prop = "intel,ixp4xx-eb-byte-access",
+		.max = 1,
+		.mask = IXP4XX_EXP_BYTE_EN,
+	},
+};
+
+static void ixp4xx_exp_setup_chipselect(struct ixp4xx_eb *eb,
+					struct device_node *np,
+					u32 cs_index,
+					u32 cs_size)
+{
+	u32 cs_cfg;
+	u32 val;
+	u32 cur_cssize;
+	u32 cs_order;
+	int ret;
+	int i;
+
+	if (eb->is_42x && (cs_index > 7)) {
+		dev_err(eb->dev,
+			"invalid chipselect %u, we only support 0-7\n",
+			cs_index);
+		return;
+	}
+	if (eb->is_43x && (cs_index > 3)) {
+		dev_err(eb->dev,
+			"invalid chipselect %u, we only support 0-3\n",
+			cs_index);
+		return;
+	}
+
+	/* Several chip selects can be joined into one device */
+	if (cs_size > IXP4XX_EXP_STRIDE)
+		cur_cssize = IXP4XX_EXP_STRIDE;
+	else
+		cur_cssize = cs_size;
+
+
+	/*
+	 * The following will read/modify/write the configuration for one
+	 * chipselect, attempting to leave the boot defaults in place unless
+	 * something is explicitly defined.
+	 */
+	regmap_read(eb->rmap, IXP4XX_EXP_TIMING_CS0 +
+		    IXP4XX_EXP_TIMING_STRIDE * cs_index, &cs_cfg);
+	dev_info(eb->dev, "CS%d at %#08x, size %#08x, config before: %#08x\n",
+		 cs_index, eb->bus_base + IXP4XX_EXP_STRIDE * cs_index,
+		 cur_cssize, cs_cfg);
+
+	/* Size set-up first align to 2^9 .. 2^24 */
+	cur_cssize = roundup_pow_of_two(cur_cssize);
+	if (cur_cssize < 512)
+		cur_cssize = 512;
+	cs_order = ilog2(cur_cssize);
+	if (cs_order < 9 || cs_order > 24) {
+		dev_err(eb->dev, "illegal size order %d\n", cs_order);
+		return;
+	}
+	dev_dbg(eb->dev, "CS%d size order: %d\n", cs_index, cs_order);
+	cs_cfg &= ~(IXP4XX_EXP_SIZE_MASK);
+	cs_cfg |= ((cs_order - 9) << IXP4XX_EXP_SIZE_SHIFT);
+
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_exp_tim_props); i++) {
+		const struct ixp4xx_exp_tim_prop *ip = &ixp4xx_exp_tim_props[i];
+
+		/* All are regular u32 values */
+		ret = of_property_read_u32(np, ip->prop, &val);
+		if (ret)
+			continue;
+
+		/* Handle bools (single bits) first */
+		if (ip->max == 1) {
+			if (val)
+				cs_cfg |= ip->mask;
+			else
+				cs_cfg &= ~ip->mask;
+			dev_info(eb->dev, "CS%d %s %s\n", cs_index,
+				 val ? "enabled" : "disabled",
+				 ip->prop);
+			continue;
+		}
+
+		if (val > ip->max) {
+			dev_err(eb->dev,
+				"CS%d too high value for %s: %u, capped at %u\n",
+				cs_index, ip->prop, val, ip->max);
+			val = ip->max;
+		}
+		/* This assumes max value fills all the assigned bits (and it does) */
+		cs_cfg &= ~ip->mask;
+		cs_cfg |= (val << ip->shift);
+		dev_info(eb->dev, "CS%d set %s to %u\n", cs_index, ip->prop, val);
+	}
+
+	ret = of_property_read_u32(np, "intel,ixp4xx-eb-cycle-type", &val);
+	if (!ret) {
+		if (val > 3) {
+			dev_err(eb->dev, "illegal cycle type %d\n", val);
+			return;
+		}
+		dev_info(eb->dev, "CS%d set cycle type %d\n", cs_index, val);
+		cs_cfg &= ~IXP4XX_EXP_CYC_TYPE_MASK;
+		cs_cfg |= val << IXP4XX_EXP_CYC_TYPE_SHIFT;
+	}
+
+	if (eb->is_42x)
+		cs_cfg &= ~IXP42X_RESERVED;
+	if (eb->is_43x) {
+		cs_cfg &= ~IXP43X_RESERVED;
+		/*
+		 * This bit for Intel strata flash is currently unused, but let's
+		 * report it if we find one.
+		 */
+		if (cs_cfg & IXP43X_EXP_SYNC_INTEL)
+			dev_info(eb->dev, "claims to be Intel strata flash\n");
+	}
+	cs_cfg |= IXP4XX_EXP_CS_EN;
+
+	regmap_write(eb->rmap,
+		     IXP4XX_EXP_TIMING_CS0 + IXP4XX_EXP_TIMING_STRIDE * cs_index,
+		     cs_cfg);
+	dev_info(eb->dev, "CS%d wrote %#08x into CS config\n", cs_index, cs_cfg);
+
+	/*
+	 * If several chip selects are joined together into one big
+	 * device area, we call ourselves recursively for each successive
+	 * chip select. For a 32MB flash chip this results in two calls
+	 * for example.
+	 */
+	if (cs_size > IXP4XX_EXP_STRIDE)
+		ixp4xx_exp_setup_chipselect(eb, np,
+					    cs_index + 1,
+					    cs_size - IXP4XX_EXP_STRIDE);
+}
+
+static void ixp4xx_exp_setup_child(struct ixp4xx_eb *eb,
+				   struct device_node *np)
+{
+	u32 cs_sizes[IXP4XX_EXP_NUM_CS];
+	int num_regs;
+	u32 csindex;
+	u32 cssize;
+	int ret;
+	int i;
+
+	num_regs = of_property_count_elems_of_size(np, "reg", IXP4XX_OF_REG_SIZE);
+	if (num_regs <= 0)
+		return;
+	dev_dbg(eb->dev, "child %s has %d register sets\n",
+		of_node_full_name(np), num_regs);
+
+	for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++)
+		cs_sizes[csindex] = 0;
+
+	for (i = 0; i < num_regs; i++) {
+		u32 rbase, rsize;
+
+		ret = of_property_read_u32_index(np, "reg",
+						 i * IXP4XX_OF_REG_SIZE, &csindex);
+		if (ret)
+			break;
+		ret = of_property_read_u32_index(np, "reg",
+						 i * IXP4XX_OF_REG_SIZE + 1, &rbase);
+		if (ret)
+			break;
+		ret = of_property_read_u32_index(np, "reg",
+						 i * IXP4XX_OF_REG_SIZE + 2, &rsize);
+		if (ret)
+			break;
+
+		if (csindex >= IXP4XX_EXP_NUM_CS) {
+			dev_err(eb->dev, "illegal CS %d\n", csindex);
+			continue;
+		}
+		/*
+		 * The memory window always starts from CS base so we need to add
+		 * the start and size to get to the size from the start of the CS
+		 * base. For example if CS0 is at 0x50000000 and the reg is
+		 * <0 0xe40000 0x40000> the size is e80000.
+		 *
+		 * Roof this if we have several regs setting the same CS.
+		 */
+		cssize = rbase + rsize;
+		dev_dbg(eb->dev, "CS%d size %#08x\n", csindex, cssize);
+		if (cs_sizes[csindex] < cssize)
+			cs_sizes[csindex] = cssize;
+	}
+
+	for (csindex = 0; csindex < IXP4XX_EXP_NUM_CS; csindex++) {
+		cssize = cs_sizes[csindex];
+		if (!cssize)
+			continue;
+		/* Just this one, so set it up and return */
+		ixp4xx_exp_setup_chipselect(eb, np, csindex, cssize);
+	}
+}
+
+static int ixp4xx_exp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct ixp4xx_eb *eb;
+	struct device_node *child;
+	bool have_children = false;
+	u32 val;
+	int ret;
+
+	eb = devm_kzalloc(dev, sizeof(*eb), GFP_KERNEL);
+	if (!eb)
+		return -ENOMEM;
+
+	eb->dev = dev;
+	eb->is_42x = of_device_is_compatible(np, "intel,ixp42x-expansion-bus-controller");
+	eb->is_43x = of_device_is_compatible(np, "intel,ixp43x-expansion-bus-controller");
+
+	eb->rmap = syscon_node_to_regmap(np);
+	if (IS_ERR(eb->rmap))
+		return dev_err_probe(dev, PTR_ERR(eb->rmap), "no regmap\n");
+
+	/* We check that the regmap work only on first read */
+	ret = regmap_read(eb->rmap, IXP4XX_EXP_CNFG0, &val);
+	if (ret)
+		return dev_err_probe(dev, ret, "cannot read regmap\n");
+	if (val & IXP4XX_EXP_CNFG0_MEM_MAP)
+		eb->bus_base = IXP4XX_EXP_BOOT_BASE;
+	else
+		eb->bus_base = IXP4XX_EXP_NORMAL_BASE;
+	dev_info(dev, "expansion bus at %08x\n", eb->bus_base);
+
+	if (eb->is_43x) {
+		/* Check some fuses */
+		regmap_read(eb->rmap, IXP43X_EXP_UNIT_FUSE_RESET, &val);
+		switch (FIELD_GET(IXP43x_EXP_FUSE_SPEED_MASK, val)) {
+		case 0:
+			dev_info(dev, "IXP43x at 533 MHz\n");
+			break;
+		case 1:
+			dev_info(dev, "IXP43x at 400 MHz\n");
+			break;
+		case 2:
+			dev_info(dev, "IXP43x at 667 MHz\n");
+			break;
+		default:
+			dev_info(dev, "IXP43x unknown speed\n");
+			break;
+		}
+	}
+
+	/* Walk over the child nodes and see what chipselects we use */
+	for_each_available_child_of_node(np, child) {
+		ixp4xx_exp_setup_child(eb, child);
+		/* We have at least one child */
+		have_children = true;
+	}
+
+	if (have_children)
+		return of_platform_default_populate(np, NULL, dev);
+
+	return 0;
+}
+
+static const struct of_device_id ixp4xx_exp_of_match[] = {
+	{ .compatible = "intel,ixp42x-expansion-bus-controller", },
+	{ .compatible = "intel,ixp43x-expansion-bus-controller", },
+	{ .compatible = "intel,ixp45x-expansion-bus-controller", },
+	{ .compatible = "intel,ixp46x-expansion-bus-controller", },
+	{ }
+};
+
+static struct platform_driver ixp4xx_exp_driver = {
+	.probe = ixp4xx_exp_probe,
+	.driver = {
+		.name = "intel-extbus",
+		.of_match_table = ixp4xx_exp_of_match,
+	},
+};
+module_platform_driver(ixp4xx_exp_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Intel IXP4xx external bus driver");
+MODULE_LICENSE("GPL");
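The chipselect sizing above encodes a window of 2^9 to 2^24 bytes as a 4-bit field holding (log2(size) - 9). A stand-alone sketch of just that encoding step; the function name is ours, not the driver's:

	#include <linux/log2.h>
	#include <linux/errno.h>

	/* Encode a chipselect window size into the 4-bit SIZE field value */
	static int cs_size_to_field(u32 size)
	{
		u32 order;

		if (size < 512)
			size = 512;		/* Minimum window is 2^9 bytes */
		order = ilog2(roundup_pow_of_two(size));
		if (order > 24)
			return -EINVAL;		/* Maximum window is 2^24 bytes */
		return order - 9;		/* 0..15, fits GENMASK(13, 10) */
	}

A 32 MB device therefore does not fit one window, which is why the driver recurses over successive chipselects in IXP4XX_EXP_STRIDE (16 MB) steps.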
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 148a4dd8cb9a..a51c2a8feed9 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -855,7 +855,7 @@ static int sysc_check_registers(struct sysc *ddata)
 }
 
 /**
- * syc_ioremap - ioremap register space for the interconnect target module
+ * sysc_ioremap - ioremap register space for the interconnect target module
  * @ddata: device driver data
  *
  * Note that the interconnect target module registers can be anywhere
@@ -1446,10 +1446,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 		   SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
 	SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
 		   SYSC_QUIRK_LEGACY_IDLE),
-	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
-		   SYSC_QUIRK_LEGACY_IDLE),
-	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
-		   SYSC_QUIRK_LEGACY_IDLE),
 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1501,6 +1497,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 		   SYSC_MODULE_QUIRK_SGX),
 	SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff,
+		   SYSC_QUIRK_SWSUP_SIDLE),
 	SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
 		   SYSC_MODULE_QUIRK_RTC_UNLOCK),
 	SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
@@ -1557,7 +1555,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
 	SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
 	SYSC_QUIRK("keypad", 0x4a31c000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, 0),
-	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
 	SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
 	SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
 	SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
@@ -1585,6 +1582,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
 	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
 	SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
+	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
+	SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff, 0),
 	SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
 	SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
 	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000013, 0xffffffff, 0),
@@ -3115,9 +3114,8 @@ static int sysc_probe(struct platform_device *pdev)
 		goto unprepare;
 
 	pm_runtime_enable(ddata->dev);
-	error = pm_runtime_get_sync(ddata->dev);
+	error = pm_runtime_resume_and_get(ddata->dev);
 	if (error < 0) {
-		pm_runtime_put_noidle(ddata->dev);
 		pm_runtime_disable(ddata->dev);
 		goto unprepare;
 	}
@@ -3175,9 +3173,8 @@ static int sysc_remove(struct platform_device *pdev)
 
 	cancel_delayed_work_sync(&ddata->idle_work);
 
-	error = pm_runtime_get_sync(ddata->dev);
+	error = pm_runtime_resume_and_get(ddata->dev);
 	if (error < 0) {
-		pm_runtime_put_noidle(ddata->dev);
 		pm_runtime_disable(ddata->dev);
 		goto unprepare;
 	}
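The ti-sysc hunks above swap pm_runtime_get_sync() for pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails. A sketch of the two equivalent error paths, assuming a generic struct device *dev:

	#include <linux/pm_runtime.h>

	static int resume_old_way(struct device *dev)
	{
		int error = pm_runtime_get_sync(dev);

		if (error < 0) {
			/* get_sync leaves the usage count raised even on error */
			pm_runtime_put_noidle(dev);
			return error;
		}
		return 0;
	}

	static int resume_new_way(struct device *dev)
	{
		/* Drops the usage count internally if the resume fails */
		return pm_runtime_resume_and_get(dev);
	}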
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
index 9396745e1c17..cbb184953510 100644
--- a/drivers/clocksource/timer-ixp4xx.c
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/platform_device.h>
 /* Goes away with OF conversion */
 #include <linux/platform_data/timer-ixp4xx.h>
 
@@ -29,9 +30,6 @@
 #define IXP4XX_OSRT1_OFFSET	0x08  /* Timer 1 Reload */
 #define IXP4XX_OST2_OFFSET	0x0C  /* Timer 2 Timestamp */
 #define IXP4XX_OSRT2_OFFSET	0x10  /* Timer 2 Reload */
-#define IXP4XX_OSWT_OFFSET	0x14  /* Watchdog Timer */
-#define IXP4XX_OSWE_OFFSET	0x18  /* Watchdog Enable */
-#define IXP4XX_OSWK_OFFSET	0x1C  /* Watchdog Key */
 #define IXP4XX_OSST_OFFSET	0x20  /* Timer Status */
 
 /*
@@ -45,17 +43,10 @@
 #define IXP4XX_OSST_TIMER_1_PEND	0x00000001
 #define IXP4XX_OSST_TIMER_2_PEND	0x00000002
 #define IXP4XX_OSST_TIMER_TS_PEND	0x00000004
-#define IXP4XX_OSST_TIMER_WDOG_PEND	0x00000008
-#define IXP4XX_OSST_TIMER_WARM_RESET	0x00000010
-
-#define IXP4XX_WDT_KEY			0x0000482E
-#define IXP4XX_WDT_RESET_ENABLE		0x00000001
-#define IXP4XX_WDT_IRQ_ENABLE		0x00000002
-#define IXP4XX_WDT_COUNT_ENABLE		0x00000004
+/* Remaining registers are for the watchdog and defined in the watchdog driver */
 
 struct ixp4xx_timer {
 	void __iomem *base;
-	unsigned int tick_rate;
 	u32 latch;
 	struct clock_event_device clkevt;
 #ifdef CONFIG_ARM
@@ -181,7 +172,6 @@ static __init int ixp4xx_timer_register(void __iomem *base,
 	if (!tmr)
 		return -ENOMEM;
 	tmr->base = base;
-	tmr->tick_rate = timer_freq;
 
 	/*
 	 * The timer register doesn't allow to specify the two least
@@ -239,6 +229,40 @@ static __init int ixp4xx_timer_register(void __iomem *base,
 	return 0;
 }
 
+static struct platform_device ixp4xx_watchdog_device = {
+	.name = "ixp4xx-watchdog",
+	.id = -1,
+};
+
+/*
+ * This probe gets called after the timer is already up and running. The main
+ * function on this platform is to spawn the watchdog device as a child.
+ */
+static int ixp4xx_timer_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	/* Pass the base address as platform data and nothing else */
+	ixp4xx_watchdog_device.dev.platform_data = local_ixp4xx_timer->base;
+	ixp4xx_watchdog_device.dev.parent = dev;
+	return platform_device_register(&ixp4xx_watchdog_device);
+}
+
+static const struct of_device_id ixp4xx_timer_dt_id[] = {
+	{ .compatible = "intel,ixp4xx-timer", },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver ixp4xx_timer_driver = {
+	.probe = ixp4xx_timer_probe,
+	.driver = {
+		.name = "ixp4xx-timer",
+		.of_match_table = ixp4xx_timer_dt_id,
+		.suppress_bind_attrs = true,
+	},
+};
+builtin_platform_driver(ixp4xx_timer_driver);
+
 /**
  * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
  * @timerbase: physical base of timer block
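The timer probe above exists only to hang the watchdog off the already-running timer block, handing the shared MMIO base over through platform_data. The general parent-spawns-child pattern, sketched with hypothetical names:

	#include <linux/platform_device.h>

	static void __iomem *shared_regs;	/* hypothetical shared MMIO base */

	static struct platform_device child_device = {
		.name = "example-child",	/* matched by the child driver's name */
		.id = -1,
	};

	static int parent_probe(struct platform_device *pdev)
	{
		/* Hand the shared resource to the child and parent it for PM/sysfs */
		child_device.dev.platform_data = shared_regs;
		child_device.dev.parent = &pdev->dev;
		return platform_device_register(&child_device);
	}

Parenting the child under the timer device keeps the device hierarchy honest even though the timer itself was registered long before the driver model came up.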
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 8070fd664bfc..cacc725ca545 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
 	s32 per_2_firi_addr;
 	s32 mcu_2_firi_addr;
 	s32 uart_2_per_addr;
-	s32 uart_2_mcu_addr;
+	s32 uart_2_mcu_ram_addr;
 	s32 per_2_app_addr;
 	s32 mcu_2_app_addr;
 	s32 per_2_per_addr;
 	s32 uartsh_2_per_addr;
-	s32 uartsh_2_mcu_addr;
+	s32 uartsh_2_mcu_ram_addr;
 	s32 per_2_shp_addr;
 	s32 mcu_2_shp_addr;
 	s32 ata_2_mcu_addr;
@@ -230,6 +230,10 @@ struct sdma_script_start_addrs {
 	s32 zcanfd_2_mcu_addr;
 	s32 zqspi_2_mcu_addr;
 	s32 mcu_2_ecspi_addr;
+	s32 mcu_2_sai_addr;
+	s32 sai_2_mcu_addr;
+	s32 uart_2_mcu_addr;
+	s32 uartsh_2_mcu_addr;
 	/* End of v3 array */
 	s32 mcu_2_zqspi_addr;
 	/* End of v4 array */
@@ -433,9 +437,10 @@ struct sdma_channel {
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
 	enum dma_status			status;
-	bool				context_loaded;
 	struct imx_dma_data		data;
 	struct work_struct		terminate_worker;
+	struct list_head		terminated;
+	bool				is_ram_script;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -476,6 +481,13 @@ struct sdma_driver_data {
 	int num_events;
 	struct sdma_script_start_addrs	*script_addrs;
 	bool check_ratio;
+	/*
+	 * ecspi ERR009165 fixed should be done in sdma script
+	 * and it has been fixed in soc from i.mx6ul.
+	 * please get more information from the below link:
+	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
+	 */
+	bool ecspi_fixed;
 };
 
 struct sdma_engine {
@@ -499,6 +511,7 @@ struct sdma_engine {
 	struct sdma_buffer_descriptor	*bd0;
 	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
 	bool				clk_ratio;
+	bool				fw_loaded;
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@@ -595,6 +608,13 @@ static struct sdma_driver_data sdma_imx6q = {
 	.script_addrs = &sdma_script_imx6q,
 };
 
+static struct sdma_driver_data sdma_imx6ul = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx6q,
+	.ecspi_fixed = true,
+};
+
 static struct sdma_script_start_addrs sdma_script_imx7d = {
 	.ap_2_ap_addr = 644,
 	.uart_2_mcu_addr = 819,
@@ -628,6 +648,7 @@ static const struct of_device_id sdma_dt_ids[] = {
 	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+	{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
 	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 	{ /* sentinel */ }
 };
@@ -919,6 +940,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	sdmac->pc_to_device = 0;
 	sdmac->device_to_device = 0;
 	sdmac->pc_to_pc = 0;
+	sdmac->is_ram_script = false;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -945,6 +967,17 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
 		break;
 	case IMX_DMATYPE_CSPI:
+		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+
+		/* Use rom script mcu_2_app if ERR009165 fixed */
+		if (sdmac->sdma->drvdata->ecspi_fixed) {
+			emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+		} else {
+			emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
+			sdmac->is_ram_script = true;
+		}
+
+		break;
 	case IMX_DMATYPE_EXT:
 	case IMX_DMATYPE_SSI:
 	case IMX_DMATYPE_SAI:
@@ -954,6 +987,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	case IMX_DMATYPE_SSI_DUAL:
 		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
 		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+		sdmac->is_ram_script = true;
 		break;
 	case IMX_DMATYPE_SSI_SP:
 	case IMX_DMATYPE_MMC:
@@ -968,6 +1002,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
 		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
 		per_2_per = sdma->script_addrs->per_2_per_addr;
+		sdmac->is_ram_script = true;
 		break;
 	case IMX_DMATYPE_ASRC_SP:
 		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
@@ -1008,9 +1043,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->context_loaded)
-		return 0;
-
 	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
 	else if (sdmac->direction == DMA_DEV_TO_DEV)
@@ -1053,8 +1085,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 
 	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-	sdmac->context_loaded = true;
-
 	return ret;
 }
 
@@ -1078,9 +1108,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 {
 	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
 						  terminate_worker);
-	unsigned long flags;
-	LIST_HEAD(head);
-
 	/*
 	 * According to NXP R&D team a delay of one BD SDMA cost time
 	 * (maximum is 1ms) should be added after disable of the channel
@@ -1089,11 +1116,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 	 */
 	usleep_range(1000, 2000);
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
-	vchan_get_all_descriptors(&sdmac->vc, &head);
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-	vchan_dma_desc_free_list(&sdmac->vc, &head);
-	sdmac->context_loaded = false;
+	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
 }
 
 static int sdma_terminate_all(struct dma_chan *chan)
@@ -1107,6 +1130,13 @@ static int sdma_terminate_all(struct dma_chan *chan)
 
 	if (sdmac->desc) {
 		vchan_terminate_vdesc(&sdmac->desc->vd);
+		/*
+		 * move out current descriptor into terminated list so that
+		 * it could be free in sdma_channel_terminate_work alone
+		 * later without potential involving next descriptor raised
+		 * up before the last descriptor terminated.
+		 */
+		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
 		sdmac->desc = NULL;
 		schedule_work(&sdmac->terminate_worker);
 	}
@@ -1168,7 +1198,6 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	int ret;
 
 	sdma_disable_channel(chan);
 
@@ -1208,9 +1237,7 @@ static int sdma_config_channel(struct dma_chan *chan)
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}
 
-	ret = sdma_load_context(sdmac);
-
-	return ret;
+	return 0;
 }
 
 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
@@ -1361,7 +1388,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 	sdmac->event_id0 = 0;
 	sdmac->event_id1 = 0;
-	sdmac->context_loaded = false;
 
 	sdma_set_channel_priority(sdmac, 0);
 
@@ -1374,6 +1400,11 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
 {
 	struct sdma_desc *desc;
 
+	if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
+		dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
+		goto err_out;
+	}
+
 	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
 	if (!desc)
 		goto err_out;
@@ -1722,8 +1753,8 @@ static void sdma_issue_pending(struct dma_chan *chan)
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	45
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	46
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
 			     const struct sdma_script_start_addrs *addr)
@@ -1747,6 +1778,19 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
 	for (i = 0; i < sdma->script_number; i++)
 		if (addr_arr[i] > 0)
 			saddr_arr[i] = addr_arr[i];
+
+	/*
+	 * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
+	 * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
+	 * to be compatible with legacy freescale/nxp sdma firmware, and they
+	 * are located in the bottom part of sdma_script_start_addrs which are
+	 * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+	 */
+	if (addr->uart_2_mcu_addr)
+		sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
+	if (addr->uartsh_2_mcu_addr)
+		sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
+
 }
 
 static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1803,6 +1847,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 
 	sdma_add_scripts(sdma, addr);
 
+	sdma->fw_loaded = true;
+
 	dev_info(sdma->dev, "loaded firmware %d.%d\n",
 		 header->version_major,
 		 header->version_minor);
@@ -2086,6 +2132,7 @@ static int sdma_probe(struct platform_device *pdev)
 
 		sdmac->channel = i;
 		sdmac->vc.desc_free = sdma_desc_free;
+		INIT_LIST_HEAD(&sdmac->terminated);
 		INIT_WORK(&sdmac->terminate_worker,
 			  sdma_channel_terminate_work);
 		/*
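The imx-sdma terminate path above harvests the in-flight descriptors into a per-channel "terminated" list under the channel lock, so the delayed worker frees exactly those and never touches descriptors issued after terminate_all() returned. A sketch of that split, assuming a hypothetical channel type built on the in-tree virt-dma helpers (drivers/dma/virt-dma.h, not a public header):

	#include <linux/workqueue.h>
	#include <linux/spinlock.h>
	#include "virt-dma.h"		/* in-tree only: drivers/dma/virt-dma.h */

	struct my_chan {			/* hypothetical */
		struct virt_dma_chan vc;
		struct list_head terminated;	/* INIT_LIST_HEAD() at channel setup */
		struct work_struct terminate_worker;
	};

	static int chan_terminate_all(struct my_chan *c)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/* Harvest current descriptors onto the private list */
		vchan_get_all_descriptors(&c->vc, &c->terminated);
		spin_unlock_irqrestore(&c->vc.lock, flags);

		schedule_work(&c->terminate_worker);
		return 0;
	}

	static void chan_terminate_work(struct work_struct *work)
	{
		struct my_chan *c = container_of(work, struct my_chan,
						 terminate_worker);

		/* Free only what terminate_all() harvested, nothing newer */
		vchan_dma_desc_free_list(&c->vc, &c->terminated);
	}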
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 5d3fd803d2bb..220a58cf0a44 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -6,39 +6,7 @@
 
 menu "Firmware Drivers"
 
-config ARM_SCMI_PROTOCOL
-	tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
-	depends on ARM || ARM64 || COMPILE_TEST
-	depends on MAILBOX || HAVE_ARM_SMCCC_DISCOVERY
-	help
-	  ARM System Control and Management Interface (SCMI) protocol is a
-	  set of operating system-independent software interfaces that are
-	  used in system management. SCMI is extensible and currently provides
-	  interfaces for: Discovery and self-description of the interfaces
-	  it supports, Power domain management which is the ability to place
-	  a given device or domain into the various power-saving states that
-	  it supports, Performance management which is the ability to control
-	  the performance of a domain that is composed of compute engines
-	  such as application processors and other accelerators, Clock
-	  management which is the ability to set and inquire rates on platform
-	  managed clocks and Sensor management which is the ability to read
-	  sensor data, and be notified of sensor value.
-
-	  This protocol library provides interface for all the client drivers
-	  making use of the features offered by the SCMI.
-
-config ARM_SCMI_POWER_DOMAIN
-	tristate "SCMI power domain driver"
-	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
-	default y
-	select PM_GENERIC_DOMAINS if PM
-	help
-	  This enables support for the SCMI power domains which can be
-	  enabled or disabled via the SCP firmware
-
-	  This driver can also be built as a module. If so, the module
-	  will be called scmi_pm_domain. Note this may needed early in boot
-	  before rootfs may be available.
+source "drivers/firmware/arm_scmi/Kconfig"
 
 config ARM_SCPI_PROTOCOL
 	tristate "ARM System Control and Power Interface (SCPI) Message Protocol"
@@ -235,7 +203,7 @@ config INTEL_STRATIX10_RSU
 	  Say Y here if you want Intel RSU support.
 
 config QCOM_SCM
-	bool
+	tristate "Qcom SCM driver"
 	depends on ARM || ARM64
 	depends on HAVE_ARM_SMCCC
 	select RESET_CONTROLLER
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 705fabe88156..5ced0673d94b 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -17,7 +17,8 @@ obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
 obj-$(CONFIG_FW_CFG_SYSFS)	+= qemu_fw_cfg.o
-obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_QCOM_SCM)		+= qcom-scm.o
+qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
 obj-$(CONFIG_SYSFB)		+= sysfb.o
 obj-$(CONFIG_SYSFB_SIMPLEFB)	+= sysfb_simplefb.o
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig
new file mode 100644
index 000000000000..7f4d2435503b
--- /dev/null
+++ b/drivers/firmware/arm_scmi/Kconfig
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "ARM System Control and Management Interface Protocol"
+
+config ARM_SCMI_PROTOCOL
+	tristate "ARM System Control and Management Interface (SCMI) Message Protocol"
+	depends on ARM || ARM64 || COMPILE_TEST
+	help
+	  ARM System Control and Management Interface (SCMI) protocol is a
+	  set of operating system-independent software interfaces that are
+	  used in system management. SCMI is extensible and currently provides
+	  interfaces for: Discovery and self-description of the interfaces
+	  it supports, Power domain management which is the ability to place
+	  a given device or domain into the various power-saving states that
+	  it supports, Performance management which is the ability to control
+	  the performance of a domain that is composed of compute engines
+	  such as application processors and other accelerators, Clock
+	  management which is the ability to set and inquire rates on platform
+	  managed clocks and Sensor management which is the ability to read
+	  sensor data, and be notified of sensor value.
+
+	  This protocol library provides interface for all the client drivers
+	  making use of the features offered by the SCMI.
+
+if ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_HAVE_TRANSPORT
+	bool
+	help
+	  This declares whether at least one SCMI transport has been configured.
+	  Used to trigger a build bug when trying to build SCMI without any
+	  configured transport.
+
+config ARM_SCMI_HAVE_SHMEM
+	bool
+	help
+	  This declares whether a shared memory based transport for SCMI is
+	  available.
+
+config ARM_SCMI_HAVE_MSG
+	bool
+	help
+	  This declares whether a message passing based transport for SCMI is
+	  available.
+
+config ARM_SCMI_TRANSPORT_MAILBOX
+	bool "SCMI transport based on Mailbox"
+	depends on MAILBOX
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_SHMEM
+	default y
+	help
+	  Enable mailbox based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on mailboxes, answer Y.
+
+config ARM_SCMI_TRANSPORT_SMC
+	bool "SCMI transport based on SMC"
+	depends on HAVE_ARM_SMCCC_DISCOVERY
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_SHMEM
+	default y
+	help
+	  Enable SMC based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on SMC, answer Y.
+
+config ARM_SCMI_TRANSPORT_VIRTIO
+	bool "SCMI transport based on VirtIO"
+	depends on VIRTIO
+	select ARM_SCMI_HAVE_TRANSPORT
+	select ARM_SCMI_HAVE_MSG
+	help
+	  This enables the virtio based transport for SCMI.
+
+	  If you want the ARM SCMI PROTOCOL stack to include support for a
+	  transport based on VirtIO, answer Y.
+
+endif #ARM_SCMI_PROTOCOL
+
+config ARM_SCMI_POWER_DOMAIN
+	tristate "SCMI power domain driver"
+	depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF)
+	default y
+	select PM_GENERIC_DOMAINS if PM
+	help
+	  This enables support for the SCMI power domains which can be
+	  enabled or disabled via the SCP firmware
+
+	  This driver can also be built as a module. If so, the module
+	  will be called scmi_pm_domain. Note this may needed early in boot
+	  before rootfs may be available.
+
+endmenu
diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile
index 6a2ef63306d6..1dcf123d64ab 100644
--- a/drivers/firmware/arm_scmi/Makefile
+++ b/drivers/firmware/arm_scmi/Makefile
@@ -1,9 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0-only
 scmi-bus-y = bus.o
 scmi-driver-y = driver.o notify.o
-scmi-transport-y = shmem.o
-scmi-transport-$(CONFIG_MAILBOX) += mailbox.o
-scmi-transport-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_SHMEM) = shmem.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_MAILBOX) += mailbox.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o
+scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o
+scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o
 scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o
 scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \
 		    $(scmi-transport-y)
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 8685619d38f9..dea1bfbe1052 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -14,8 +14,12 @@
 #include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
 #include <linux/module.h>
+#include <linux/refcount.h>
 #include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
 
 #include <asm/unaligned.h>
@@ -65,11 +69,22 @@ struct scmi_msg_resp_prot_version {
 #define MSG_XTRACT_TOKEN(hdr)	FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
 #define MSG_TOKEN_MAX		(MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
 
+/*
+ * Size of @pending_xfers hashtable included in @scmi_xfers_info; ideally, in
+ * order to minimize space and collisions, this should equal max_msg, i.e. the
+ * maximum number of in-flight messages on a specific platform, but such value
+ * is only available at runtime while kernel hashtables are statically sized:
+ * pick instead as a fixed static size the maximum number of entries that can
+ * fit the whole table into one 4k page.
+ */
+#define SCMI_PENDING_XFERS_HT_ORDER_SZ		9
+
 /**
  * struct scmi_msg_hdr - Message(Tx/Rx) header
  *
  * @id: The identifier of the message being sent
  * @protocol_id: The identifier of the protocol used to send @id message
+ * @type: The SCMI type for this message
  * @seq: The token to identify the message. When a message returns, the
  *	platform returns the whole message header unmodified including the
  *	token
@@ -80,6 +95,7 @@ struct scmi_msg_resp_prot_version {
 struct scmi_msg_hdr {
 	u8 id;
 	u8 protocol_id;
+	u8 type;
 	u16 seq;
 	u32 status;
 	bool poll_completion;
@@ -89,13 +105,14 @@ struct scmi_msg_hdr {
  * pack_scmi_header() - packs and returns 32-bit header
  *
  * @hdr: pointer to header containing all the information on message id,
- *	protocol id and sequence id.
+ *	protocol id, sequence id and type.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
 static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
 {
 	return FIELD_PREP(MSG_ID_MASK, hdr->id) |
+		FIELD_PREP(MSG_TYPE_MASK, hdr->type) |
 		FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
 		FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
 }
@@ -110,6 +127,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
 {
 	hdr->id = MSG_XTRACT_ID(msg_hdr);
 	hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
+	hdr->type = MSG_XTRACT_TYPE(msg_hdr);
 }
 
 /**
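pack_scmi_header()/unpack_scmi_header() above compose and split the 32-bit wire header with FIELD_PREP()/FIELD_GET(). A small self-contained round-trip sketch; the mask values follow the layout used by this header to the best of our reading:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define MSG_ID_MASK		GENMASK(7, 0)
	#define MSG_TYPE_MASK		GENMASK(9, 8)
	#define MSG_PROTOCOL_ID_MASK	GENMASK(17, 10)
	#define MSG_TOKEN_ID_MASK	GENMASK(27, 18)

	/* Pack the four header fields, then pull the token back out */
	static u16 demo_roundtrip(u8 id, u8 type, u8 protocol_id, u16 seq)
	{
		u32 hdr = FIELD_PREP(MSG_ID_MASK, id) |
			  FIELD_PREP(MSG_TYPE_MASK, type) |
			  FIELD_PREP(MSG_PROTOCOL_ID_MASK, protocol_id) |
			  FIELD_PREP(MSG_TOKEN_ID_MASK, seq);

		return FIELD_GET(MSG_TOKEN_ID_MASK, hdr);	/* == seq */
	}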
@@ -134,6 +152,27 @@ struct scmi_msg {
  *	buffer for the rx path as we use for the tx path.
  * @done: command message transmit completion event
  * @async_done: pointer to delayed response message received event completion
+ * @pending: True for xfers added to @pending_xfers hashtable
+ * @node: An hlist_node reference used to store this xfer, alternatively, on
+ *	  the free list @free_xfers or in the @pending_xfers hashtable
+ * @users: A refcount to track the active users for this xfer.
+ *	   This is meant to protect against the possibility that, when a command
+ *	   transaction times out concurrently with the reception of a valid
+ *	   response message, the xfer could be finally put on the TX path, and
+ *	   so vanish, while on the RX path scmi_rx_callback() is still
+ *	   processing it: in such a case this refcounting will ensure that, even
+ *	   though the timed-out transaction will anyway cause the command
+ *	   request to be reported as failed by time-out, the underlying xfer
+ *	   cannot be discarded and possibly reused until the last one user on
+ *	   the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with states transitions deemed
+ *	   valid being:
+ *	    - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ *	    - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ *	      (Missing synchronous response is assumed OK and ignored)
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
  */
 struct scmi_xfer {
 	int transfer_id;
@@ -142,8 +181,36 @@ struct scmi_xfer {
 	struct scmi_msg rx;
 	struct completion done;
 	struct completion *async_done;
+	bool pending;
+	struct hlist_node node;
+	refcount_t users;
+#define SCMI_XFER_FREE		0
+#define SCMI_XFER_BUSY		1
+	atomic_t busy;
+#define SCMI_XFER_SENT_OK	0
+#define SCMI_XFER_RESP_OK	1
+#define SCMI_XFER_DRESP_OK	2
+	int state;
+	/* A lock to protect state and busy fields */
+	spinlock_t lock;
+	void *priv;
 };
 
+/*
+ * An helper macro to lookup an xfer from the @pending_xfers hashtable
+ * using the message sequence number token as a key.
+ */
+#define XFER_FIND(__ht, __k)					\
+({								\
+	typeof(__k) k_ = __k;					\
+	struct scmi_xfer *xfer_ = NULL;				\
+								\
+	hash_for_each_possible((__ht), xfer_, node, k_)		\
+		if (xfer_->hdr.seq == k_)			\
+			break;					\
+	xfer_;							\
+})
+
 struct scmi_xfer_ops;
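XFER_FIND above is a thin wrapper around the kernel's static hashtable API: hash_for_each_possible() walks only the one bucket the key hashes into, and the seq comparison resolves collisions. A minimal stand-alone sketch of the same lookup, with the same 2^9-bucket sizing rationale (512 struct hlist_head entries are exactly one 4k page on 64-bit):

	#include <linux/hashtable.h>

	struct pending_item {
		u16 seq;
		struct hlist_node node;
	};

	/* 2^9 = 512 buckets; add items with hash_add(pending, &item->node, item->seq) */
	static DEFINE_HASHTABLE(pending, 9);

	static struct pending_item *pending_find(u16 seq)
	{
		struct pending_item *item;

		/* Walk only the bucket that 'seq' hashes into */
		hash_for_each_possible(pending, item, node, seq)
			if (item->seq == seq)
				return item;
		return NULL;
	}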
struct scmi_desc.max_msg_size */ +#define SCMI_MSG_MAX_PROT_OVERHEAD (2 * sizeof(__le32)) + +size_t msg_response_size(struct scmi_xfer *xfer); +size_t msg_command_size(struct scmi_xfer *xfer); +void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer); +u32 msg_read_header(struct scmi_msg_payld *msg); +void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer); +void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer); + void scmi_notification_instance_data_set(const struct scmi_handle *handle, void *priv); void *scmi_notification_instance_data_get(const struct scmi_handle *handle); - #endif /* _SCMI_COMMON_H */ diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 9b2e8d42a992..b406b3f78f46 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -21,6 +21,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/ktime.h> +#include <linux/hashtable.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of_address.h> @@ -67,16 +68,23 @@ struct scmi_requested_dev { /** * struct scmi_xfers_info - Structure to manage transfer information * - * @xfer_block: Preallocated Message array * @xfer_alloc_table: Bitmap table for allocated messages. * Index of this bitmap table is also used for message * sequence identifier. * @xfer_lock: Protection for message allocation + * @max_msg: Maximum number of messages that can be pending + * @free_xfers: A free list for available to use xfers. It is initialized with + * a number of xfers equal to the maximum allowed in-flight + * messages. + * @pending_xfers: An hashtable, indexed by msg_hdr.seq, used to keep all the + * currently in-flight messages. */ struct scmi_xfers_info { - struct scmi_xfer *xfer_block; unsigned long *xfer_alloc_table; spinlock_t xfer_lock; + int max_msg; + struct hlist_head free_xfers; + DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ); }; /** @@ -172,19 +180,6 @@ static inline int scmi_to_linux_errno(int errno) return -EIO; } -/** - * scmi_dump_header_dbg() - Helper to dump a message header. - * - * @dev: Device pointer corresponding to the SCMI entity - * @hdr: pointer to header. - */ -static inline void scmi_dump_header_dbg(struct device *dev, - struct scmi_msg_hdr *hdr) -{ - dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n", - hdr->id, hdr->seq, hdr->protocol_id); -} - void scmi_notification_instance_data_set(const struct scmi_handle *handle, void *priv) { @@ -205,45 +200,189 @@ void *scmi_notification_instance_data_get(const struct scmi_handle *handle) } /** + * scmi_xfer_token_set - Reserve and set new token for the xfer at hand + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer: The xfer to act upon + * + * Pick the next unused monotonically increasing token and set it into + * xfer->hdr.seq: picking a monotonically increasing value avoids immediate + * reuse of freshly completed or timed-out xfers, thus mitigating the risk + * of incorrect association of a late and expired xfer with a live in-flight + * transaction, both happening to re-use the same token identifier. 
+ *
+ * Since the platform is NOT required to answer our request in-order we
+ * should account for a few rare but possible scenarios:
+ *
+ * - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
+ *   using find_next_zero_bit() starting from candidate next_token bit
+ *
+ * - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but we
+ *   have plenty of free tokens at start, so try a second pass using
+ *   find_next_zero_bit() and starting from 0.
+ *
+ *   X = used in-flight
+ *
+ * Normal
+ * ------
+ *
+ *		|- xfer_id picked
+ *   -----------+----------------------------------------------------------
+ *   | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
+ *   ----------------------------------------------------------------------
+ *		^
+ *		|- next_token
+ *
+ * Out-of-order pending at start
+ * -----------------------------
+ *
+ *	  |- xfer_id picked, last_token fixed
+ *   -----+----------------------------------------------------------------
+ *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
+ *   ----------------------------------------------------------------------
+ *	  ^
+ *	  |- next_token
+ *
+ *
+ * Out-of-order pending at end
+ * ---------------------------
+ *
+ *	  |- xfer_id picked, last_token fixed
+ *   -----+----------------------------------------------------------------
+ *   |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
+ *   ----------------------------------------------------------------------
+ *	  ^
+ *	  |- next_token
+ *
+ * Context: Assumes to be called with @xfer_lock already acquired.
+ *
+ * Return: 0 on Success or error
+ */
+static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
+			       struct scmi_xfer *xfer)
+{
+	unsigned long xfer_id, next_token;
+
+	/*
+	 * Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
+	 * using the pre-allocated transfer_id as a base.
+	 * Note that the global transfer_id is shared across all message types
+	 * so there could be holes in the allocated set of monotonic sequence
+	 * numbers, but that is going to limit the effectiveness of the
+	 * mitigation only in very rare limit conditions.
+	 */
+	next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
+
+	/* Pick the next available xfer_id >= next_token */
+	xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+				     MSG_TOKEN_MAX, next_token);
+	if (xfer_id == MSG_TOKEN_MAX) {
+		/*
+		 * After heavily out-of-order responses, there are no free
+		 * tokens ahead, but only at start of xfer_alloc_table so
+		 * try again from the beginning.
+		 */
+		xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
+					     MSG_TOKEN_MAX, 0);
+		/*
+		 * Something is wrong if we got here since there can be a
+		 * maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
+		 * but we have not found any free token [0, MSG_TOKEN_MAX - 1].
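+		 * The WARN_ON_ONCE() below turns that impossible state into
+		 * a loud -ENOMEM instead of silently handing out a token that
+		 * is already in-flight.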
+		 */
+		if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
+			return -ENOMEM;
+	}
+
+	/* Update +/- last_token accordingly if we skipped some hole */
+	if (xfer_id != next_token)
+		atomic_add((int)(xfer_id - next_token), &transfer_last_id);
+
+	/* Set in-flight */
+	set_bit(xfer_id, minfo->xfer_alloc_table);
+	xfer->hdr.seq = (u16)xfer_id;
+
+	return 0;
+}
+
+/**
+ * scmi_xfer_token_clear - Release the token
+ *
+ * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @xfer: The xfer to act upon
+ */
+static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
+					 struct scmi_xfer *xfer)
+{
+	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
+}
+
+/**
  * scmi_xfer_get() - Allocate one message
  *
  * @handle: Pointer to SCMI entity handle
  * @minfo: Pointer to Tx/Rx Message management info based on channel type
+ * @set_pending: If true a monotonic token is picked and the xfer is added to
+ *		 the pending hash table.
  *
  * Helper function which is used by various message functions that are
  * exposed to clients of this driver for allocating a message traffic event.
  *
- * This function can sleep depending on pending requests already in the system
- * for the SCMI entity. Further, this also holds a spinlock to maintain
- * integrity of internal data structures.
+ * Picks an xfer from the free list @free_xfers (if any available) and, if
+ * required, sets a monotonically increasing token and stores the inflight xfer
+ * into the @pending_xfers hashtable for later retrieval.
+ *
+ * The successfully initialized xfer is refcounted.
+ *
+ * Context: Holds @xfer_lock while manipulating @xfer_alloc_table and
+ *	    @free_xfers.
  *
  * Return: 0 if all went fine, else corresponding error.
  */
 static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
-				       struct scmi_xfers_info *minfo)
+				       struct scmi_xfers_info *minfo,
+				       bool set_pending)
 {
-	u16 xfer_id;
+	int ret;
+	unsigned long flags;
 	struct scmi_xfer *xfer;
-	unsigned long flags, bit_pos;
-	struct scmi_info *info = handle_to_scmi_info(handle);
 
-	/* Keep the locked section as small as possible */
 	spin_lock_irqsave(&minfo->xfer_lock, flags);
-	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
-				      info->desc->max_msg);
-	if (bit_pos == info->desc->max_msg) {
+	if (hlist_empty(&minfo->free_xfers)) {
 		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 		return ERR_PTR(-ENOMEM);
 	}
-	set_bit(bit_pos, minfo->xfer_alloc_table);
-	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
 
-	xfer_id = bit_pos;
+	/* grab an xfer from the free_list */
+	xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
+	hlist_del_init(&xfer->node);
 
-	xfer = &minfo->xfer_block[xfer_id];
-	xfer->hdr.seq = xfer_id;
+	/*
+	 * Allocate transfer_id early so that it can also be used as a base
+	 * for monotonic sequence number generation if needed.
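+	 * (Only the low bits of transfer_id feed the token search in
+	 * scmi_xfer_token_set(); the full transfer_id value remains the
+	 * unique identifier used by the trace events.)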
+ */ xfer->transfer_id = atomic_inc_return(&transfer_last_id); + if (set_pending) { + /* Pick and set monotonic token */ + ret = scmi_xfer_token_set(minfo, xfer); + if (!ret) { + hash_add(minfo->pending_xfers, &xfer->node, + xfer->hdr.seq); + xfer->pending = true; + } else { + dev_err(handle->dev, + "Failed to get monotonic token %d\n", ret); + hlist_add_head(&xfer->node, &minfo->free_xfers); + xfer = ERR_PTR(ret); + } + } + + if (!IS_ERR(xfer)) { + refcount_set(&xfer->users, 1); + atomic_set(&xfer->busy, SCMI_XFER_FREE); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return xfer; } @@ -253,6 +392,9 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle, * @minfo: Pointer to Tx/Rx Message management info based on channel type * @xfer: message that was reserved by scmi_xfer_get * + * After refcount check, possibly release an xfer, clearing the token slot, + * removing xfer from @pending_xfers and putting it back into free_xfers. + * * This holds a spinlock to maintain integrity of internal data structures. */ static void @@ -260,17 +402,215 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) { unsigned long flags; + spin_lock_irqsave(&minfo->xfer_lock, flags); + if (refcount_dec_and_test(&xfer->users)) { + if (xfer->pending) { + scmi_xfer_token_clear(minfo, xfer); + hash_del(&xfer->node); + xfer->pending = false; + } + hlist_add_head(&xfer->node, &minfo->free_xfers); + } + spin_unlock_irqrestore(&minfo->xfer_lock, flags); +} + +/** + * scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id + * + * @minfo: Pointer to Tx/Rx Message management info based on channel type + * @xfer_id: Token ID to lookup in @pending_xfers + * + * Refcounting is untouched. + * + * Context: Assumes to be called with @xfer_lock already acquired. + * + * Return: A valid xfer on Success or error otherwise + */ +static struct scmi_xfer * +scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id) +{ + struct scmi_xfer *xfer = NULL; + + if (test_bit(xfer_id, minfo->xfer_alloc_table)) + xfer = XFER_FIND(minfo->pending_xfers, xfer_id); + + return xfer ?: ERR_PTR(-EINVAL); +} + +/** + * scmi_msg_response_validate - Validate message type against state of related + * xfer + * + * @cinfo: A reference to the channel descriptor. + * @msg_type: Message type to check + * @xfer: A reference to the xfer to validate against @msg_type + * + * This function checks if @msg_type is congruent with the current state of + * a pending @xfer; if an asynchronous delayed response is received before the + * related synchronous response (Out-of-Order Delayed Response) the missing + * synchronous response is assumed to be OK and completed, carrying on with the + * Delayed Response: this is done to address the case in which the underlying + * SCMI transport can deliver such out-of-order responses. + * + * Context: Assumes to be called with xfer->lock already acquired. + * + * Return: 0 on Success, error otherwise + */ +static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo, + u8 msg_type, + struct scmi_xfer *xfer) +{ /* - * Keep the locked section as small as possible - * NOTE: we might escape with smp_mb and no lock here.. - * but just be conservative and symmetric. + * Even if a response was indeed expected on this slot at this point, + * a buggy platform could wrongly reply feeding us an unexpected + * delayed response we're not prepared to handle: bail-out safely + * blaming firmware. 
*/ + if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) { + dev_err(cinfo->dev, + "Delayed Response for %d not expected! Buggy F/W ?\n", + xfer->hdr.seq); + return -EINVAL; + } + + switch (xfer->state) { + case SCMI_XFER_SENT_OK: + if (msg_type == MSG_TYPE_DELAYED_RESP) { + /* + * Delayed Response expected but delivered earlier. + * Assume message RESPONSE was OK and skip state. + */ + xfer->hdr.status = SCMI_SUCCESS; + xfer->state = SCMI_XFER_RESP_OK; + complete(&xfer->done); + dev_warn(cinfo->dev, + "Received valid OoO Delayed Response for %d\n", + xfer->hdr.seq); + } + break; + case SCMI_XFER_RESP_OK: + if (msg_type != MSG_TYPE_DELAYED_RESP) + return -EINVAL; + break; + case SCMI_XFER_DRESP_OK: + /* No further message expected once in SCMI_XFER_DRESP_OK */ + return -EINVAL; + } + + return 0; +} + +/** + * scmi_xfer_state_update - Update xfer state + * + * @xfer: A reference to the xfer to update + * @msg_type: Type of message being processed. + * + * Note that this message is assumed to have been already successfully validated + * by @scmi_msg_response_validate(), so here we just update the state. + * + * Context: Assumes to be called on an xfer exclusively acquired using the + * busy flag. + */ +static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type) +{ + xfer->hdr.type = msg_type; + + /* Unknown command types were already discarded earlier */ + if (xfer->hdr.type == MSG_TYPE_COMMAND) + xfer->state = SCMI_XFER_RESP_OK; + else + xfer->state = SCMI_XFER_DRESP_OK; +} + +static bool scmi_xfer_acquired(struct scmi_xfer *xfer) +{ + int ret; + + ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY); + + return ret == SCMI_XFER_FREE; +} + +/** + * scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer + * + * @cinfo: A reference to the channel descriptor. + * @msg_hdr: A message header to use as lookup key + * + * When a valid xfer is found for the sequence number embedded in the provided + * msg_hdr, reference counting is properly updated and exclusive access to this + * xfer is granted till released with @scmi_xfer_command_release. + * + * Return: A valid @xfer on Success or error otherwise. + */ +static inline struct scmi_xfer * +scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) +{ + int ret; + unsigned long flags; + struct scmi_xfer *xfer; + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + struct scmi_xfers_info *minfo = &info->tx_minfo; + u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); + u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); + + /* Are we even expecting this? */ spin_lock_irqsave(&minfo->xfer_lock, flags); - clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table); + xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id); + if (IS_ERR(xfer)) { + dev_err(cinfo->dev, + "Message for %d type %d is not expected!\n", + xfer_id, msg_type); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + return xfer; + } + refcount_inc(&xfer->users); spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + spin_lock_irqsave(&xfer->lock, flags); + ret = scmi_msg_response_validate(cinfo, msg_type, xfer); + /* + * If a pending xfer was found which was also in a congruent state with + * the received message, acquire exclusive access to it setting the busy + * flag. + * Spins only on the rare limit condition of concurrent reception of + * RESP and DRESP for the same xfer. 
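+	 * The acquisition itself is a plain atomic_cmpxchg() of xfer->busy
+	 * from SCMI_XFER_FREE to SCMI_XFER_BUSY (see scmi_xfer_acquired()
+	 * above), so the wait can only last for the short window in which
+	 * the concurrent winner still holds the xfer marked busy.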
+ */ + if (!ret) { + spin_until_cond(scmi_xfer_acquired(xfer)); + scmi_xfer_state_update(xfer, msg_type); + } + spin_unlock_irqrestore(&xfer->lock, flags); + + if (ret) { + dev_err(cinfo->dev, + "Invalid message type:%d for %d - HDR:0x%X state:%d\n", + msg_type, xfer_id, msg_hdr, xfer->state); + /* On error the refcount incremented above has to be dropped */ + __scmi_xfer_put(minfo, xfer); + xfer = ERR_PTR(-EINVAL); + } + + return xfer; +} + +static inline void scmi_xfer_command_release(struct scmi_info *info, + struct scmi_xfer *xfer) +{ + atomic_set(&xfer->busy, SCMI_XFER_FREE); + __scmi_xfer_put(&info->tx_minfo, xfer); +} + +static inline void scmi_clear_channel(struct scmi_info *info, + struct scmi_chan_info *cinfo) +{ + if (info->desc->ops->clear_channel) + info->desc->ops->clear_channel(cinfo); } -static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) +static void scmi_handle_notification(struct scmi_chan_info *cinfo, + u32 msg_hdr, void *priv) { struct scmi_xfer *xfer; struct device *dev = cinfo->dev; @@ -279,16 +619,17 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) ktime_t ts; ts = ktime_get_boottime(); - xfer = scmi_xfer_get(cinfo->handle, minfo); + xfer = scmi_xfer_get(cinfo->handle, minfo, false); if (IS_ERR(xfer)) { dev_err(dev, "failed to get free message slot (%ld)\n", PTR_ERR(xfer)); - info->desc->ops->clear_channel(cinfo); + scmi_clear_channel(info, cinfo); return; } unpack_scmi_header(msg_hdr, &xfer->hdr); - scmi_dump_header_dbg(dev, &xfer->hdr); + if (priv) + xfer->priv = priv; info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, @@ -300,59 +641,41 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr) __scmi_xfer_put(minfo, xfer); - info->desc->ops->clear_channel(cinfo); + scmi_clear_channel(info, cinfo); } static void scmi_handle_response(struct scmi_chan_info *cinfo, - u16 xfer_id, u8 msg_type) + u32 msg_hdr, void *priv) { struct scmi_xfer *xfer; - struct device *dev = cinfo->dev; struct scmi_info *info = handle_to_scmi_info(cinfo->handle); - struct scmi_xfers_info *minfo = &info->tx_minfo; - /* Are we even expecting this? */ - if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { - dev_err(dev, "message for %d is not expected!\n", xfer_id); - info->desc->ops->clear_channel(cinfo); - return; - } - - xfer = &minfo->xfer_block[xfer_id]; - /* - * Even if a response was indeed expected on this slot at this point, - * a buggy platform could wrongly reply feeding us an unexpected - * delayed response we're not prepared to handle: bail-out safely - * blaming firmware. - */ - if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) { - dev_err(dev, - "Delayed Response for %d not expected! 
Buggy F/W ?\n", - xfer_id); - info->desc->ops->clear_channel(cinfo); - /* It was unexpected, so nobody will clear the xfer if not us */ - __scmi_xfer_put(minfo, xfer); + xfer = scmi_xfer_command_acquire(cinfo, msg_hdr); + if (IS_ERR(xfer)) { + scmi_clear_channel(info, cinfo); return; } /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */ - if (msg_type == MSG_TYPE_DELAYED_RESP) + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) xfer->rx.len = info->desc->max_msg_size; - scmi_dump_header_dbg(dev, &xfer->hdr); - + if (priv) + xfer->priv = priv; info->desc->ops->fetch_response(cinfo, xfer); trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, - msg_type); + xfer->hdr.type); - if (msg_type == MSG_TYPE_DELAYED_RESP) { - info->desc->ops->clear_channel(cinfo); + if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { + scmi_clear_channel(info, cinfo); complete(xfer->async_done); } else { complete(&xfer->done); } + + scmi_xfer_command_release(info, xfer); } /** @@ -360,6 +683,7 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * * @cinfo: SCMI channel info * @msg_hdr: Message header + * @priv: Transport specific private data. * * Processes one received message to appropriate transfer information and * signals completion of the transfer. @@ -367,18 +691,17 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, * NOTE: This function will be invoked in IRQ context, hence should be * as optimal as possible. */ -void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) +void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr, void *priv) { - u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr); u8 msg_type = MSG_XTRACT_TYPE(msg_hdr); switch (msg_type) { case MSG_TYPE_NOTIFICATION: - scmi_handle_notification(cinfo, msg_hdr); + scmi_handle_notification(cinfo, msg_hdr, priv); break; case MSG_TYPE_COMMAND: case MSG_TYPE_DELAYED_RESP: - scmi_handle_response(cinfo, xfer_id, msg_type); + scmi_handle_response(cinfo, msg_hdr, priv); break; default: WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type); @@ -390,7 +713,7 @@ void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr) * xfer_put() - Release a transmit message * * @ph: Pointer to SCMI protocol handle - * @xfer: message that was reserved by scmi_xfer_get + * @xfer: message that was reserved by xfer_get_init */ static void xfer_put(const struct scmi_protocol_handle *ph, struct scmi_xfer *xfer) @@ -408,7 +731,12 @@ static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo, { struct scmi_info *info = handle_to_scmi_info(cinfo->handle); + /* + * Poll also on xfer->done so that polling can be forcibly terminated + * in case of out-of-order receptions of delayed responses + */ return info->desc->ops->poll_done(cinfo, xfer) || + try_wait_for_completion(&xfer->done) || ktime_after(ktime_get(), stop); } @@ -432,6 +760,12 @@ static int do_xfer(const struct scmi_protocol_handle *ph, struct device *dev = info->dev; struct scmi_chan_info *cinfo; + if (xfer->hdr.poll_completion && !info->desc->ops->poll_done) { + dev_warn_once(dev, + "Polling mode is not supported by transport.\n"); + return -EINVAL; + } + /* * Initialise protocol id now from protocol handle to avoid it being * overridden by mistake (or malice) by the protocol code mangling with @@ -448,6 +782,16 @@ static int do_xfer(const struct scmi_protocol_handle *ph, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.poll_completion); + xfer->state = SCMI_XFER_SENT_OK; + /* + * Even though spinlocking is not needed here since 
no race is possible + * on xfer->state due to the monotonically increasing tokens allocation, + * we must anyway ensure xfer->state initialization is not re-ordered + * after the .send_message() to be sure that on the RX path an early + * ISR calling scmi_rx_callback() cannot see an old stale xfer->state. + */ + smp_mb(); + ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); @@ -458,11 +802,22 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS); spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); - - if (ktime_before(ktime_get(), stop)) - info->desc->ops->fetch_response(cinfo, xfer); - else + if (ktime_before(ktime_get(), stop)) { + unsigned long flags; + + /* + * Do not fetch_response if an out-of-order delayed + * response is being processed. + */ + spin_lock_irqsave(&xfer->lock, flags); + if (xfer->state == SCMI_XFER_SENT_OK) { + info->desc->ops->fetch_response(cinfo, xfer); + xfer->state = SCMI_XFER_RESP_OK; + } + spin_unlock_irqrestore(&xfer->lock, flags); + } else { ret = -ETIMEDOUT; + } } else { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); @@ -557,7 +912,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph, tx_size > info->desc->max_msg_size) return -ERANGE; - xfer = scmi_xfer_get(pi->handle, minfo); + xfer = scmi_xfer_get(pi->handle, minfo, true); if (IS_ERR(xfer)) { ret = PTR_ERR(xfer); dev_err(dev, "failed to get free message slot(%d)\n", ret); @@ -566,6 +921,7 @@ static int xfer_get_init(const struct scmi_protocol_handle *ph, xfer->tx.len = tx_size; xfer->rx.len = rx_size ? : info->desc->max_msg_size; + xfer->hdr.type = MSG_TYPE_COMMAND; xfer->hdr.id = msg_id; xfer->hdr.poll_completion = false; @@ -1026,25 +1382,32 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, const struct scmi_desc *desc = sinfo->desc; /* Pre-allocated messages, no more than what hdr.seq can support */ - if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) { + if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) { dev_err(dev, "Invalid maximum messages %d, not in range [1 - %lu]\n", - desc->max_msg, MSG_TOKEN_MAX); + info->max_msg, MSG_TOKEN_MAX); return -EINVAL; } - info->xfer_block = devm_kcalloc(dev, desc->max_msg, - sizeof(*info->xfer_block), GFP_KERNEL); - if (!info->xfer_block) - return -ENOMEM; + hash_init(info->pending_xfers); - info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg), + /* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */ + info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(MSG_TOKEN_MAX), sizeof(long), GFP_KERNEL); if (!info->xfer_alloc_table) return -ENOMEM; - /* Pre-initialize the buffer pointer to pre-allocated buffers */ - for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) { + /* + * Preallocate a number of xfers equal to max inflight messages, + * pre-initialize the buffer pointer to pre-allocated buffers and + * attach all of them to the free list + */ + INIT_HLIST_HEAD(&info->free_xfers); + for (i = 0; i < info->max_msg; i++) { + xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL); + if (!xfer) + return -ENOMEM; + xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size, GFP_KERNEL); if (!xfer->rx.buf) @@ -1052,6 +1415,10 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, xfer->tx.buf = xfer->rx.buf; init_completion(&xfer->done); + spin_lock_init(&xfer->lock); + + /* Add 
initialized xfer to the free list */ + hlist_add_head(&xfer->node, &info->free_xfers); } spin_lock_init(&info->xfer_lock); @@ -1059,10 +1426,40 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, return 0; } +static int scmi_channels_max_msg_configure(struct scmi_info *sinfo) +{ + const struct scmi_desc *desc = sinfo->desc; + + if (!desc->ops->get_max_msg) { + sinfo->tx_minfo.max_msg = desc->max_msg; + sinfo->rx_minfo.max_msg = desc->max_msg; + } else { + struct scmi_chan_info *base_cinfo; + + base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE); + if (!base_cinfo) + return -EINVAL; + sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo); + + /* RX channel is optional so can be skipped */ + base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE); + if (base_cinfo) + sinfo->rx_minfo.max_msg = + desc->ops->get_max_msg(base_cinfo); + } + + return 0; +} + static int scmi_xfer_info_init(struct scmi_info *sinfo) { - int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); + int ret; + + ret = scmi_channels_max_msg_configure(sinfo); + if (ret) + return ret; + ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo); if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE)) ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo); @@ -1390,6 +1787,21 @@ void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table) mutex_unlock(&scmi_requested_devices_mtx); } +static int scmi_cleanup_txrx_channels(struct scmi_info *info) +{ + int ret; + struct idr *idr = &info->tx_idr; + + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); + idr_destroy(&info->tx_idr); + + idr = &info->rx_idr; + ret = idr_for_each(idr, info->desc->ops->chan_free, idr); + idr_destroy(&info->rx_idr); + + return ret; +} + static int scmi_probe(struct platform_device *pdev) { int ret; @@ -1424,13 +1836,19 @@ static int scmi_probe(struct platform_device *pdev) handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; + if (desc->ops->link_supplier) { + ret = desc->ops->link_supplier(dev); + if (ret) + return ret; + } + ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE); if (ret) return ret; ret = scmi_xfer_info_init(info); if (ret) - return ret; + goto clear_txrx_setup; if (scmi_notification_init(handle)) dev_err(dev, "SCMI Notifications NOT available.\n"); @@ -1443,7 +1861,7 @@ static int scmi_probe(struct platform_device *pdev) ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE); if (ret) { dev_err(dev, "unable to communicate with SCMI\n"); - return ret; + goto notification_exit; } mutex_lock(&scmi_list_mutex); @@ -1482,6 +1900,12 @@ static int scmi_probe(struct platform_device *pdev) } return 0; + +notification_exit: + scmi_notification_exit(&info->handle); +clear_txrx_setup: + scmi_cleanup_txrx_channels(info); + return ret; } void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) @@ -1493,7 +1917,6 @@ static int scmi_remove(struct platform_device *pdev) { int ret = 0, id; struct scmi_info *info = platform_get_drvdata(pdev); - struct idr *idr = &info->tx_idr; struct device_node *child; mutex_lock(&scmi_list_mutex); @@ -1517,14 +1940,7 @@ static int scmi_remove(struct platform_device *pdev) idr_destroy(&info->active_protocols); /* Safe to free channels since no more users */ - ret = idr_for_each(idr, info->desc->ops->chan_free, idr); - idr_destroy(&info->tx_idr); - - idr = &info->rx_idr; - ret = idr_for_each(idr, info->desc->ops->chan_free, idr); - idr_destroy(&info->rx_idr); - - return ret; + return 
scmi_cleanup_txrx_channels(info); } static ssize_t protocol_version_show(struct device *dev, @@ -1575,12 +1991,15 @@ ATTRIBUTE_GROUPS(versions); /* Each compatible listed below must have descriptor associated with it */ static const struct of_device_id scmi_of_match[] = { -#ifdef CONFIG_MAILBOX +#ifdef CONFIG_ARM_SCMI_TRANSPORT_MAILBOX { .compatible = "arm,scmi", .data = &scmi_mailbox_desc }, #endif -#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY +#ifdef CONFIG_ARM_SCMI_TRANSPORT_SMC { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc}, #endif +#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO + { .compatible = "arm,scmi-virtio", .data = &scmi_virtio_desc}, +#endif { /* Sentinel */ }, }; @@ -1596,10 +2015,69 @@ static struct platform_driver scmi_driver = { .remove = scmi_remove, }; +/** + * __scmi_transports_setup - Common helper to call transport-specific + * .init/.exit code if provided. + * + * @init: A flag to distinguish between init and exit. + * + * Note that, if provided, we invoke .init/.exit functions for all the + * transports currently compiled in. + * + * Return: 0 on Success. + */ +static inline int __scmi_transports_setup(bool init) +{ + int ret = 0; + const struct of_device_id *trans; + + for (trans = scmi_of_match; trans->data; trans++) { + const struct scmi_desc *tdesc = trans->data; + + if ((init && !tdesc->transport_init) || + (!init && !tdesc->transport_exit)) + continue; + + if (init) + ret = tdesc->transport_init(); + else + tdesc->transport_exit(); + + if (ret) { + pr_err("SCMI transport %s FAILED initialization!\n", + trans->compatible); + break; + } + } + + return ret; +} + +static int __init scmi_transports_init(void) +{ + return __scmi_transports_setup(true); +} + +static void __exit scmi_transports_exit(void) +{ + __scmi_transports_setup(false); +} + static int __init scmi_driver_init(void) { + int ret; + + /* Bail out if no SCMI transport was configured */ + if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT))) + return -EINVAL; + scmi_bus_init(); + /* Initialize any compiled-in transport which provided an init/exit */ + ret = scmi_transports_init(); + if (ret) + return ret; + scmi_base_register(); scmi_clock_register(); @@ -1628,6 +2106,8 @@ static void __exit scmi_driver_exit(void) scmi_bus_exit(); + scmi_transports_exit(); + platform_driver_unregister(&scmi_driver); } module_exit(scmi_driver_exit); diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index e3dcb58314ae..e09eb12bf421 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -43,7 +43,7 @@ static void rx_callback(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem)); + scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL); } static bool mailbox_chan_available(struct device *dev, int idx) diff --git a/drivers/firmware/arm_scmi/msg.c b/drivers/firmware/arm_scmi/msg.c new file mode 100644 index 000000000000..d33a704e5814 --- /dev/null +++ b/drivers/firmware/arm_scmi/msg.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * For transports using message passing. + * + * Derived from shm.c. + * + * Copyright (C) 2019-2021 ARM Ltd. 
+ * Copyright (C) 2020-2021 OpenSynergy GmbH + */ + +#include <linux/types.h> + +#include "common.h" + +/* + * struct scmi_msg_payld - Transport SDU layout + * + * The SCMI specification requires all parameters, message headers, return + * arguments or any protocol data to be expressed in little endian format only. + */ +struct scmi_msg_payld { + __le32 msg_header; + __le32 msg_payload[]; +}; + +/** + * msg_command_size() - Actual size of transport SDU for command. + * + * @xfer: message which core has prepared for sending + * + * Return: transport SDU size. + */ +size_t msg_command_size(struct scmi_xfer *xfer) +{ + return sizeof(struct scmi_msg_payld) + xfer->tx.len; +} + +/** + * msg_response_size() - Maximum size of transport SDU for response. + * + * @xfer: message which core has prepared for sending + * + * Return: transport SDU size. + */ +size_t msg_response_size(struct scmi_xfer *xfer) +{ + return sizeof(struct scmi_msg_payld) + sizeof(__le32) + xfer->rx.len; +} + +/** + * msg_tx_prepare() - Set up transport SDU for command. + * + * @msg: transport SDU for command + * @xfer: message which is being sent + */ +void msg_tx_prepare(struct scmi_msg_payld *msg, struct scmi_xfer *xfer) +{ + msg->msg_header = cpu_to_le32(pack_scmi_header(&xfer->hdr)); + if (xfer->tx.buf) + memcpy(msg->msg_payload, xfer->tx.buf, xfer->tx.len); +} + +/** + * msg_read_header() - Read SCMI header from transport SDU. + * + * @msg: transport SDU + * + * Return: SCMI header + */ +u32 msg_read_header(struct scmi_msg_payld *msg) +{ + return le32_to_cpu(msg->msg_header); +} + +/** + * msg_fetch_response() - Fetch response SCMI payload from transport SDU. + * + * @msg: transport SDU with response + * @len: transport SDU size + * @xfer: message being responded to + */ +void msg_fetch_response(struct scmi_msg_payld *msg, size_t len, + struct scmi_xfer *xfer) +{ + size_t prefix_len = sizeof(*msg) + sizeof(msg->msg_payload[0]); + + xfer->hdr.status = le32_to_cpu(msg->msg_payload[0]); + xfer->rx.len = min_t(size_t, xfer->rx.len, + len >= prefix_len ? len - prefix_len : 0); + + /* Take a copy to the rx buffer.. */ + memcpy(xfer->rx.buf, &msg->msg_payload[1], xfer->rx.len); +} + +/** + * msg_fetch_notification() - Fetch notification payload from transport SDU. + * + * @msg: transport SDU with notification + * @len: transport SDU size + * @max_len: maximum SCMI payload size to fetch + * @xfer: notification message + */ +void msg_fetch_notification(struct scmi_msg_payld *msg, size_t len, + size_t max_len, struct scmi_xfer *xfer) +{ + xfer->rx.len = min_t(size_t, max_len, + len >= sizeof(*msg) ? len - sizeof(*msg) : 0); + + /* Take a copy to the rx buffer.. 
*/
+	memcpy(xfer->rx.buf, msg->msg_payload, xfer->rx.len);
+}
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index bed5596c7209..4effecc3bb46 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -154,7 +154,8 @@ static int smc_send_message(struct scmi_chan_info *cinfo,
 	if (scmi_info->irq)
 		wait_for_completion(&scmi_info->tx_complete);
 
-	scmi_rx_callback(scmi_info->cinfo, shmem_read_header(scmi_info->shmem));
+	scmi_rx_callback(scmi_info->cinfo,
+			 shmem_read_header(scmi_info->shmem), NULL);
 
 	mutex_unlock(&scmi_info->shmem_lock);
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
new file mode 100644
index 000000000000..224577f86928
--- /dev/null
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Virtio Transport driver for Arm System Control and Management Interface
+ * (SCMI).
+ *
+ * Copyright (C) 2020-2021 OpenSynergy.
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+/**
+ * DOC: Theory of Operation
+ *
+ * The scmi-virtio transport implements a driver for the virtio SCMI device.
+ *
+ * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
+ * channel (virtio eventq, P2A channel). Each channel is implemented through a
+ * virtqueue. Access to each virtqueue is protected by spinlocks.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_scmi.h>
+
+#include "common.h"
+
+#define VIRTIO_SCMI_MAX_MSG_SIZE  128 /* Value may be increased. */
+#define VIRTIO_SCMI_MAX_PDU_SIZE \
+	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
+#define DESCRIPTORS_PER_TX_MSG 2
+
+/**
+ * struct scmi_vio_channel - Transport channel information
+ *
+ * @vqueue: Associated virtqueue
+ * @cinfo: SCMI Tx or Rx channel
+ * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
+ * @is_rx: Whether channel is an Rx channel
+ * @ready: Whether transport user is ready to hear about channel
+ * @max_msg: Maximum number of pending messages for this channel.
+ * @lock: Protects access to all members except ready.
+ * @ready_lock: Protects access to ready. If required, it must be taken before
+ *		lock.
+ */
+struct scmi_vio_channel {
+	struct virtqueue *vqueue;
+	struct scmi_chan_info *cinfo;
+	struct list_head free_list;
+	bool is_rx;
+	bool ready;
+	unsigned int max_msg;
+	/* lock to protect access to all members except ready. */
+	spinlock_t lock;
+	/* lock to protect access to ready flag.
*/ + spinlock_t ready_lock; +}; + +/** + * struct scmi_vio_msg - Transport PDU information + * + * @request: SDU used for commands + * @input: SDU used for (delayed) responses and notifications + * @list: List which scmi_vio_msg may be part of + * @rx_len: Input SDU size in bytes, once input has been received + */ +struct scmi_vio_msg { + struct scmi_msg_payld *request; + struct scmi_msg_payld *input; + struct list_head list; + unsigned int rx_len; +}; + +/* Only one SCMI VirtIO device can possibly exist */ +static struct virtio_device *scmi_vdev; + +static bool scmi_vio_have_vq_rx(struct virtio_device *vdev) +{ + return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS); +} + +static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + struct scatterlist sg_in; + int rc; + unsigned long flags; + + sg_init_one(&sg_in, msg->input, VIRTIO_SCMI_MAX_PDU_SIZE); + + spin_lock_irqsave(&vioch->lock, flags); + + rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC); + if (rc) + dev_err_once(vioch->cinfo->dev, + "failed to add to virtqueue (%d)\n", rc); + else + virtqueue_kick(vioch->vqueue); + + spin_unlock_irqrestore(&vioch->lock, flags); + + return rc; +} + +static void scmi_finalize_message(struct scmi_vio_channel *vioch, + struct scmi_vio_msg *msg) +{ + if (vioch->is_rx) { + scmi_vio_feed_vq_rx(vioch, msg); + } else { + unsigned long flags; + + spin_lock_irqsave(&vioch->lock, flags); + list_add(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->lock, flags); + } +} + +static void scmi_vio_complete_cb(struct virtqueue *vqueue) +{ + unsigned long ready_flags; + unsigned long flags; + unsigned int length; + struct scmi_vio_channel *vioch; + struct scmi_vio_msg *msg; + bool cb_enabled = true; + + if (WARN_ON_ONCE(!vqueue->vdev->priv)) + return; + vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index]; + + for (;;) { + spin_lock_irqsave(&vioch->ready_lock, ready_flags); + + if (!vioch->ready) { + if (!cb_enabled) + (void)virtqueue_enable_cb(vqueue); + goto unlock_ready_out; + } + + spin_lock_irqsave(&vioch->lock, flags); + if (cb_enabled) { + virtqueue_disable_cb(vqueue); + cb_enabled = false; + } + msg = virtqueue_get_buf(vqueue, &length); + if (!msg) { + if (virtqueue_enable_cb(vqueue)) + goto unlock_out; + cb_enabled = true; + } + spin_unlock_irqrestore(&vioch->lock, flags); + + if (msg) { + msg->rx_len = length; + scmi_rx_callback(vioch->cinfo, + msg_read_header(msg->input), msg); + + scmi_finalize_message(vioch, msg); + } + + spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); + } + +unlock_out: + spin_unlock_irqrestore(&vioch->lock, flags); +unlock_ready_out: + spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); +} + +static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" }; + +static vq_callback_t *scmi_vio_complete_callbacks[] = { + scmi_vio_complete_cb, + scmi_vio_complete_cb +}; + +static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo) +{ + struct scmi_vio_channel *vioch = base_cinfo->transport_info; + + return vioch->max_msg; +} + +static int virtio_link_supplier(struct device *dev) +{ + if (!scmi_vdev) { + dev_notice_once(dev, + "Deferring probe after not finding a bound scmi-virtio device\n"); + return -EPROBE_DEFER; + } + + if (!device_link_add(dev, &scmi_vdev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER)) { + dev_err(dev, "Adding link to supplier virtio device failed\n"); + return -ECANCELED; + } + + return 0; +} + +static bool virtio_chan_available(struct device *dev, int 
idx) +{ + struct scmi_vio_channel *channels, *vioch = NULL; + + if (WARN_ON_ONCE(!scmi_vdev)) + return false; + + channels = (struct scmi_vio_channel *)scmi_vdev->priv; + + switch (idx) { + case VIRTIO_SCMI_VQ_TX: + vioch = &channels[VIRTIO_SCMI_VQ_TX]; + break; + case VIRTIO_SCMI_VQ_RX: + if (scmi_vio_have_vq_rx(scmi_vdev)) + vioch = &channels[VIRTIO_SCMI_VQ_RX]; + break; + default: + return false; + } + + return vioch && !vioch->cinfo; +} + +static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, + bool tx) +{ + unsigned long flags; + struct scmi_vio_channel *vioch; + int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX; + int i; + + if (!scmi_vdev) + return -EPROBE_DEFER; + + vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index]; + + for (i = 0; i < vioch->max_msg; i++) { + struct scmi_vio_msg *msg; + + msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (tx) { + msg->request = devm_kzalloc(cinfo->dev, + VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->request) + return -ENOMEM; + } + + msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, + GFP_KERNEL); + if (!msg->input) + return -ENOMEM; + + if (tx) { + spin_lock_irqsave(&vioch->lock, flags); + list_add_tail(&msg->list, &vioch->free_list); + spin_unlock_irqrestore(&vioch->lock, flags); + } else { + scmi_vio_feed_vq_rx(vioch, msg); + } + } + + spin_lock_irqsave(&vioch->lock, flags); + cinfo->transport_info = vioch; + /* Indirectly setting channel not available any more */ + vioch->cinfo = cinfo; + spin_unlock_irqrestore(&vioch->lock, flags); + + spin_lock_irqsave(&vioch->ready_lock, flags); + vioch->ready = true; + spin_unlock_irqrestore(&vioch->ready_lock, flags); + + return 0; +} + +static int virtio_chan_free(int id, void *p, void *data) +{ + unsigned long flags; + struct scmi_chan_info *cinfo = p; + struct scmi_vio_channel *vioch = cinfo->transport_info; + + spin_lock_irqsave(&vioch->ready_lock, flags); + vioch->ready = false; + spin_unlock_irqrestore(&vioch->ready_lock, flags); + + scmi_free_channel(cinfo, data, id); + + spin_lock_irqsave(&vioch->lock, flags); + vioch->cinfo = NULL; + spin_unlock_irqrestore(&vioch->lock, flags); + + return 0; +} + +static int virtio_send_message(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_channel *vioch = cinfo->transport_info; + struct scatterlist sg_out; + struct scatterlist sg_in; + struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in }; + unsigned long flags; + int rc; + struct scmi_vio_msg *msg; + + spin_lock_irqsave(&vioch->lock, flags); + + if (list_empty(&vioch->free_list)) { + spin_unlock_irqrestore(&vioch->lock, flags); + return -EBUSY; + } + + msg = list_first_entry(&vioch->free_list, typeof(*msg), list); + list_del(&msg->list); + + msg_tx_prepare(msg->request, xfer); + + sg_init_one(&sg_out, msg->request, msg_command_size(xfer)); + sg_init_one(&sg_in, msg->input, msg_response_size(xfer)); + + rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC); + if (rc) { + list_add(&msg->list, &vioch->free_list); + dev_err_once(vioch->cinfo->dev, + "%s() failed to add to virtqueue (%d)\n", __func__, + rc); + } else { + virtqueue_kick(vioch->vqueue); + } + + spin_unlock_irqrestore(&vioch->lock, flags); + + return rc; +} + +static void virtio_fetch_response(struct scmi_chan_info *cinfo, + struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) { + msg_fetch_response(msg->input, msg->rx_len, xfer); + xfer->priv = NULL; + } +} + 
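+/*
+ * A minimal sketch of the Tx round-trip through this transport, assuming
+ * the core behaviour shown in driver.c above:
+ *
+ *   virtio_send_message()    queues msg->request and msg->input on the
+ *                            cmdq virtqueue;
+ *   scmi_vio_complete_cb()   pops the used msg, records msg->rx_len and
+ *                            calls scmi_rx_callback() with msg as priv,
+ *                            so the core stashes it into xfer->priv;
+ *   virtio_fetch_response()  (or the notification variant below, for the
+ *                            eventq) then copies msg->input into xfer->rx
+ *                            and drops the xfer->priv reference.
+ */
+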
+static void virtio_fetch_notification(struct scmi_chan_info *cinfo, + size_t max_len, struct scmi_xfer *xfer) +{ + struct scmi_vio_msg *msg = xfer->priv; + + if (msg) { + msg_fetch_notification(msg->input, msg->rx_len, max_len, xfer); + xfer->priv = NULL; + } +} + +static const struct scmi_transport_ops scmi_virtio_ops = { + .link_supplier = virtio_link_supplier, + .chan_available = virtio_chan_available, + .chan_setup = virtio_chan_setup, + .chan_free = virtio_chan_free, + .get_max_msg = virtio_get_max_msg, + .send_message = virtio_send_message, + .fetch_response = virtio_fetch_response, + .fetch_notification = virtio_fetch_notification, +}; + +static int scmi_vio_probe(struct virtio_device *vdev) +{ + struct device *dev = &vdev->dev; + struct scmi_vio_channel *channels; + bool have_vq_rx; + int vq_cnt; + int i; + int ret; + struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; + + /* Only one SCMI VirtiO device allowed */ + if (scmi_vdev) + return -EINVAL; + + have_vq_rx = scmi_vio_have_vq_rx(vdev); + vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; + + channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL); + if (!channels) + return -ENOMEM; + + if (have_vq_rx) + channels[VIRTIO_SCMI_VQ_RX].is_rx = true; + + ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_complete_callbacks, + scmi_vio_vqueue_names, NULL); + if (ret) { + dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt); + return ret; + } + + for (i = 0; i < vq_cnt; i++) { + unsigned int sz; + + spin_lock_init(&channels[i].lock); + spin_lock_init(&channels[i].ready_lock); + INIT_LIST_HEAD(&channels[i].free_list); + channels[i].vqueue = vqs[i]; + + sz = virtqueue_get_vring_size(channels[i].vqueue); + /* Tx messages need multiple descriptors. */ + if (!channels[i].is_rx) + sz /= DESCRIPTORS_PER_TX_MSG; + + if (sz > MSG_TOKEN_MAX) { + dev_info_once(dev, + "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n", + channels[i].is_rx ? 
"rx" : "tx", + sz, MSG_TOKEN_MAX); + sz = MSG_TOKEN_MAX; + } + channels[i].max_msg = sz; + } + + vdev->priv = channels; + scmi_vdev = vdev; + + return 0; +} + +static void scmi_vio_remove(struct virtio_device *vdev) +{ + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + scmi_vdev = NULL; +} + +static int scmi_vio_validate(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, + "device does not comply with spec version 1.x\n"); + return -EINVAL; + } + + return 0; +} + +static unsigned int features[] = { + VIRTIO_SCMI_F_P2A_CHANNELS, +}; + +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID }, + { 0 } +}; + +static struct virtio_driver virtio_scmi_driver = { + .driver.name = "scmi-virtio", + .driver.owner = THIS_MODULE, + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .id_table = id_table, + .probe = scmi_vio_probe, + .remove = scmi_vio_remove, + .validate = scmi_vio_validate, +}; + +static int __init virtio_scmi_init(void) +{ + return register_virtio_driver(&virtio_scmi_driver); +} + +static void __exit virtio_scmi_exit(void) +{ + unregister_virtio_driver(&virtio_scmi_driver); +} + +const struct scmi_desc scmi_virtio_desc = { + .transport_init = virtio_scmi_init, + .transport_exit = virtio_scmi_exit, + .ops = &scmi_virtio_ops, + .max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */ + .max_msg = 0, /* overridden by virtio_get_max_msg() */ + .max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE, +}; diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 47ea2bd42b10..ced1964faf42 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -71,7 +71,7 @@ static struct qcom_scm_wb_entry qcom_scm_wb[] = { { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 }, }; -static const char *qcom_scm_convention_names[] = { +static const char * const qcom_scm_convention_names[] = { [SMC_CONVENTION_UNKNOWN] = "unknown", [SMC_CONVENTION_ARM_32] = "smc arm 32", [SMC_CONVENTION_ARM_64] = "smc arm 64", @@ -331,7 +331,7 @@ int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus) .owner = ARM_SMCCC_OWNER_SIP, }; - if (!cpus || (cpus && cpumask_empty(cpus))) + if (!cpus || cpumask_empty(cpus)) return -EINVAL; for_each_cpu(cpu, cpus) { @@ -1299,6 +1299,7 @@ static const struct of_device_id qcom_scm_dt_match[] = { { .compatible = "qcom,scm" }, {} }; +MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); static struct platform_driver qcom_scm_driver = { .driver = { @@ -1315,3 +1316,6 @@ static int __init qcom_scm_init(void) return platform_driver_register(&qcom_scm_driver); } subsys_initcall(qcom_scm_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. 
SCM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c index 440d99c63638..3e9fa4b54358 100644 --- a/drivers/firmware/tegra/bpmp-debugfs.c +++ b/drivers/firmware/tegra/bpmp-debugfs.c @@ -296,25 +296,61 @@ static int bpmp_debug_show(struct seq_file *m, void *p) struct file *file = m->private; struct inode *inode = file_inode(file); struct tegra_bpmp *bpmp = inode->i_private; - char *databuf = NULL; char fnamebuf[256]; const char *filename; - uint32_t nbytes = 0; - size_t len; - int err; - - len = seq_get_buf(m, &databuf); - if (!databuf) - return -ENOMEM; + struct mrq_debug_request req = { + .cmd = cpu_to_le32(CMD_DEBUG_READ), + }; + struct mrq_debug_response resp; + struct tegra_bpmp_message msg = { + .mrq = MRQ_DEBUG, + .tx = { + .data = &req, + .size = sizeof(req), + }, + .rx = { + .data = &resp, + .size = sizeof(resp), + }, + }; + uint32_t fd = 0, len = 0; + int remaining, err; filename = get_filename(bpmp, file, fnamebuf, sizeof(fnamebuf)); if (!filename) return -ENOENT; - err = mrq_debug_read(bpmp, filename, databuf, len, &nbytes); - if (!err) - seq_commit(m, nbytes); + mutex_lock(&bpmp_debug_lock); + err = mrq_debug_open(bpmp, filename, &fd, &len, 0); + if (err) + goto out; + + req.frd.fd = fd; + remaining = len; + + while (remaining > 0) { + err = tegra_bpmp_transfer(bpmp, &msg); + if (err < 0) { + goto close; + } else if (msg.rx.ret < 0) { + err = -EINVAL; + goto close; + } + if (resp.frd.readlen > remaining) { + pr_err("%s: read data length invalid\n", __func__); + err = -EINVAL; + goto close; + } + + seq_write(m, resp.frd.data, resp.frd.readlen); + remaining -= resp.frd.readlen; + } + +close: + err = mrq_debug_close(bpmp, fd); +out: + mutex_unlock(&bpmp_debug_lock); return err; } diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 07b7c25cbed8..f61516c17589 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -253,6 +253,7 @@ config SPAPR_TCE_IOMMU config ARM_SMMU tristate "ARM Ltd. System MMU (SMMU) Support" depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU if ARM @@ -382,6 +383,7 @@ config QCOM_IOMMU # Note: iommu drivers cannot (yet?) 
be built as modules bool "Qualcomm IOMMU Support" depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on QCOM_SCM=y select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index f80c2ea39ca4..be0858bff4d3 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -9,6 +9,7 @@ * Copyright (C) 2009 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> */ +#include <linux/cpu_pm.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/init.h> @@ -232,7 +233,10 @@ struct gpmc_device { int irq; struct irq_chip irq_chip; struct gpio_chip gpio_chip; + struct notifier_block nb; + struct omap3_gpmc_regs context; int nirqs; + unsigned int is_suspended:1; }; static struct irq_domain *gpmc_irq_domain; @@ -2384,6 +2388,106 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc) return 0; } +static void omap3_gpmc_save_context(struct gpmc_device *gpmc) +{ + struct omap3_gpmc_regs *gpmc_context; + int i; + + if (!gpmc || !gpmc_base) + return; + + gpmc_context = &gpmc->context; + + gpmc_context->sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); + gpmc_context->irqenable = gpmc_read_reg(GPMC_IRQENABLE); + gpmc_context->timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); + gpmc_context->config = gpmc_read_reg(GPMC_CONFIG); + gpmc_context->prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1); + gpmc_context->prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2); + gpmc_context->prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL); + for (i = 0; i < gpmc_cs_num; i++) { + gpmc_context->cs_context[i].is_valid = gpmc_cs_mem_enabled(i); + if (gpmc_context->cs_context[i].is_valid) { + gpmc_context->cs_context[i].config1 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG1); + gpmc_context->cs_context[i].config2 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG2); + gpmc_context->cs_context[i].config3 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG3); + gpmc_context->cs_context[i].config4 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG4); + gpmc_context->cs_context[i].config5 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG5); + gpmc_context->cs_context[i].config6 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG6); + gpmc_context->cs_context[i].config7 = + gpmc_cs_read_reg(i, GPMC_CS_CONFIG7); + } + } +} + +static void omap3_gpmc_restore_context(struct gpmc_device *gpmc) +{ + struct omap3_gpmc_regs *gpmc_context; + int i; + + if (!gpmc || !gpmc_base) + return; + + gpmc_context = &gpmc->context; + + gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context->sysconfig); + gpmc_write_reg(GPMC_IRQENABLE, gpmc_context->irqenable); + gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context->timeout_ctrl); + gpmc_write_reg(GPMC_CONFIG, gpmc_context->config); + gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context->prefetch_config1); + gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context->prefetch_config2); + gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context->prefetch_control); + for (i = 0; i < gpmc_cs_num; i++) { + if (gpmc_context->cs_context[i].is_valid) { + gpmc_cs_write_reg(i, GPMC_CS_CONFIG1, + gpmc_context->cs_context[i].config1); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG2, + gpmc_context->cs_context[i].config2); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG3, + gpmc_context->cs_context[i].config3); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG4, + gpmc_context->cs_context[i].config4); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG5, + gpmc_context->cs_context[i].config5); + gpmc_cs_write_reg(i, GPMC_CS_CONFIG6, + gpmc_context->cs_context[i].config6); + 
gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, + gpmc_context->cs_context[i].config7); + } else { + gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, 0); + } + } +} + +static int omap_gpmc_context_notifier(struct notifier_block *nb, + unsigned long cmd, void *v) +{ + struct gpmc_device *gpmc; + + gpmc = container_of(nb, struct gpmc_device, nb); + if (gpmc->is_suspended || pm_runtime_suspended(gpmc->dev)) + return NOTIFY_OK; + + switch (cmd) { + case CPU_CLUSTER_PM_ENTER: + omap3_gpmc_save_context(gpmc); + break; + case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */ + break; + case CPU_CLUSTER_PM_EXIT: + omap3_gpmc_restore_context(gpmc); + break; + } + + return NOTIFY_OK; +} + static int gpmc_probe(struct platform_device *pdev) { int rc; @@ -2472,6 +2576,9 @@ static int gpmc_probe(struct platform_device *pdev) gpmc_probe_dt_children(pdev); + gpmc->nb.notifier_call = omap_gpmc_context_notifier; + cpu_pm_register_notifier(&gpmc->nb); + return 0; gpio_init_failed: @@ -2486,6 +2593,7 @@ static int gpmc_remove(struct platform_device *pdev) { struct gpmc_device *gpmc = platform_get_drvdata(pdev); + cpu_pm_unregister_notifier(&gpmc->nb); gpmc_free_irq(gpmc); gpmc_mem_exit(); pm_runtime_put_sync(&pdev->dev); @@ -2497,15 +2605,23 @@ static int gpmc_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int gpmc_suspend(struct device *dev) { - omap3_gpmc_save_context(); + struct gpmc_device *gpmc = dev_get_drvdata(dev); + + omap3_gpmc_save_context(gpmc); pm_runtime_put_sync(dev); + gpmc->is_suspended = 1; + return 0; } static int gpmc_resume(struct device *dev) { + struct gpmc_device *gpmc = dev_get_drvdata(dev); + pm_runtime_get_sync(dev); - omap3_gpmc_restore_context(); + omap3_gpmc_restore_context(gpmc); + gpmc->is_suspended = 0; + return 0; } #endif @@ -2527,74 +2643,3 @@ static __init int gpmc_init(void) return platform_driver_register(&gpmc_driver); } postcore_initcall(gpmc_init); - -static struct omap3_gpmc_regs gpmc_context; - -void omap3_gpmc_save_context(void) -{ - int i; - - if (!gpmc_base) - return; - - gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); - gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); - gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); - gpmc_context.config = gpmc_read_reg(GPMC_CONFIG); - gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1); - gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2); - gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL); - for (i = 0; i < gpmc_cs_num; i++) { - gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i); - if (gpmc_context.cs_context[i].is_valid) { - gpmc_context.cs_context[i].config1 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG1); - gpmc_context.cs_context[i].config2 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG2); - gpmc_context.cs_context[i].config3 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG3); - gpmc_context.cs_context[i].config4 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG4); - gpmc_context.cs_context[i].config5 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG5); - gpmc_context.cs_context[i].config6 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG6); - gpmc_context.cs_context[i].config7 = - gpmc_cs_read_reg(i, GPMC_CS_CONFIG7); - } - } -} - -void omap3_gpmc_restore_context(void) -{ - int i; - - if (!gpmc_base) - return; - - gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); - gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); - gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); - gpmc_write_reg(GPMC_CONFIG, gpmc_context.config); - 
gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1); - gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2); - gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control); - for (i = 0; i < gpmc_cs_num; i++) { - if (gpmc_context.cs_context[i].is_valid) { - gpmc_cs_write_reg(i, GPMC_CS_CONFIG1, - gpmc_context.cs_context[i].config1); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG2, - gpmc_context.cs_context[i].config2); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG3, - gpmc_context.cs_context[i].config3); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG4, - gpmc_context.cs_context[i].config4); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG5, - gpmc_context.cs_context[i].config5); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG6, - gpmc_context.cs_context[i].config6); - gpmc_cs_write_reg(i, GPMC_CS_CONFIG7, - gpmc_context.cs_context[i].config7); - } - } -} diff --git a/drivers/memory/tegra/tegra186.c b/drivers/memory/tegra/tegra186.c index e65eac5764d4..3d153881abc1 100644 --- a/drivers/memory/tegra/tegra186.c +++ b/drivers/memory/tegra/tegra186.c @@ -71,6 +71,7 @@ static int tegra186_mc_resume(struct tegra_mc *mc) return 0; } +#if IS_ENABLED(CONFIG_IOMMU_API) static void tegra186_mc_client_sid_override(struct tegra_mc *mc, const struct tegra_mc_client *client, unsigned int sid) @@ -108,6 +109,7 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc, writel(sid, mc->regs + client->regs.sid.override); } } +#endif static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev) { diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 40f91bc8514d..741289e385d5 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -44,6 +44,7 @@ config ATH10K_SNOC tristate "Qualcomm ath10k SNOC support" depends on ATH10K depends on ARCH_QCOM || COMPILE_TEST + depends on QCOM_SCM || !QCOM_SCM #if QCOM_SCM=m this can't be =y select QCOM_QMI_HELPERS help This module adds support for integrated WCN3990 chip connected diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 5656cac04b4c..be799a5abf8a 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -181,6 +181,13 @@ config RESET_RASPBERRYPI interfacing with RPi4's co-processor and model these firmware initialization routines as reset lines. +config RESET_RZG2L_USBPHY_CTRL + tristate "Renesas RZ/G2L USBPHY control driver" + depends on ARCH_R9A07G044 || COMPILE_TEST + help + Support for USBPHY Control found on RZ/G2L family. It mainly + controls reset and power down of the USB/PHY. 
+ config RESET_SCMI tristate "Reset driver controlled via ARM SCMI interface" depends on ARM_SCMI_PROTOCOL || COMPILE_TEST @@ -207,7 +214,6 @@ config RESET_SIMPLE - Realtek SoCs - RCC reset controller in STM32 MCUs - Allwinner SoCs - - ZTE's zx2967 family - SiFive FU740 SoCs config RESET_SOCFPGA diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index ea8b8d9ca565..21d46d8869ff 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o +obj-$(CONFIG_RESET_RZG2L_USBPHY_CTRL) += reset-rzg2l-usbphy-ctrl.o obj-$(CONFIG_RESET_SCMI) += reset-scmi.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o diff --git a/drivers/reset/reset-qcom-pdc.c b/drivers/reset/reset-qcom-pdc.c index ab74bccd4a5b..f22bb49a4ac8 100644 --- a/drivers/reset/reset-qcom-pdc.c +++ b/drivers/reset/reset-qcom-pdc.c @@ -11,18 +11,26 @@ #include <dt-bindings/reset/qcom,sdm845-pdc.h> -#define RPMH_PDC_SYNC_RESET 0x100 +#define RPMH_SDM845_PDC_SYNC_RESET 0x100 +#define RPMH_SC7280_PDC_SYNC_RESET 0x1000 struct qcom_pdc_reset_map { u8 bit; }; +struct qcom_pdc_reset_desc { + const struct qcom_pdc_reset_map *resets; + size_t num_resets; + unsigned int offset; +}; + struct qcom_pdc_reset_data { struct reset_controller_dev rcdev; struct regmap *regmap; + const struct qcom_pdc_reset_desc *desc; }; -static const struct regmap_config sdm845_pdc_regmap_config = { +static const struct regmap_config pdc_regmap_config = { .name = "pdc-reset", .reg_bits = 32, .reg_stride = 4, @@ -44,6 +52,33 @@ static const struct qcom_pdc_reset_map sdm845_pdc_resets[] = { [PDC_MODEM_SYNC_RESET] = {9}, }; +static const struct qcom_pdc_reset_desc sdm845_pdc_reset_desc = { + .resets = sdm845_pdc_resets, + .num_resets = ARRAY_SIZE(sdm845_pdc_resets), + .offset = RPMH_SDM845_PDC_SYNC_RESET, +}; + +static const struct qcom_pdc_reset_map sc7280_pdc_resets[] = { + [PDC_APPS_SYNC_RESET] = {0}, + [PDC_SP_SYNC_RESET] = {1}, + [PDC_AUDIO_SYNC_RESET] = {2}, + [PDC_SENSORS_SYNC_RESET] = {3}, + [PDC_AOP_SYNC_RESET] = {4}, + [PDC_DEBUG_SYNC_RESET] = {5}, + [PDC_GPU_SYNC_RESET] = {6}, + [PDC_DISPLAY_SYNC_RESET] = {7}, + [PDC_COMPUTE_SYNC_RESET] = {8}, + [PDC_MODEM_SYNC_RESET] = {9}, + [PDC_WLAN_RF_SYNC_RESET] = {10}, + [PDC_WPSS_SYNC_RESET] = {11}, +}; + +static const struct qcom_pdc_reset_desc sc7280_pdc_reset_desc = { + .resets = sc7280_pdc_resets, + .num_resets = ARRAY_SIZE(sc7280_pdc_resets), + .offset = RPMH_SC7280_PDC_SYNC_RESET, +}; + static inline struct qcom_pdc_reset_data *to_qcom_pdc_reset_data( struct reset_controller_dev *rcdev) { @@ -54,19 +89,18 @@ static int qcom_pdc_control_assert(struct reset_controller_dev *rcdev, unsigned long idx) { struct qcom_pdc_reset_data *data = to_qcom_pdc_reset_data(rcdev); + u32 mask = BIT(data->desc->resets[idx].bit); - return regmap_update_bits(data->regmap, RPMH_PDC_SYNC_RESET, - BIT(sdm845_pdc_resets[idx].bit), - BIT(sdm845_pdc_resets[idx].bit)); + return regmap_update_bits(data->regmap, data->desc->offset, mask, mask); } static int qcom_pdc_control_deassert(struct reset_controller_dev *rcdev, unsigned long idx) { struct qcom_pdc_reset_data *data = to_qcom_pdc_reset_data(rcdev); + u32 mask = BIT(data->desc->resets[idx].bit); - return regmap_update_bits(data->regmap, RPMH_PDC_SYNC_RESET, - BIT(sdm845_pdc_resets[idx].bit), 0); + return 
regmap_update_bits(data->regmap, data->desc->offset, mask, 0); } static const struct reset_control_ops qcom_pdc_reset_ops = { @@ -76,22 +110,27 @@ static const struct reset_control_ops qcom_pdc_reset_ops = { static int qcom_pdc_reset_probe(struct platform_device *pdev) { + const struct qcom_pdc_reset_desc *desc; struct qcom_pdc_reset_data *data; struct device *dev = &pdev->dev; void __iomem *base; struct resource *res; + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + data->desc = desc; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); - data->regmap = devm_regmap_init_mmio(dev, base, - &sdm845_pdc_regmap_config); + data->regmap = devm_regmap_init_mmio(dev, base, &pdc_regmap_config); if (IS_ERR(data->regmap)) { dev_err(dev, "Unable to initialize regmap\n"); return PTR_ERR(data->regmap); @@ -99,14 +138,15 @@ static int qcom_pdc_reset_probe(struct platform_device *pdev) data->rcdev.owner = THIS_MODULE; data->rcdev.ops = &qcom_pdc_reset_ops; - data->rcdev.nr_resets = ARRAY_SIZE(sdm845_pdc_resets); + data->rcdev.nr_resets = desc->num_resets; data->rcdev.of_node = dev->of_node; return devm_reset_controller_register(dev, &data->rcdev); } static const struct of_device_id qcom_pdc_reset_of_match[] = { - { .compatible = "qcom,sdm845-pdc-global" }, + { .compatible = "qcom,sc7280-pdc-global", .data = &sc7280_pdc_reset_desc }, + { .compatible = "qcom,sdm845-pdc-global", .data = &sdm845_pdc_reset_desc }, {} }; MODULE_DEVICE_TABLE(of, qcom_pdc_reset_of_match); diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c new file mode 100644 index 000000000000..e0704fd2b533 --- /dev/null +++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/G2L USBPHY control driver + * + * Copyright (C) 2021 Renesas Electronics Corporation + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/reset-controller.h> + +#define RESET 0x000 + +#define RESET_SEL_PLLRESET BIT(12) +#define RESET_PLLRESET BIT(8) + +#define RESET_SEL_P2RESET BIT(5) +#define RESET_SEL_P1RESET BIT(4) +#define RESET_PHYRST_2 BIT(1) +#define RESET_PHYRST_1 BIT(0) + +#define PHY_RESET_PORT2 (RESET_SEL_P2RESET | RESET_PHYRST_2) +#define PHY_RESET_PORT1 (RESET_SEL_P1RESET | RESET_PHYRST_1) + +#define NUM_PORTS 2 + +struct rzg2l_usbphy_ctrl_priv { + struct reset_controller_dev rcdev; + struct reset_control *rstc; + void __iomem *base; + + spinlock_t lock; +}; + +#define rcdev_to_priv(x) container_of(x, struct rzg2l_usbphy_ctrl_priv, rcdev) + +static int rzg2l_usbphy_ctrl_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzg2l_usbphy_ctrl_priv *priv = rcdev_to_priv(rcdev); + u32 port_mask = PHY_RESET_PORT1 | PHY_RESET_PORT2; + void __iomem *base = priv->base; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&priv->lock, flags); + val = readl(base + RESET); + val |= id ? 
PHY_RESET_PORT2 : PHY_RESET_PORT1; + if (port_mask == (val & port_mask)) + val |= RESET_PLLRESET; + writel(val, base + RESET); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int rzg2l_usbphy_ctrl_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzg2l_usbphy_ctrl_priv *priv = rcdev_to_priv(rcdev); + void __iomem *base = priv->base; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&priv->lock, flags); + val = readl(base + RESET); + + val |= RESET_SEL_PLLRESET; + val &= ~(RESET_PLLRESET | (id ? PHY_RESET_PORT2 : PHY_RESET_PORT1)); + writel(val, base + RESET); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int rzg2l_usbphy_ctrl_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct rzg2l_usbphy_ctrl_priv *priv = rcdev_to_priv(rcdev); + u32 port_mask; + + port_mask = id ? PHY_RESET_PORT2 : PHY_RESET_PORT1; + + return !!(readl(priv->base + RESET) & port_mask); +} + +static const struct of_device_id rzg2l_usbphy_ctrl_match_table[] = { + { .compatible = "renesas,rzg2l-usbphy-ctrl" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rzg2l_usbphy_ctrl_match_table); + +static const struct reset_control_ops rzg2l_usbphy_ctrl_reset_ops = { + .assert = rzg2l_usbphy_ctrl_assert, + .deassert = rzg2l_usbphy_ctrl_deassert, + .status = rzg2l_usbphy_ctrl_status, +}; + +static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rzg2l_usbphy_ctrl_priv *priv; + unsigned long flags; + int error; + u32 val; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); + if (IS_ERR(priv->rstc)) + return dev_err_probe(dev, PTR_ERR(priv->rstc), + "failed to get reset\n"); + + reset_control_deassert(priv->rstc); + + priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops; + priv->rcdev.of_reset_n_cells = 1; + priv->rcdev.nr_resets = NUM_PORTS; + priv->rcdev.of_node = dev->of_node; + priv->rcdev.dev = dev; + + error = devm_reset_controller_register(dev, &priv->rcdev); + if (error) + return error; + + spin_lock_init(&priv->lock); + dev_set_drvdata(dev, priv); + + pm_runtime_enable(&pdev->dev); + pm_runtime_resume_and_get(&pdev->dev); + + /* put pll and phy into reset state */ + spin_lock_irqsave(&priv->lock, flags); + val = readl(priv->base + RESET); + val |= RESET_SEL_PLLRESET | RESET_PLLRESET | PHY_RESET_PORT2 | PHY_RESET_PORT1; + writel(val, priv->base + RESET); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev) +{ + struct rzg2l_usbphy_ctrl_priv *priv = dev_get_drvdata(&pdev->dev); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); + + return 0; +} + +static struct platform_driver rzg2l_usbphy_ctrl_driver = { + .driver = { + .name = "rzg2l_usbphy_ctrl", + .of_match_table = rzg2l_usbphy_ctrl_match_table, + }, + .probe = rzg2l_usbphy_ctrl_probe, + .remove = rzg2l_usbphy_ctrl_remove, +}; +module_platform_driver(rzg2l_usbphy_ctrl_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Renesas RZ/G2L USBPHY Control"); +MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>"); diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h index 654c717e5467..714fa92575df 100644 ---
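The new controller registers two per-port reset lines (of_reset_n_cells = 1) and sequences the shared PLL reset itself: it is asserted only once both ports are in reset, and released again on the first deassert. A hedged sketch of the consumer side, for a hypothetical USB glue driver wired to port 0 via `resets = <&phyrst 0>`:

#include <linux/device.h>
#include <linux/reset.h>

static int usb_glue_probe_sketch(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	/* clears this port's PHYRST bit; the controller above also takes
	 * the shared PLL out of reset when the first port is deasserted */
	return reset_control_deassert(rstc);
}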
a/drivers/soc/mediatek/mt8173-pm-domains.h +++ b/drivers/soc/mediatek/mt8173-pm-domains.h @@ -71,6 +71,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = { .ctl_offs = SPM_MFG_ASYNC_PWR_CON, .sram_pdn_bits = GENMASK(11, 8), .sram_pdn_ack_bits = 0, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8173_POWER_DOMAIN_MFG_2D] = { .name = "mfg_2d", diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h index 579dfc8dc8fc..9dee485807c9 100644 --- a/drivers/soc/mediatek/mt8183-mmsys.h +++ b/drivers/soc/mediatek/mt8183-mmsys.h @@ -28,25 +28,32 @@ static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0, - MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L + MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L, + MT8183_OVL0_MOUT_EN_OVL0_2L }, { DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0, - MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0 + MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0, + MT8183_OVL0_2L_MOUT_EN_DISP_PATH0 }, { DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1, - MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1 + MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1, + MT8183_OVL1_2L_MOUT_EN_RDMA1 }, { DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, - MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0 + MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0, + MT8183_DITHER0_MOUT_IN_DSI0 }, { DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0, - MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L + MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L, + MT8183_DISP_PATH0_SEL_IN_OVL0_2L }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, - MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1 + MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1, + MT8183_DPI0_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, - MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0 + MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0, + MT8183_RDMA0_SOUT_COLOR0 } }; diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h new file mode 100644 index 000000000000..690e3fe2dee0 --- /dev/null +++ b/drivers/soc/mediatek/mt8365-mmsys.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT8365_MMSYS_H +#define __SOC_MEDIATEK_MT8365_MMSYS_H + +#define MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0xf3c +#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL 0xf4c +#define MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN 0xf50 +#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN 0xf54 +#define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60 +#define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64 +#define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68 + +#define MT8365_RDMA0_SOUT_COLOR0 0x1 +#define MT8365_DITHER_MOUT_EN_DSI0 0x1 +#define MT8365_DSI0_SEL_IN_DITHER 0x1 +#define MT8365_RDMA0_SEL_IN_OVL0 0x0 +#define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0 +#define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0 +#define MT8365_OVL0_MOUT_PATH0_SEL BIT(0) + +static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8365_DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, + MT8365_OVL0_MOUT_PATH0_SEL, MT8365_OVL0_MOUT_PATH0_SEL + }, + { + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, + MT8365_DISP_REG_CONFIG_DISP_RDMA0_SEL_IN, + MT8365_RDMA0_SEL_IN_OVL0, MT8365_RDMA0_SEL_IN_OVL0 + }, + { + DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, + MT8365_DISP_REG_CONFIG_DISP_RDMA0_SOUT_SEL, + 
MT8365_RDMA0_SOUT_COLOR0, MT8365_RDMA0_SOUT_COLOR0 + }, + { + DDP_COMPONENT_COLOR0, DDP_COMPONENT_CCORR, + MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, + MT8365_DISP_COLOR_SEL_IN_COLOR0,MT8365_DISP_COLOR_SEL_IN_COLOR0 + }, + { + DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, + MT8365_DISP_REG_CONFIG_DISP_DITHER0_MOUT_EN, + MT8365_DITHER_MOUT_EN_DSI0, MT8365_DITHER_MOUT_EN_DSI0 + }, + { + DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0, + MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN, + MT8365_DSI0_SEL_IN_DITHER, MT8365_DSI0_SEL_IN_DITHER + }, + { + DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0, + MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN, + MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 + }, +}; + +#endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index 080660ef11bf..a78e88f27b62 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -13,6 +13,7 @@ #include "mtk-mmsys.h" #include "mt8167-mmsys.h" #include "mt8183-mmsys.h" +#include "mt8365-mmsys.h" static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = { .clk_driver = "clk-mt2701-mm", @@ -52,6 +53,12 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = { .num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table), }; +static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = { + .clk_driver = "clk-mt8365-mm", + .routes = mt8365_mmsys_routing_table, + .num_routes = ARRAY_SIZE(mt8365_mmsys_routing_table), +}; + struct mtk_mmsys { void __iomem *regs; const struct mtk_mmsys_driver_data *data; @@ -68,7 +75,9 @@ void mtk_mmsys_ddp_connect(struct device *dev, for (i = 0; i < mmsys->data->num_routes; i++) if (cur == routes[i].from_comp && next == routes[i].to_comp) { - reg = readl_relaxed(mmsys->regs + routes[i].addr) | routes[i].val; + reg = readl_relaxed(mmsys->regs + routes[i].addr); + reg &= ~routes[i].mask; + reg |= routes[i].val; writel_relaxed(reg, mmsys->regs + routes[i].addr); } } @@ -85,7 +94,8 @@ void mtk_mmsys_ddp_disconnect(struct device *dev, for (i = 0; i < mmsys->data->num_routes; i++) if (cur == routes[i].from_comp && next == routes[i].to_comp) { - reg = readl_relaxed(mmsys->regs + routes[i].addr) & ~routes[i].val; + reg = readl_relaxed(mmsys->regs + routes[i].addr); + reg &= ~routes[i].mask; writel_relaxed(reg, mmsys->regs + routes[i].addr); } } @@ -157,6 +167,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data, }, + { + .compatible = "mediatek,mt8365-mmsys", + .data = &mt8365_mmsys_driver_data, + }, { } }; diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h index a760a34e6eca..9e2b81bd38db 100644 --- a/drivers/soc/mediatek/mtk-mmsys.h +++ b/drivers/soc/mediatek/mtk-mmsys.h @@ -35,41 +35,54 @@ #define RDMA0_SOUT_DSI1 0x1 #define RDMA0_SOUT_DSI2 0x4 #define RDMA0_SOUT_DSI3 0x5 +#define RDMA0_SOUT_MASK 0x7 #define RDMA1_SOUT_DPI0 0x2 #define RDMA1_SOUT_DPI1 0x3 #define RDMA1_SOUT_DSI1 0x1 #define RDMA1_SOUT_DSI2 0x4 #define RDMA1_SOUT_DSI3 0x5 +#define RDMA1_SOUT_MASK 0x7 #define RDMA2_SOUT_DPI0 0x2 #define RDMA2_SOUT_DPI1 0x3 #define RDMA2_SOUT_DSI1 0x1 #define RDMA2_SOUT_DSI2 0x4 #define RDMA2_SOUT_DSI3 0x5 +#define RDMA2_SOUT_MASK 0x7 #define DPI0_SEL_IN_RDMA1 0x1 #define DPI0_SEL_IN_RDMA2 0x3 +#define DPI0_SEL_IN_MASK 0x3 #define DPI1_SEL_IN_RDMA1 (0x1 << 8) #define DPI1_SEL_IN_RDMA2 (0x3 << 8) +#define DPI1_SEL_IN_MASK (0x3 << 8) #define DSI0_SEL_IN_RDMA1 0x1 #define 
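The mtk-mmsys change above is why every routing entry grows a mask field: the old connect path only OR'ed the value in, so moving a multi-bit selector (an RDMA SOUT field, say) from one sink to another could leave stale bits of the previous route set, and the old disconnect path could only clear the exact value it knew about. With the mask, both paths collapse to a single read-modify-write; the shape of the fix, condensed from the hunk:

reg = readl_relaxed(mmsys->regs + routes[i].addr);
reg &= ~routes[i].mask;		/* wipe the whole selector field */
reg |= routes[i].val;		/* connect only: install the new source */
writel_relaxed(reg, mmsys->regs + routes[i].addr);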
DSI0_SEL_IN_RDMA2 0x4 +#define DSI0_SEL_IN_MASK 0x7 #define DSI1_SEL_IN_RDMA1 0x1 #define DSI1_SEL_IN_RDMA2 0x4 +#define DSI1_SEL_IN_MASK 0x7 #define DSI2_SEL_IN_RDMA1 (0x1 << 16) #define DSI2_SEL_IN_RDMA2 (0x4 << 16) +#define DSI2_SEL_IN_MASK (0x7 << 16) #define DSI3_SEL_IN_RDMA1 (0x1 << 16) #define DSI3_SEL_IN_RDMA2 (0x4 << 16) +#define DSI3_SEL_IN_MASK (0x7 << 16) #define COLOR1_SEL_IN_OVL1 0x1 #define OVL_MOUT_EN_RDMA 0x1 #define BLS_TO_DSI_RDMA1_TO_DPI1 0x8 #define BLS_TO_DPI_RDMA1_TO_DSI 0x2 +#define BLS_RDMA1_DSI_DPI_MASK 0xf #define DSI_SEL_IN_BLS 0x0 #define DPI_SEL_IN_BLS 0x0 +#define DPI_SEL_IN_MASK 0x1 #define DSI_SEL_IN_RDMA 0x1 +#define DSI_SEL_IN_MASK 0x1 struct mtk_mmsys_routes { u32 from_comp; u32 to_comp; u32 addr; + u32 mask; u32 val; }; @@ -91,124 +104,168 @@ struct mtk_mmsys_driver_data { static const struct mtk_mmsys_routes mmsys_default_routing_table[] = { { DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0, - DISP_REG_CONFIG_OUT_SEL, BLS_TO_DSI_RDMA1_TO_DPI1 + DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK, + BLS_TO_DSI_RDMA1_TO_DPI1 }, { DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0, - DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_BLS + DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK, + DSI_SEL_IN_BLS }, { DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_OUT_SEL, BLS_TO_DPI_RDMA1_TO_DSI + DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK, + BLS_TO_DPI_RDMA1_TO_DSI }, { DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_RDMA + DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK, + DSI_SEL_IN_RDMA }, { DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_BLS + DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_MASK, + DPI_SEL_IN_BLS }, { DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1, - DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1 + DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1, + GAMMA_MOUT_EN_RDMA1 }, { DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0, - DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0 + DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0, + OD_MOUT_EN_RDMA0 }, { DDP_COMPONENT_OD1, DDP_COMPONENT_RDMA1, - DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1 + DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1, + OD1_MOUT_EN_RDMA1 }, { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, - DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0 + DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0, + OVL0_MOUT_EN_COLOR0 }, { DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0, - DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0 + DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0, + COLOR0_SEL_IN_OVL0 }, { DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, - DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA + DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA, + OVL_MOUT_EN_RDMA }, { DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1, - DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1 + DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1, + OVL1_MOUT_EN_COLOR1 }, { DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1, - DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1 + DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1, + COLOR1_SEL_IN_OVL1 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI0 + DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK, + RDMA0_SOUT_DPI0 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI1, - DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI1 + DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK, + RDMA0_SOUT_DPI1 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI1, - DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, 
RDMA0_SOUT_DSI1 + DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK, + RDMA0_SOUT_DSI1 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI2, - DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI2 + DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK, + RDMA0_SOUT_DSI2 }, { DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI3, - DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI3 + DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK, + RDMA0_SOUT_DSI3 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI0 + DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK, + RDMA1_SOUT_DPI0 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA1 + DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK, + DPI0_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1, - DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI1 + DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK, + RDMA1_SOUT_DPI1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1, - DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA1 + DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK, + DPI1_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0, - DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA1 + DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK, + DSI0_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1, - DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI1 + DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK, + RDMA1_SOUT_DSI1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1, - DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA1 + DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK, + DSI1_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2, - DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI2 + DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK, + RDMA1_SOUT_DSI2 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2, - DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA1 + DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK, + DSI2_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3, - DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI3 + DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK, + RDMA1_SOUT_DSI3 }, { DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3, - DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA1 + DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK, + DSI3_SEL_IN_RDMA1 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI0 + DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK, + RDMA2_SOUT_DPI0 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0, - DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA2 + DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK, + DPI0_SEL_IN_RDMA2 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1, - DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI1 + DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK, + RDMA2_SOUT_DPI1 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1, - DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA2 + DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK, + DPI1_SEL_IN_RDMA2 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0, - DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA2 + DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK, + DSI0_SEL_IN_RDMA2 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1, - DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI1 + DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK, + RDMA2_SOUT_DSI1 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1, - DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA2 + DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK, + DSI1_SEL_IN_RDMA2 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2, - DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI2 + 
DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK, + RDMA2_SOUT_DSI2 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2, - DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA2 + DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK, + DSI2_SEL_IN_RDMA2 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3, - DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI3 + DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK, + RDMA2_SOUT_DSI3 }, { DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3, - DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA2 + DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK, + DSI3_SEL_IN_RDMA2 + }, { + DDP_COMPONENT_UFOE, DDP_COMPONENT_DSI0, + DISP_REG_CONFIG_DISP_UFOE_MOUT_EN, UFOE_MOUT_EN_DSI0, + UFOE_MOUT_EN_DSI0 } }; diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index 21a4e113bbec..c5ac649ae51b 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -60,7 +60,7 @@ #define BUS_PROT_UPDATE_TOPAXI(_mask) \ BUS_PROT_UPDATE(_mask, \ INFRA_TOPAXI_PROTECTEN, \ - INFRA_TOPAXI_PROTECTEN_CLR, \ + INFRA_TOPAXI_PROTECTEN, \ INFRA_TOPAXI_PROTECTSTA1) struct scpsys_bus_prot_data { diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c index b24cc77d1889..4ce8e816154f 100644 --- a/drivers/soc/qcom/cpr.c +++ b/drivers/soc/qcom/cpr.c @@ -801,38 +801,6 @@ unlock: return ret; } -static int cpr_read_efuse(struct device *dev, const char *cname, u32 *data) -{ - struct nvmem_cell *cell; - ssize_t len; - char *ret; - int i; - - *data = 0; - - cell = nvmem_cell_get(dev, cname); - if (IS_ERR(cell)) { - if (PTR_ERR(cell) != -EPROBE_DEFER) - dev_err(dev, "undefined cell %s\n", cname); - return PTR_ERR(cell); - } - - ret = nvmem_cell_read(cell, &len); - nvmem_cell_put(cell); - if (IS_ERR(ret)) { - dev_err(dev, "can't read cell %s\n", cname); - return PTR_ERR(ret); - } - - for (i = 0; i < len; i++) - *data |= ret[i] << (8 * i); - - kfree(ret); - dev_dbg(dev, "efuse read(%s) = %x, bytes %zd\n", cname, *data, len); - - return 0; -} - static int cpr_populate_ring_osc_idx(struct cpr_drv *drv) { @@ -843,8 +811,7 @@ cpr_populate_ring_osc_idx(struct cpr_drv *drv) int ret; for (; fuse < end; fuse++, fuses++) { - ret = cpr_read_efuse(drv->dev, fuses->ring_osc, - &data); + ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->ring_osc, &data); if (ret) return ret; fuse->ring_osc_idx = data; @@ -863,7 +830,7 @@ static int cpr_read_fuse_uV(const struct cpr_desc *desc, u32 bits = 0; int ret; - ret = cpr_read_efuse(drv->dev, init_v_efuse, &bits); + ret = nvmem_cell_read_variable_le_u32(drv->dev, init_v_efuse, &bits); if (ret) return ret; @@ -932,7 +899,7 @@ static int cpr_fuse_corner_init(struct cpr_drv *drv) } /* Populate target quotient by scaling */ - ret = cpr_read_efuse(drv->dev, fuses->quotient, &fuse->quot); + ret = nvmem_cell_read_variable_le_u32(drv->dev, fuses->quotient, &fuse->quot); if (ret) return ret; @@ -1001,7 +968,7 @@ static int cpr_calculate_scaling(const char *quot_offset, prev_fuse = fuse - 1; if (quot_offset) { - ret = cpr_read_efuse(drv->dev, quot_offset, "_diff); + ret = nvmem_cell_read_variable_le_u32(drv->dev, quot_offset, "_diff); if (ret) return ret; @@ -1701,7 +1668,7 @@ static int cpr_probe(struct platform_device *pdev) * initialized after attaching to the power domain, * since it depends on the CPU's OPP table. 
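The CPR hunks above delete a ~30-line open-coded eFuse reader in favour of the generic nvmem consumer helper, which performs the same cell get/read/little-endian assemble/put sequence internally (including propagating -EPROBE_DEFER). The call shape used throughout the patch:

#include <linux/nvmem-consumer.h>

u32 cpr_rev;
int ret;

ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev);
if (ret)
	return ret;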
*/ - ret = cpr_read_efuse(dev, "cpr_fuse_revision", &cpr_rev); + ret = nvmem_cell_read_variable_le_u32(dev, "cpr_fuse_revision", &cpr_rev); if (ret) return ret; diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c index eba7f76f9d61..bda170d7b4a2 100644 --- a/drivers/soc/qcom/mdt_loader.c +++ b/drivers/soc/qcom/mdt_loader.c @@ -166,6 +166,8 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, metadata = qcom_mdt_read_metadata(fw, &metadata_len); if (IS_ERR(metadata)) { ret = PTR_ERR(metadata); + dev_err(dev, "error %d reading firmware %s metadata\n", + ret, fw_name); goto out; } @@ -173,7 +175,9 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, kfree(metadata); if (ret) { - dev_err(dev, "invalid firmware metadata\n"); + /* Invalid firmware metadata */ + dev_err(dev, "error %d initializing firmware %s\n", + ret, fw_name); goto out; } } @@ -199,7 +203,9 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, max_addr - min_addr); if (ret) { - dev_err(dev, "unable to setup relocation\n"); + /* Unable to set up relocation */ + dev_err(dev, "error %d setting up firmware %s\n", + ret, fw_name); goto out; } } @@ -243,9 +249,8 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, if (phdr->p_filesz && phdr->p_offset < fw->size) { /* Firmware is large enough to be non-split */ if (phdr->p_offset + phdr->p_filesz > fw->size) { - dev_err(dev, - "failed to load segment %d from truncated file %s\n", - i, firmware); + dev_err(dev, "file %s segment %d would be truncated\n", + fw_name, i); ret = -EINVAL; break; } @@ -257,7 +262,8 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw, ret = request_firmware_into_buf(&seg_fw, fw_name, dev, ptr, phdr->p_filesz); if (ret) { - dev_err(dev, "failed to load %s\n", fw_name); + dev_err(dev, "error %d loading %s\n", + ret, fw_name); break; } diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index 5bdfb1565c14..7d649d2cf31e 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -104,7 +104,6 @@ static const char * const icc_path_names[] = {"qup-core", "qup-config", #define GENI_OUTPUT_CTRL 0x24 #define GENI_CGC_CTRL 0x28 #define GENI_CLK_CTRL_RO 0x60 -#define GENI_IF_DISABLE_RO 0x64 #define GENI_FW_S_REVISION_RO 0x6c #define SE_GENI_BYTE_GRAN 0x254 #define SE_GENI_TX_PACKING_CFG0 0x260 @@ -322,6 +321,30 @@ static void geni_se_select_dma_mode(struct geni_se *se) writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN); } +static void geni_se_select_gpi_mode(struct geni_se *se) +{ + u32 val; + + geni_se_irq_clear(se); + + writel(0, se->base + SE_IRQ_EN); + + val = readl(se->base + SE_GENI_S_IRQ_EN); + val &= ~S_CMD_DONE_EN; + writel(val, se->base + SE_GENI_S_IRQ_EN); + + val = readl(se->base + SE_GENI_M_IRQ_EN); + val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN | + M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN); + writel(val, se->base + SE_GENI_M_IRQ_EN); + + writel(GENI_DMA_MODE_EN, se->base + SE_GENI_DMA_MODE_EN); + + val = readl(se->base + SE_GSI_EVENT_EN); + val |= (DMA_RX_EVENT_EN | DMA_TX_EVENT_EN | GENI_M_EVENT_EN | GENI_S_EVENT_EN); + writel(val, se->base + SE_GSI_EVENT_EN); +} + /** * geni_se_select_mode() - Select the serial engine transfer mode * @se: Pointer to the concerned serial engine. 
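geni_se_select_gpi_mode() above hands the serial engine over to the GPI DMA engine: it masks the main and secondary IRQ sources the CPU would otherwise service, leaves GENI_DMA_MODE_EN set, and enables the four GSI event channels. A protocol driver that has set up GPI channels would then switch over through the existing selector; a one-line sketch, assuming `se` is its struct geni_se:

#include <linux/qcom-geni-se.h>

geni_se_select_mode(se, GENI_GPI_DMA);	/* vs. GENI_SE_FIFO / GENI_SE_DMA */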
@@ -329,7 +352,7 @@ static void geni_se_select_dma_mode(struct geni_se *se) */ void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode) { - WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA); + WARN_ON(mode != GENI_SE_FIFO && mode != GENI_SE_DMA && mode != GENI_GPI_DMA); switch (mode) { case GENI_SE_FIFO: @@ -338,6 +361,9 @@ void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode) case GENI_SE_DMA: geni_se_select_dma_mode(se); break; + case GENI_GPI_DMA: + geni_se_select_gpi_mode(se); + break; case GENI_SE_INVALID: default: break; diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 934fcc4d2b05..536c3e4114fb 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -476,12 +476,12 @@ static int qmp_cooling_device_add(struct qmp *qmp, static int qmp_cooling_devices_register(struct qmp *qmp) { struct device_node *np, *child; - int count = QMP_NUM_COOLING_RESOURCES; + int count = 0; int ret; np = qmp->dev->of_node; - qmp->cooling_devs = devm_kcalloc(qmp->dev, count, + qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES, sizeof(*qmp->cooling_devs), GFP_KERNEL); @@ -497,12 +497,16 @@ static int qmp_cooling_devices_register(struct qmp *qmp) goto unroll; } + if (!count) + devm_kfree(qmp->dev, qmp->cooling_devs); + return 0; unroll: while (--count >= 0) thermal_cooling_device_unregister (qmp->cooling_devs[count].cdev); + devm_kfree(qmp->dev, qmp->cooling_devs); return ret; } @@ -602,6 +606,7 @@ static const struct of_device_id qmp_dt_match[] = { { .compatible = "qcom,sm8150-aoss-qmp", }, { .compatible = "qcom,sm8250-aoss-qmp", }, { .compatible = "qcom,sm8350-aoss-qmp", }, + { .compatible = "qcom,aoss-qmp", }, {} }; MODULE_DEVICE_TABLE(of, qmp_dt_match); diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 2daa17ba54a3..fa209b479ab3 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -403,12 +403,11 @@ static int rpmhpd_power_on(struct generic_pm_domain *domain) static int rpmhpd_power_off(struct generic_pm_domain *domain) { struct rpmhpd *pd = domain_to_rpmhpd(domain); - int ret = 0; + int ret; mutex_lock(&rpmhpd_lock); - ret = rpmhpd_aggregate_corner(pd, pd->level[0]); - + ret = rpmhpd_aggregate_corner(pd, 0); if (!ret) pd->enabled = false; diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 0b532a892d60..dbf494e92574 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -346,6 +346,33 @@ static const struct rpmpd_desc sdm660_desc = { .max_state = RPM_SMD_LEVEL_TURBO, }; +/* sm4250/6115 RPM Power domains */ +DEFINE_RPMPD_PAIR(sm6115, vddcx, vddcx_ao, RWCX, LEVEL, 0); +DEFINE_RPMPD_VFL(sm6115, vddcx_vfl, RWCX, 0); + +DEFINE_RPMPD_PAIR(sm6115, vddmx, vddmx_ao, RWMX, LEVEL, 0); +DEFINE_RPMPD_VFL(sm6115, vddmx_vfl, RWMX, 0); + +DEFINE_RPMPD_LEVEL(sm6115, vdd_lpi_cx, RWLC, 0); +DEFINE_RPMPD_LEVEL(sm6115, vdd_lpi_mx, RWLM, 0); + +static struct rpmpd *sm6115_rpmpds[] = { + [SM6115_VDDCX] = &sm6115_vddcx, + [SM6115_VDDCX_AO] = &sm6115_vddcx_ao, + [SM6115_VDDCX_VFL] = &sm6115_vddcx_vfl, + [SM6115_VDDMX] = &sm6115_vddmx, + [SM6115_VDDMX_AO] = &sm6115_vddmx_ao, + [SM6115_VDDMX_VFL] = &sm6115_vddmx_vfl, + [SM6115_VDD_LPI_CX] = &sm6115_vdd_lpi_cx, + [SM6115_VDD_LPI_MX] = &sm6115_vdd_lpi_mx, +}; + +static const struct rpmpd_desc sm6115_desc = { + .rpmpds = sm6115_rpmpds, + .num_pds = ARRAY_SIZE(sm6115_rpmpds), + .max_state = RPM_SMD_LEVEL_TURBO_NO_CPR, +}; + static const struct of_device_id rpmpd_match_table[] = { { .compatible = 
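The rpmhpd_power_off() fix above is easy to misread: pd->level[0] is only the lowest level advertised for the domain and is not guaranteed to be 0, so voting for it could hold the rail at a non-zero corner forever. Powering the domain off must aggregate an explicit corner 0:

ret = rpmhpd_aggregate_corner(pd, 0);	/* 0 = no performance vote at all */
if (!ret)
	pd->enabled = false;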
"qcom,mdm9607-rpmpd", .data = &mdm9607_desc }, { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc }, @@ -356,6 +383,7 @@ static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc }, { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc }, + { .compatible = "qcom,sm6115-rpmpd", .data = &sm6115_desc }, { } }; MODULE_DEVICE_TABLE(of, rpmpd_match_table); diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index bc0be1d4be5f..dfdd4f20f5fd 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -242,6 +242,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-msm8996" }, { .compatible = "qcom,rpm-msm8998" }, { .compatible = "qcom,rpm-sdm660" }, + { .compatible = "qcom,rpm-sm6115" }, { .compatible = "qcom,rpm-sm6125" }, { .compatible = "qcom,rpm-qcs404" }, {} diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c index 1d3d5e3ec2b0..ef15d014c03a 100644 --- a/drivers/soc/qcom/smsm.c +++ b/drivers/soc/qcom/smsm.c @@ -109,7 +109,7 @@ struct smsm_entry { DECLARE_BITMAP(irq_enabled, 32); DECLARE_BITMAP(irq_rising, 32); DECLARE_BITMAP(irq_falling, 32); - u32 last_value; + unsigned long last_value; u32 *remote_state; u32 *subscription; @@ -204,8 +204,7 @@ static irqreturn_t smsm_intr(int irq, void *data) u32 val; val = readl(entry->remote_state); - changed = val ^ entry->last_value; - entry->last_value = val; + changed = val ^ xchg(&entry->last_value, val); for_each_set_bit(i, entry->irq_enabled, 32) { if (!(changed & BIT(i))) @@ -264,6 +263,12 @@ static void smsm_unmask_irq(struct irq_data *irqd) struct qcom_smsm *smsm = entry->smsm; u32 val; + /* Make sure our last cached state is up-to-date */ + if (readl(entry->remote_state) & BIT(irq)) + set_bit(irq, &entry->last_value); + else + clear_bit(irq, &entry->last_value); + set_bit(irq, entry->irq_enabled); if (entry->subscription) { @@ -299,11 +304,28 @@ static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type) return 0; } +static int smsm_get_irqchip_state(struct irq_data *irqd, + enum irqchip_irq_state which, bool *state) +{ + struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd); + irq_hw_number_t irq = irqd_to_hwirq(irqd); + u32 val; + + if (which != IRQCHIP_STATE_LINE_LEVEL) + return -EINVAL; + + val = readl(entry->remote_state); + *state = !!(val & BIT(irq)); + + return 0; +} + static struct irq_chip smsm_irq_chip = { .name = "smsm", .irq_mask = smsm_mask_irq, .irq_unmask = smsm_unmask_irq, .irq_set_type = smsm_set_irq_type, + .irq_get_irqchip_state = smsm_get_irqchip_state, }; /** diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index b2f049faa3df..9faf48302f4b 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -417,8 +417,8 @@ QCOM_OPEN(chip_id, qcom_show_chip_id); static int show_image_##type(struct seq_file *seq, void *p) \ { \ struct smem_image_version *image_version = seq->private; \ - seq_puts(seq, image_version->type); \ - seq_putc(seq, '\n'); \ + if (image_version->type[0] != '\0') \ + seq_printf(seq, "%s\n", image_version->type); \ return 0; \ } \ static int open_image_##type(struct inode *inode, struct file *file) \ diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 71b44c31b012..07e0ecd64319 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -208,6 +208,7 @@ config ARCH_R8A77951 help This enables support for 
the Renesas R-Car H3 SoC (revisions 2.0 and later). + This includes different gradings like R-Car H3e-2G. config ARCH_R8A77965 bool "ARM64 Platform support for R-Car M3-N" @@ -229,6 +230,7 @@ config ARCH_R8A77961 select SYSC_R8A77961 help This enables support for the Renesas R-Car M3-W+ SoC. + This includes different gradings like R-Car M3e-2G. config ARCH_R8A77980 bool "ARM64 Platform support for R-Car V3H" diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c index d464ffa1be33..7410b9fa9846 100644 --- a/drivers/soc/renesas/r8a779a0-sysc.c +++ b/drivers/soc/renesas/r8a779a0-sysc.c @@ -404,19 +404,21 @@ static int __init r8a779a0_sysc_pd_init(void) for (i = 0; i < info->num_areas; i++) { const struct r8a779a0_sysc_area *area = &info->areas[i]; struct r8a779a0_sysc_pd *pd; + size_t n; if (!area->name) { /* Skip NULLified area */ continue; } - pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL); + n = strlen(area->name) + 1; + pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { error = -ENOMEM; goto out_put; } - strcpy(pd->name, area->name); + memcpy(pd->name, area->name, n); pd->genpd.name = pd->name; pd->pdr = area->pdr; pd->flags = area->flags; diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 53387a72ca00..b0a80de34c98 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -396,19 +396,21 @@ static int __init rcar_sysc_pd_init(void) for (i = 0; i < info->num_areas; i++) { const struct rcar_sysc_area *area = &info->areas[i]; struct rcar_sysc_pd *pd; + size_t n; if (!area->name) { /* Skip NULLified area */ continue; } - pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL); + n = strlen(area->name) + 1; + pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL); if (!pd) { error = -ENOMEM; goto out_put; } - strcpy(pd->name, area->name); + memcpy(pd->name, area->name, n); pd->genpd.name = pd->name; pd->ch.chan_offs = area->chan_offs; pd->ch.chan_bit = area->chan_bit; diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 8310fce7714e..dab9f5a0aad0 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -284,11 +284,15 @@ static const struct of_device_id renesas_socs[] __initconst = { #if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951) { .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 }, #endif +#ifdef CONFIG_ARCH_R8A77951 + { .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 }, +#endif #ifdef CONFIG_ARCH_R8A77960 { .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w }, #endif #ifdef CONFIG_ARCH_R8A77961 { .compatible = "renesas,r8a77961", .data = &soc_rcar_m3_w }, + { .compatible = "renesas,r8a779m3", .data = &soc_rcar_m3_w }, #endif #ifdef CONFIG_ARCH_R8A77965 { .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n }, diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig index 2c13bf4dd5db..25eb2c1e31bb 100644 --- a/drivers/soc/rockchip/Kconfig +++ b/drivers/soc/rockchip/Kconfig @@ -6,8 +6,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST # config ROCKCHIP_GRF - bool - default y + bool "Rockchip General Register Files support" if COMPILE_TEST + default y if ARCH_ROCKCHIP help The General Register Files are a central component providing special additional settings registers for a lot of soc-components. 
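The two Renesas SYSC hunks above share one pattern: the name length is already computed to size the allocation, so the copy can reuse it. memcpy() with the known length (NUL included) avoids re-walking the string and keeps FORTIFY_SOURCE happy, since the compiler cannot see the bound of the trailing flexible buffer that strcpy() writes into:

size_t n = strlen(area->name) + 1;	/* measured once for the alloc... */

pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
if (!pd)
	return -ENOMEM;
memcpy(pd->name, area->name, n);	/* ...and reused for a bounded copy */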
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c index cf8182fc3642..9df513d1219b 100644 --- a/drivers/soc/rockchip/io-domain.c +++ b/drivers/soc/rockchip/io-domain.c @@ -51,13 +51,11 @@ #define RK3399_PMUGRF_CON0_VSEL BIT(8) #define RK3399_PMUGRF_VSEL_SUPPLY_NUM 9 -struct rockchip_iodomain; +#define RK3568_PMU_GRF_IO_VSEL0 (0x0140) +#define RK3568_PMU_GRF_IO_VSEL1 (0x0144) +#define RK3568_PMU_GRF_IO_VSEL2 (0x0148) -struct rockchip_iodomain_soc_data { - int grf_offset; - const char *supply_names[MAX_SUPPLIES]; - void (*init)(struct rockchip_iodomain *iod); -}; +struct rockchip_iodomain; struct rockchip_iodomain_supply { struct rockchip_iodomain *iod; @@ -66,13 +64,62 @@ struct rockchip_iodomain_supply { int idx; }; +struct rockchip_iodomain_soc_data { + int grf_offset; + const char *supply_names[MAX_SUPPLIES]; + void (*init)(struct rockchip_iodomain *iod); + int (*write)(struct rockchip_iodomain_supply *supply, int uV); +}; + struct rockchip_iodomain { struct device *dev; struct regmap *grf; const struct rockchip_iodomain_soc_data *soc_data; struct rockchip_iodomain_supply supplies[MAX_SUPPLIES]; + int (*write)(struct rockchip_iodomain_supply *supply, int uV); }; +static int rk3568_iodomain_write(struct rockchip_iodomain_supply *supply, int uV) +{ + struct rockchip_iodomain *iod = supply->iod; + u32 is_3v3 = uV > MAX_VOLTAGE_1_8; + u32 val0, val1; + int b; + + switch (supply->idx) { + case 0: /* pmuio1 */ + break; + case 1: /* pmuio2 */ + b = supply->idx; + val0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b)); + b = supply->idx + 4; + val1 = BIT(16 + b) | (is_3v3 ? BIT(b) : 0); + + regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val0); + regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val1); + break; + case 3: /* vccio2 */ + break; + case 2: /* vccio1 */ + case 4: /* vccio3 */ + case 5: /* vccio4 */ + case 6: /* vccio5 */ + case 7: /* vccio6 */ + case 8: /* vccio7 */ + b = supply->idx - 1; + val0 = BIT(16 + b) | (is_3v3 ? 0 : BIT(b)); + val1 = BIT(16 + b) | (is_3v3 ? 
BIT(b) : 0); + + regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL0, val0); + regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL1, val1); + break; + default: + return -EINVAL; + } + + return 0; +} + static int rockchip_iodomain_write(struct rockchip_iodomain_supply *supply, int uV) { @@ -136,7 +183,7 @@ static int rockchip_iodomain_notify(struct notifier_block *nb, return NOTIFY_BAD; } - ret = rockchip_iodomain_write(supply, uV); + ret = supply->iod->write(supply, uV); if (ret && event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) return NOTIFY_BAD; @@ -398,6 +445,22 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = { .init = rk3399_pmu_iodomain_init, }; +static const struct rockchip_iodomain_soc_data soc_data_rk3568_pmu = { + .grf_offset = 0x140, + .supply_names = { + "pmuio1", + "pmuio2", + "vccio1", + "vccio2", + "vccio3", + "vccio4", + "vccio5", + "vccio6", + "vccio7", + }, + .write = rk3568_iodomain_write, +}; + static const struct rockchip_iodomain_soc_data soc_data_rv1108 = { .grf_offset = 0x404, .supply_names = { @@ -470,6 +533,10 @@ static const struct of_device_id rockchip_iodomain_match[] = { .data = &soc_data_rk3399_pmu }, { + .compatible = "rockchip,rk3568-pmu-io-voltage-domain", + .data = &soc_data_rk3568_pmu + }, + { .compatible = "rockchip,rv1108-io-voltage-domain", .data = &soc_data_rv1108 }, @@ -502,6 +569,11 @@ static int rockchip_iodomain_probe(struct platform_device *pdev) match = of_match_node(rockchip_iodomain_match, np); iod->soc_data = match->data; + if (iod->soc_data->write) + iod->write = iod->soc_data->write; + else + iod->write = rockchip_iodomain_write; + parent = pdev->dev.parent; if (parent && parent->of_node) { iod->grf = syscon_node_to_regmap(parent->of_node); @@ -562,7 +634,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev) supply->reg = reg; supply->nb.notifier_call = rockchip_iodomain_notify; - ret = rockchip_iodomain_write(supply, uV); + ret = iod->write(supply, uV); if (ret) { supply->reg = NULL; goto unreg_notify; diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 3d9da3d359da..f2151815db58 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -13,6 +13,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/sys_soc.h> @@ -210,6 +211,8 @@ static int tegra_fuse_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fuse); fuse->dev = &pdev->dev; + pm_runtime_enable(&pdev->dev); + if (fuse->soc->probe) { err = fuse->soc->probe(fuse); if (err < 0) @@ -246,14 +249,71 @@ static int tegra_fuse_probe(struct platform_device *pdev) return 0; restore: + fuse->clk = NULL; fuse->base = base; + pm_runtime_disable(&pdev->dev); return err; } +static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev) +{ + int err; + + err = clk_prepare_enable(fuse->clk); + if (err < 0) { + dev_err(dev, "failed to enable FUSE clock: %d\n", err); + return err; + } + + return 0; +} + +static int __maybe_unused tegra_fuse_runtime_suspend(struct device *dev) +{ + clk_disable_unprepare(fuse->clk); + + return 0; +} + +static int __maybe_unused tegra_fuse_suspend(struct device *dev) +{ + int ret; + + /* + * Critical for RAM re-repair operation, which must occur on resume + * from LP1 system suspend and as part of CCPLEX cluster switching. 
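rk3568_iodomain_write() above leans on the Rockchip GRF register convention: the top 16 bits of each register are per-bit write enables for the bottom 16, so a single regmap_write() flips exactly one bit with no read-modify-write and no locking. Several Rockchip drivers spell this idiom as a HIWORD_UPDATE macro; the values built in the hunk are equivalent to:

#define HIWORD_UPDATE(val, mask)	((val) | ((mask) << 16))

/* 1.8 V sets the io-domain bit, 3.3 V clears it */
val0 = HIWORD_UPDATE(is_3v3 ? 0 : BIT(b), BIT(b));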
+ */ + if (fuse->soc->clk_suspend_on) + ret = pm_runtime_resume_and_get(dev); + else + ret = pm_runtime_force_suspend(dev); + + return ret; +} + +static int __maybe_unused tegra_fuse_resume(struct device *dev) +{ + int ret = 0; + + if (fuse->soc->clk_suspend_on) + pm_runtime_put(dev); + else + ret = pm_runtime_force_resume(dev); + + return ret; +} + +static const struct dev_pm_ops tegra_fuse_pm = { + SET_RUNTIME_PM_OPS(tegra_fuse_runtime_suspend, tegra_fuse_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume) +}; + static struct platform_driver tegra_fuse_driver = { .driver = { .name = "tegra-fuse", .of_match_table = tegra_fuse_match, + .pm = &tegra_fuse_pm, .suppress_bind_attrs = true, }, .probe = tegra_fuse_probe, diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c index 16aaa28573ac..8ec9fc5e5e4b 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra20.c +++ b/drivers/soc/tegra/fuse/fuse-tegra20.c @@ -16,6 +16,7 @@ #include <linux/kobject.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/random.h> #include <soc/tegra/fuse.h> @@ -46,6 +47,10 @@ static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset) u32 value = 0; int err; + err = pm_runtime_resume_and_get(fuse->dev); + if (err) + return err; + mutex_lock(&fuse->apbdma.lock); fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset; @@ -66,8 +71,6 @@ static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset) reinit_completion(&fuse->apbdma.wait); - clk_prepare_enable(fuse->clk); - dmaengine_submit(dma_desc); dma_async_issue_pending(fuse->apbdma.chan); time_left = wait_for_completion_timeout(&fuse->apbdma.wait, @@ -78,10 +81,9 @@ static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset) else value = *fuse->apbdma.virt; - clk_disable_unprepare(fuse->clk); - out: mutex_unlock(&fuse->apbdma.lock); + pm_runtime_put(fuse->dev); return value; } @@ -165,4 +167,5 @@ const struct tegra_fuse_soc tegra20_fuse_soc = { .probe = tegra20_fuse_probe, .info = &tegra20_fuse_info, .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = false, }; diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index c1aa7815bd6e..b071d433d74f 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -12,6 +12,7 @@ #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/random.h> #include <soc/tegra/fuse.h> @@ -52,15 +53,13 @@ static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset) u32 value; int err; - err = clk_prepare_enable(fuse->clk); - if (err < 0) { - dev_err(fuse->dev, "failed to enable FUSE clock: %d\n", err); + err = pm_runtime_resume_and_get(fuse->dev); + if (err) return 0; - } value = readl_relaxed(fuse->base + FUSE_BEGIN + offset); - clk_disable_unprepare(fuse->clk); + pm_runtime_put(fuse->dev); return value; } @@ -113,6 +112,7 @@ const struct tegra_fuse_soc tegra30_fuse_soc = { .speedo_init = tegra30_init_speedo_data, .info = &tegra30_fuse_info, .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = false, }; #endif @@ -128,6 +128,7 @@ const struct tegra_fuse_soc tegra114_fuse_soc = { .speedo_init = tegra114_init_speedo_data, .info = &tegra114_fuse_info, .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = false, }; #endif @@ -209,6 +210,7 @@ const struct tegra_fuse_soc 
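The FUSE conversion above routes all clock handling through runtime PM: readers take a runtime PM reference, the runtime-resume callback enables the clock, and system sleep either keeps the block powered (clk_suspend_on, needed for RAM re-repair on Tegra124) or force-suspends it. The read-side pattern, as in tegra30_fuse_read():

err = pm_runtime_resume_and_get(fuse->dev);	/* drops the ref on failure */
if (err)
	return 0;	/* this u32 accessor has no error channel */

value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);

pm_runtime_put(fuse->dev);
return value;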
tegra124_fuse_soc = { .lookups = tegra124_fuse_lookups, .num_lookups = ARRAY_SIZE(tegra124_fuse_lookups), .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = true, }; #endif @@ -295,6 +297,7 @@ const struct tegra_fuse_soc tegra210_fuse_soc = { .lookups = tegra210_fuse_lookups, .num_lookups = ARRAY_SIZE(tegra210_fuse_lookups), .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = false, }; #endif @@ -325,6 +328,7 @@ const struct tegra_fuse_soc tegra186_fuse_soc = { .lookups = tegra186_fuse_lookups, .num_lookups = ARRAY_SIZE(tegra186_fuse_lookups), .soc_attr_group = &tegra_soc_attr_group, + .clk_suspend_on = false, }; #endif @@ -355,6 +359,7 @@ const struct tegra_fuse_soc tegra194_fuse_soc = { .lookups = tegra194_fuse_lookups, .num_lookups = ARRAY_SIZE(tegra194_fuse_lookups), .soc_attr_group = &tegra194_soc_attr_group, + .clk_suspend_on = false, }; #endif @@ -385,5 +390,6 @@ const struct tegra_fuse_soc tegra234_fuse_soc = { .lookups = tegra234_fuse_lookups, .num_lookups = ARRAY_SIZE(tegra234_fuse_lookups), .soc_attr_group = &tegra194_soc_attr_group, + .clk_suspend_on = false, }; #endif diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index e057a58e2060..de58feba0435 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -34,6 +34,8 @@ struct tegra_fuse_soc { unsigned int num_lookups; const struct attribute_group *soc_attr_group; + + bool clk_suspend_on; }; struct tegra_fuse { diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index ea62f84d1c8b..50091c4ec948 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -436,7 +436,7 @@ struct tegra_pmc { static struct tegra_pmc *pmc = &(struct tegra_pmc) { .base = NULL, - .suspend_mode = TEGRA_SUSPEND_NONE, + .suspend_mode = TEGRA_SUSPEND_NOT_READY, }; static inline struct tegra_powergate * @@ -1812,6 +1812,7 @@ static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np) u32 value, values[2]; if (of_property_read_u32(np, "nvidia,suspend-mode", &value)) { + pmc->suspend_mode = TEGRA_SUSPEND_NONE; } else { switch (value) { case 0: @@ -2785,6 +2786,11 @@ static int tegra_pmc_regmap_init(struct tegra_pmc *pmc) return 0; } +static void tegra_pmc_reset_suspend_mode(void *data) +{ + pmc->suspend_mode = TEGRA_SUSPEND_NOT_READY; +} + static int tegra_pmc_probe(struct platform_device *pdev) { void __iomem *base; @@ -2803,6 +2809,11 @@ static int tegra_pmc_probe(struct platform_device *pdev) if (err < 0) return err; + err = devm_add_action_or_reset(&pdev->dev, tegra_pmc_reset_suspend_mode, + NULL); + if (err) + return err; + /* take over the memory region from the early initialization */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); @@ -2909,6 +2920,7 @@ static int tegra_pmc_probe(struct platform_device *pdev) tegra_pmc_clock_register(pmc, pdev->dev.of_node); platform_set_drvdata(pdev, pmc); + tegra_pm_init_suspend(); return 0; diff --git a/drivers/soc/tegra/powergate-bpmp.c b/drivers/soc/tegra/powergate-bpmp.c index 06c792bafca5..8eaf50d0b6af 100644 --- a/drivers/soc/tegra/powergate-bpmp.c +++ b/drivers/soc/tegra/powergate-bpmp.c @@ -7,7 +7,6 @@ #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/slab.h> -#include <linux/version.h> #include <soc/tegra/bpmp.h> #include <soc/tegra/bpmp-abi.h> diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index f22ac1edbdd0..49da387d7749 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -338,6 +338,7 @@ 
static const struct of_device_id pruss_of_match[] = { { .compatible = "ti,k2g-pruss" }, { .compatible = "ti,am654-icssg", .data = &am65x_j721e_pruss_data, }, { .compatible = "ti,j721e-icssg", .data = &am65x_j721e_pruss_data, }, + { .compatible = "ti,am642-icssg", .data = &am65x_j721e_pruss_data, }, {}, }; MODULE_DEVICE_TABLE(of, pruss_of_match); diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c index 06cbee5fd254..b5b2fa538d5c 100644 --- a/drivers/soc/ti/smartreflex.c +++ b/drivers/soc/ti/smartreflex.c @@ -126,23 +126,13 @@ static irqreturn_t sr_interrupt(int irq, void *data) static void sr_set_clk_length(struct omap_sr *sr) { - struct clk *fck; u32 fclk_speed; /* Try interconnect target module fck first if it already exists */ - fck = clk_get(sr->pdev->dev.parent, "fck"); - if (IS_ERR(fck)) { - fck = clk_get(&sr->pdev->dev, "fck"); - if (IS_ERR(fck)) { - dev_err(&sr->pdev->dev, - "%s: unable to get fck for device %s\n", - __func__, dev_name(&sr->pdev->dev)); - return; - } - } + if (IS_ERR(sr->fck)) + return; - fclk_speed = clk_get_rate(fck); - clk_put(fck); + fclk_speed = clk_get_rate(sr->fck); switch (fclk_speed) { case 12000000: @@ -587,21 +577,25 @@ int sr_enable(struct omap_sr *sr, unsigned long volt) /* errminlimit is opp dependent and hence linked to voltage */ sr->err_minlimit = nvalue_row->errminlimit; - pm_runtime_get_sync(&sr->pdev->dev); + clk_enable(sr->fck); /* Check if SR is already enabled. If yes do nothing */ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) - return 0; + goto out_enabled; /* Configure SR */ ret = sr_class->configure(sr); if (ret) - return ret; + goto out_enabled; sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue); /* SRCONFIG - enable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE); + +out_enabled: + sr->enabled = 1; + return 0; } @@ -621,7 +615,7 @@ void sr_disable(struct omap_sr *sr) } /* Check if SR clocks are already disabled. If yes do nothing */ - if (pm_runtime_suspended(&sr->pdev->dev)) + if (!sr->enabled) return; /* @@ -642,7 +636,8 @@ void sr_disable(struct omap_sr *sr) } } - pm_runtime_put_sync_suspend(&sr->pdev->dev); + clk_disable(sr->fck); + sr->enabled = 0; } /** @@ -851,8 +846,12 @@ static int omap_sr_probe(struct platform_device *pdev) irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + sr_info->fck = devm_clk_get(pdev->dev.parent, "fck"); + if (IS_ERR(sr_info->fck)) + return PTR_ERR(sr_info->fck); + clk_prepare(sr_info->fck); + pm_runtime_enable(&pdev->dev); - pm_runtime_irq_safe(&pdev->dev); snprintf(sr_info->name, SMARTREFLEX_NAME_LEN, "%s", pdata->name); @@ -878,12 +877,6 @@ static int omap_sr_probe(struct platform_device *pdev) list_add(&sr_info->node, &sr_list); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); - goto err_list_del; - } - /* * Call into late init to do initializations that require * both sr driver and sr class driver to be initiallized. 
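The SmartReflex rework above replaces pm_runtime_irq_safe() with direct clock handling, and the prepare/enable split is the point: clk_prepare() may sleep, so it happens once at probe, while clk_enable()/clk_disable() are safe in atomic context and can sit on the sr_enable()/sr_disable() voltage-scaling path. The lifecycle, condensed from the hunks:

/* probe: the sleeping half, done once */
sr_info->fck = devm_clk_get(pdev->dev.parent, "fck");
if (IS_ERR(sr_info->fck))
	return PTR_ERR(sr_info->fck);
clk_prepare(sr_info->fck);

/* hot path: atomic-safe gating, tracked via sr->enabled */
clk_enable(sr->fck);
clk_disable(sr->fck);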
@@ -933,16 +926,13 @@ static int omap_sr_probe(struct platform_device *pdev)
 	}
 
-	pm_runtime_put_sync(&pdev->dev);
-
 	return ret;
 
 err_debugfs:
 	debugfs_remove_recursive(sr_info->dbg_dir);
 err_list_del:
 	list_del(&sr_info->node);
-
-	pm_runtime_put_sync(&pdev->dev);
+	clk_unprepare(sr_info->fck);
 
 	return ret;
 }
 
@@ -950,6 +940,7 @@ err_list_del:
 static int omap_sr_remove(struct platform_device *pdev)
 {
 	struct omap_sr_data *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
 	struct omap_sr *sr_info;
 
 	if (!pdata) {
@@ -968,7 +959,8 @@ static int omap_sr_remove(struct platform_device *pdev)
 		sr_stop_vddautocomp(sr_info);
 	debugfs_remove_recursive(sr_info->dbg_dir);
 
-	pm_runtime_disable(&pdev->dev);
+	pm_runtime_disable(dev);
+	clk_unprepare(sr_info->fck);
 	list_del(&sr_info->node);
 	return 0;
 }
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 8d8df51c5466..b2dd0a4d2446 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -77,6 +77,11 @@ struct spi_imx_devtype_data {
 	bool has_slavemode;
 	unsigned int fifo_size;
 	bool dynamic_burst;
+	/*
+	 * ERR009165 fixed or not:
+	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
+	 */
+	bool tx_glitch_fixed;
 	enum spi_imx_devtype devtype;
 };
 
@@ -622,8 +627,14 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
 	ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk);
 	spi_imx->spi_bus_clk = clk;
 
-	if (spi_imx->usedma)
+	/*
+	 * ERR009165: work in XHC mode instead of SMC as PIO on the chips
+	 * before i.mx6ul.
+	 */
+	if (spi_imx->usedma && spi_imx->devtype_data->tx_glitch_fixed)
 		ctrl |= MX51_ECSPI_CTRL_SMC;
+	else
+		ctrl &= ~MX51_ECSPI_CTRL_SMC;
 
 	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
 
@@ -632,12 +643,16 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
 
 static void mx51_setup_wml(struct spi_imx_data *spi_imx)
 {
+	u32 tx_wml = 0;
+
+	if (spi_imx->devtype_data->tx_glitch_fixed)
+		tx_wml = spi_imx->wml;
 	/*
 	 * Configure the DMA register: setup the watermark
 	 * and enable DMA request.
 	 */
 	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
-		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
+		MX51_ECSPI_DMA_TX_WML(tx_wml) |
 		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
 		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
 		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
 
@@ -1028,6 +1043,23 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
 	.devtype = IMX53_ECSPI,
 };
 
+static struct spi_imx_devtype_data imx6ul_ecspi_devtype_data = {
+	.intctrl = mx51_ecspi_intctrl,
+	.prepare_message = mx51_ecspi_prepare_message,
+	.prepare_transfer = mx51_ecspi_prepare_transfer,
+	.trigger = mx51_ecspi_trigger,
+	.rx_available = mx51_ecspi_rx_available,
+	.reset = mx51_ecspi_reset,
+	.setup_wml = mx51_setup_wml,
+	.fifo_size = 64,
+	.has_dmamode = true,
+	.dynamic_burst = true,
+	.has_slavemode = true,
+	.tx_glitch_fixed = true,
+	.disable = mx51_ecspi_disable,
+	.devtype = IMX51_ECSPI,
+};
+
 static const struct of_device_id spi_imx_dt_ids[] = {
 	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
 	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
@@ -1036,6 +1068,7 @@ static const struct of_device_id spi_imx_dt_ids[] = {
 	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
 	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
 	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
+	{ .compatible = "fsl,imx6ul-ecspi", .data = &imx6ul_ecspi_devtype_data, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
@@ -1249,10 +1282,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
 {
 	int ret;
 
-	/* use pio mode for i.mx6dl chip TKT238285 */
-	if (of_machine_is_compatible("fsl,imx6dl"))
-		return 0;
-
 	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;
 
 	/* Prepare for TX DMA: */
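The new "fsl,imx6ul-ecspi" compatible above selects a devtype structure whose tx_glitch_fixed flag drives the ERR009165 handling. The usual way a probe routine picks up such per-compatible data is of_device_get_match_data(); a sketch with an abbreviated, hypothetical devtype struct:

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct example_devtype {
		bool tx_glitch_fixed;	/* erratum fixed on this SoC */
	};

	static int example_probe(struct platform_device *pdev)
	{
		const struct example_devtype *devtype;

		/* Returns the .data field of the matched of_device_id */
		devtype = of_device_get_match_data(&pdev->dev);
		if (!devtype)
			return -ENODEV;
		if (devtype->tx_glitch_fixed)
			dev_dbg(&pdev->dev, "SMC mode usable with DMA\n");
		return 0;
	}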
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 546dfc1e2349..0bc7046ab942 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -487,6 +487,7 @@ config FTWDT010_WATCHDOG
 config IXP4XX_WATCHDOG
 	tristate "IXP4xx Watchdog"
 	depends on ARCH_IXP4XX
+	select WATCHDOG_CORE
 	help
 	  Say Y here if to include support for the watchdog timer
 	  in the Intel IXP4xx network processors. This driver can
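The rewritten driver below programs the timeout as timeout * clock-rate ticks into a 32-bit down-counter, which is why it caps max_timeout at U32_MAX / rate. A quick sanity check of that arithmetic (the numbers follow from the 66666000 Hz fallback rate used in the driver):

	#include <linux/limits.h>

	/* 0xffffffff / 66666000 ~= 64, so timeouts above roughly 64
	 * seconds would overflow the counter at the fallback rate. */
	static unsigned int example_max_timeout(unsigned long rate)
	{
		return U32_MAX / rate;
	}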
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index aae29dcfaf11..2693ffb24ac7 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -1,220 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * drivers/char/watchdog/ixp4xx_wdt.c
  *
  * Watchdog driver for Intel IXP4xx network processors
  *
  * Author: Deepak Saxena <dsaxena@plexity.net>
+ * Author: Linus Walleij <linus.walleij@linaro.org>
  *
  * Copyright 2004 (c) MontaVista, Software, Inc.
  * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
 */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/module.h>
-#include <linux/moduleparam.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/of.h>
 #include <linux/watchdog.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/uaccess.h>
-#include <mach/hardware.h>
+#include <linux/bits.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/soc/ixp4xx/cpu.h>
+
+struct ixp4xx_wdt {
+	struct watchdog_device wdd;
+	void __iomem *base;
+	unsigned long rate;
+};
 
-static bool nowayout = WATCHDOG_NOWAYOUT;
-static int heartbeat = 60;	/* (secs) Default is 1 minute */
-static unsigned long wdt_status;
-static unsigned long boot_status;
-static DEFINE_SPINLOCK(wdt_lock);
+/* Fallback if we do not have a clock for this */
+#define IXP4XX_TIMER_FREQ	66666000
 
-#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL)
+/* Registers after the timer registers */
+#define IXP4XX_OSWT_OFFSET	0x14	/* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET	0x18	/* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET	0x1C	/* Watchdog Key */
+#define IXP4XX_OSST_OFFSET	0x20	/* Timer Status */
 
-#define WDT_IN_USE		0
-#define WDT_OK_TO_CLOSE		1
+#define IXP4XX_OSST_TIMER_WDOG_PEND	0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET	0x00000010
+#define IXP4XX_WDT_KEY			0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE		0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE		0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE		0x00000004
 
-static void wdt_enable(void)
+static inline
+struct ixp4xx_wdt *to_ixp4xx_wdt(struct watchdog_device *wdd)
 {
-	spin_lock(&wdt_lock);
-	*IXP4XX_OSWK = IXP4XX_WDT_KEY;
-	*IXP4XX_OSWE = 0;
-	*IXP4XX_OSWT = WDT_TICK_RATE * heartbeat;
-	*IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE;
-	*IXP4XX_OSWK = 0;
-	spin_unlock(&wdt_lock);
+	return container_of(wdd, struct ixp4xx_wdt, wdd);
 }
 
-static void wdt_disable(void)
+static int ixp4xx_wdt_start(struct watchdog_device *wdd)
 {
-	spin_lock(&wdt_lock);
-	*IXP4XX_OSWK = IXP4XX_WDT_KEY;
-	*IXP4XX_OSWE = 0;
-	*IXP4XX_OSWK = 0;
-	spin_unlock(&wdt_lock);
-}
+	struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
 
-static int ixp4xx_wdt_open(struct inode *inode, struct file *file)
-{
-	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
-		return -EBUSY;
+	__raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+	__raw_writel(0, iwdt->base + IXP4XX_OSWE_OFFSET);
+	__raw_writel(wdd->timeout * iwdt->rate,
+		     iwdt->base + IXP4XX_OSWT_OFFSET);
+	__raw_writel(IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE,
+		     iwdt->base + IXP4XX_OSWE_OFFSET);
+	__raw_writel(0, iwdt->base + IXP4XX_OSWK_OFFSET);
 
-	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-	wdt_enable();
-	return stream_open(inode, file);
+	return 0;
 }
 
-static ssize_t
-ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+static int ixp4xx_wdt_stop(struct watchdog_device *wdd)
 {
-	if (len) {
-		if (!nowayout) {
-			size_t i;
-
-			clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
-			for (i = 0; i != len; i++) {
-				char c;
-
-				if (get_user(c, data + i))
-					return -EFAULT;
-				if (c == 'V')
-					set_bit(WDT_OK_TO_CLOSE, &wdt_status);
-			}
-		}
-		wdt_enable();
-	}
-	return len;
-}
+	struct ixp4xx_wdt *iwdt = to_ixp4xx_wdt(wdd);
 
-static const struct watchdog_info ident = {
-	.options	= WDIOF_CARDRESET | WDIOF_MAGICCLOSE |
-			  WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
-	.identity	= "IXP4xx Watchdog",
-};
+	__raw_writel(IXP4XX_WDT_KEY, iwdt->base + IXP4XX_OSWK_OFFSET);
+	__raw_writel(0, iwdt->base + IXP4XX_OSWE_OFFSET);
+	__raw_writel(0, iwdt->base + IXP4XX_OSWK_OFFSET);
 
-
-static long ixp4xx_wdt_ioctl(struct file *file, unsigned int cmd,
-			     unsigned long arg)
-{
-	int ret = -ENOTTY;
-	int time;
-
-	switch (cmd) {
-	case WDIOC_GETSUPPORT:
-		ret = copy_to_user((struct watchdog_info *)arg, &ident,
-				   sizeof(ident)) ? -EFAULT : 0;
-		break;
-
-	case WDIOC_GETSTATUS:
-		ret = put_user(0, (int *)arg);
-		break;
-
-	case WDIOC_GETBOOTSTATUS:
-		ret = put_user(boot_status, (int *)arg);
-		break;
-
-	case WDIOC_KEEPALIVE:
-		wdt_enable();
-		ret = 0;
-		break;
-
-	case WDIOC_SETTIMEOUT:
-		ret = get_user(time, (int *)arg);
-		if (ret)
-			break;
-
-		if (time <= 0 || time > 60) {
-			ret = -EINVAL;
-			break;
-		}
-
-		heartbeat = time;
-		wdt_enable();
-		fallthrough;
-
-	case WDIOC_GETTIMEOUT:
-		ret = put_user(heartbeat, (int *)arg);
-		break;
-	}
-	return ret;
+	return 0;
 }
 
-static int ixp4xx_wdt_release(struct inode *inode, struct file *file)
+static int ixp4xx_wdt_set_timeout(struct watchdog_device *wdd,
+				  unsigned int timeout)
 {
-	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
-		wdt_disable();
-	else
-		pr_crit("Device closed unexpectedly - timer will not stop\n");
-	clear_bit(WDT_IN_USE, &wdt_status);
-	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+	wdd->timeout = timeout;
+	if (watchdog_active(wdd))
+		ixp4xx_wdt_start(wdd);
 
 	return 0;
 }
 
-
-static const struct file_operations ixp4xx_wdt_fops = {
-	.owner		= THIS_MODULE,
-	.llseek		= no_llseek,
-	.write		= ixp4xx_wdt_write,
-	.unlocked_ioctl	= ixp4xx_wdt_ioctl,
-	.compat_ioctl	= compat_ptr_ioctl,
-	.open		= ixp4xx_wdt_open,
-	.release	= ixp4xx_wdt_release,
+static const struct watchdog_ops ixp4xx_wdt_ops = {
+	.start = ixp4xx_wdt_start,
+	.stop = ixp4xx_wdt_stop,
+	.set_timeout = ixp4xx_wdt_set_timeout,
+	.owner = THIS_MODULE,
 };
 
-static struct miscdevice ixp4xx_wdt_miscdev = {
-	.minor		= WATCHDOG_MINOR,
-	.name		= "watchdog",
-	.fops		= &ixp4xx_wdt_fops,
+static const struct watchdog_info ixp4xx_wdt_info = {
+	.options = WDIOF_KEEPALIVEPING
+		| WDIOF_MAGICCLOSE
+		| WDIOF_SETTIMEOUT,
+	.identity = KBUILD_MODNAME,
 };
 
-static int __init ixp4xx_wdt_init(void)
+/* Devres-handled clock disablement */
+static void ixp4xx_clock_action(void *d)
+{
+	clk_disable_unprepare(d);
+}
+
+static int ixp4xx_wdt_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
+	struct ixp4xx_wdt *iwdt;
+	struct clk *clk;
 	int ret;
 
-	/*
-	 * FIXME: we bail out on device tree boot but this really needs
-	 * to be fixed in a nicer way: this registers the MDIO bus before
-	 * even matching the driver infrastructure, we should only probe
-	 * detected hardware.
-	 */
-	if (of_have_populated_dt())
-		return -ENODEV;
 	if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
-		pr_err("Rev. A0 IXP42x CPU detected - watchdog disabled\n");
-
+		dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
 		return -ENODEV;
 	}
 
-	boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
-			WDIOF_CARDRESET : 0;
-	ret = misc_register(&ixp4xx_wdt_miscdev);
-	if (ret == 0)
-		pr_info("timer heartbeat %d sec\n", heartbeat);
-	return ret;
-}
 
-static void __exit ixp4xx_wdt_exit(void)
-{
-	misc_deregister(&ixp4xx_wdt_miscdev);
-}
+	iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
+	if (!iwdt)
+		return -ENOMEM;
+	iwdt->base = dev->platform_data;
 
+	/*
+	 * Retrieve rate from a fixed clock from the device tree if
+	 * the parent has that, else use the default clock rate.
+	 */
+	clk = devm_clk_get(dev->parent, NULL);
+	if (!IS_ERR(clk)) {
+		ret = clk_prepare_enable(clk);
+		if (ret)
+			return ret;
+		ret = devm_add_action_or_reset(dev, ixp4xx_clock_action, clk);
+		if (ret)
+			return ret;
+		iwdt->rate = clk_get_rate(clk);
+	}
+	if (!iwdt->rate)
+		iwdt->rate = IXP4XX_TIMER_FREQ;
 
-module_init(ixp4xx_wdt_init);
-module_exit(ixp4xx_wdt_exit);
+	iwdt->wdd.info = &ixp4xx_wdt_info;
+	iwdt->wdd.ops = &ixp4xx_wdt_ops;
+	iwdt->wdd.min_timeout = 1;
+	iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
+	iwdt->wdd.parent = dev;
+	/* Default to 60 seconds */
+	iwdt->wdd.timeout = 60U;
+	watchdog_init_timeout(&iwdt->wdd, 0, dev);
 
-MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog");
+	if (__raw_readl(iwdt->base + IXP4XX_OSST_OFFSET) &
+	    IXP4XX_OSST_TIMER_WARM_RESET)
+		iwdt->wdd.bootstatus = WDIOF_CARDRESET;
+
+	ret = devm_watchdog_register_device(dev, &iwdt->wdd);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "IXP4xx watchdog available\n");
 
-module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)");
+	return 0;
+}
 
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+static struct platform_driver ixp4xx_wdt_driver = {
+	.probe = ixp4xx_wdt_probe,
+	.driver = {
+		.name = "ixp4xx-watchdog",
+	},
+};
+module_platform_driver(ixp4xx_wdt_driver);
 
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog");
 MODULE_LICENSE("GPL");
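For context on how the rewritten driver gets its resources: it reads the timer MMIO base out of platform_data and looks for an optional clock on its parent device. A hypothetical parent-side registration helper could look like the sketch below; the function name is illustrative, and the real hookup lives in the IXP4xx timer code, which is not part of this diff.

	#include <linux/platform_device.h>

	static int example_register_wdt(struct device *parent,
					void __iomem *base)
	{
		struct platform_device *pdev;
		int ret;

		pdev = platform_device_alloc("ixp4xx-watchdog", -1);
		if (!pdev)
			return -ENOMEM;
		pdev->dev.parent = parent;
		/* The watchdog probe reads this back as iwdt->base */
		pdev->dev.platform_data = (__force void *)base;
		ret = platform_device_add(pdev);
		if (ret)
			platform_device_put(pdev);
		return ret;
	}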