Diffstat (limited to 'drivers/soc')
37 files changed, 2007 insertions, 1734 deletions
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index f09023f7ab11..309643fe35f9 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -1,7 +1,9 @@ menu "SOC (System On Chip) specific Drivers" +source "drivers/soc/atmel/Kconfig" source "drivers/soc/bcm/Kconfig" source "drivers/soc/fsl/Kconfig" +source "drivers/soc/imx/Kconfig" source "drivers/soc/mediatek/Kconfig" source "drivers/soc/qcom/Kconfig" source "drivers/soc/rockchip/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 05eae52a30b4..824b44281efa 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -2,10 +2,12 @@ # Makefile for the Linux Kernel SOC specific device drivers. # +obj-$(CONFIG_ARCH_AT91) += atmel/ obj-y += bcm/ obj-$(CONFIG_ARCH_DOVE) += dove/ obj-$(CONFIG_MACH_DOVE) += dove/ obj-y += fsl/ +obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_QCOM) += qcom/ obj-$(CONFIG_ARCH_RENESAS) += renesas/ diff --git a/drivers/soc/atmel/Kconfig b/drivers/soc/atmel/Kconfig new file mode 100644 index 000000000000..6242ebb41abb --- /dev/null +++ b/drivers/soc/atmel/Kconfig @@ -0,0 +1,6 @@ +config AT91_SOC_ID + bool "SoC bus for Atmel ARM SoCs" + depends on ARCH_AT91 || COMPILE_TEST + default ARCH_AT91 + help + Include support for the SoC bus on the Atmel ARM SoCs. diff --git a/drivers/soc/atmel/Makefile b/drivers/soc/atmel/Makefile new file mode 100644 index 000000000000..2d92f32e4ea5 --- /dev/null +++ b/drivers/soc/atmel/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_AT91_SOC_ID) += soc.o diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c new file mode 100644 index 000000000000..4790094b498e --- /dev/null +++ b/drivers/soc/atmel/soc.c @@ -0,0 +1,231 @@ +/* + * Copyright (C) 2015 Atmel + * + * Alexandre Belloni <alexandre.belloni@free-electrons.com + * Boris Brezillon <boris.brezillon@free-electrons.com + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ * + */ + +#define pr_fmt(fmt) "AT91: " fmt + +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/slab.h> +#include <linux/sys_soc.h> + +#include "soc.h" + +#define AT91_DBGU_CIDR 0x40 +#define AT91_DBGU_EXID 0x44 +#define AT91_CHIPID_CIDR 0x00 +#define AT91_CHIPID_EXID 0x04 +#define AT91_CIDR_VERSION(x) ((x) & 0x1f) +#define AT91_CIDR_EXT BIT(31) +#define AT91_CIDR_MATCH_MASK 0x7fffffe0 + +static const struct at91_soc __initconst socs[] = { +#ifdef CONFIG_SOC_AT91RM9200 + AT91_SOC(AT91RM9200_CIDR_MATCH, 0, "at91rm9200 BGA", "at91rm9200"), +#endif +#ifdef CONFIG_SOC_AT91SAM9 + AT91_SOC(AT91SAM9260_CIDR_MATCH, 0, "at91sam9260", NULL), + AT91_SOC(AT91SAM9261_CIDR_MATCH, 0, "at91sam9261", NULL), + AT91_SOC(AT91SAM9263_CIDR_MATCH, 0, "at91sam9263", NULL), + AT91_SOC(AT91SAM9G20_CIDR_MATCH, 0, "at91sam9g20", NULL), + AT91_SOC(AT91SAM9RL64_CIDR_MATCH, 0, "at91sam9rl64", NULL), + AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M11_EXID_MATCH, + "at91sam9m11", "at91sam9g45"), + AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9M10_EXID_MATCH, + "at91sam9m10", "at91sam9g45"), + AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G46_EXID_MATCH, + "at91sam9g46", "at91sam9g45"), + AT91_SOC(AT91SAM9G45_CIDR_MATCH, AT91SAM9G45_EXID_MATCH, + "at91sam9g45", "at91sam9g45"), + AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G15_EXID_MATCH, + "at91sam9g15", "at91sam9x5"), + AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G35_EXID_MATCH, + "at91sam9g35", "at91sam9x5"), + AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X35_EXID_MATCH, + "at91sam9x35", "at91sam9x5"), + AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9G25_EXID_MATCH, + "at91sam9g25", "at91sam9x5"), + AT91_SOC(AT91SAM9X5_CIDR_MATCH, AT91SAM9X25_EXID_MATCH, + "at91sam9x25", "at91sam9x5"), + AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN12_EXID_MATCH, + "at91sam9cn12", "at91sam9n12"), + AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9N12_EXID_MATCH, + "at91sam9n12", "at91sam9n12"), + AT91_SOC(AT91SAM9N12_CIDR_MATCH, AT91SAM9CN11_EXID_MATCH, + "at91sam9cn11", "at91sam9n12"), + AT91_SOC(AT91SAM9XE128_CIDR_MATCH, 0, "at91sam9xe128", "at91sam9xe128"), + AT91_SOC(AT91SAM9XE256_CIDR_MATCH, 0, "at91sam9xe256", "at91sam9xe256"), + AT91_SOC(AT91SAM9XE512_CIDR_MATCH, 0, "at91sam9xe512", "at91sam9xe512"), +#endif +#ifdef CONFIG_SOC_SAMA5 + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH, + "sama5d21", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D22CU_EXID_MATCH, + "sama5d22", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D23CU_EXID_MATCH, + "sama5d23", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CX_EXID_MATCH, + "sama5d24", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D24CU_EXID_MATCH, + "sama5d24", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D26CU_EXID_MATCH, + "sama5d26", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CU_EXID_MATCH, + "sama5d27", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27CN_EXID_MATCH, + "sama5d27", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CU_EXID_MATCH, + "sama5d28", "sama5d2"), + AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D28CN_EXID_MATCH, + "sama5d28", "sama5d2"), + AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH, + "sama5d31", "sama5d3"), + AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH, + "sama5d33", "sama5d3"), + AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D34_EXID_MATCH, + "sama5d34", "sama5d3"), + AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D35_EXID_MATCH, + "sama5d35", "sama5d3"), + AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D36_EXID_MATCH, + "sama5d36", "sama5d3"), + AT91_SOC(SAMA5D4_CIDR_MATCH, 
SAMA5D41_EXID_MATCH, + "sama5d41", "sama5d4"), + AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D42_EXID_MATCH, + "sama5d42", "sama5d4"), + AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D43_EXID_MATCH, + "sama5d43", "sama5d4"), + AT91_SOC(SAMA5D4_CIDR_MATCH, SAMA5D44_EXID_MATCH, + "sama5d44", "sama5d4"), +#endif + { /* sentinel */ }, +}; + +static int __init at91_get_cidr_exid_from_dbgu(u32 *cidr, u32 *exid) +{ + struct device_node *np; + void __iomem *regs; + + np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-dbgu"); + if (!np) + np = of_find_compatible_node(NULL, NULL, + "atmel,at91sam9260-dbgu"); + if (!np) + return -ENODEV; + + regs = of_iomap(np, 0); + of_node_put(np); + + if (!regs) { + pr_warn("Could not map DBGU iomem range"); + return -ENXIO; + } + + *cidr = readl(regs + AT91_DBGU_CIDR); + *exid = readl(regs + AT91_DBGU_EXID); + + iounmap(regs); + + return 0; +} + +static int __init at91_get_cidr_exid_from_chipid(u32 *cidr, u32 *exid) +{ + struct device_node *np; + void __iomem *regs; + + np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-chipid"); + if (!np) + return -ENODEV; + + regs = of_iomap(np, 0); + of_node_put(np); + + if (!regs) { + pr_warn("Could not map DBGU iomem range"); + return -ENXIO; + } + + *cidr = readl(regs + AT91_CHIPID_CIDR); + *exid = readl(regs + AT91_CHIPID_EXID); + + iounmap(regs); + + return 0; +} + +struct soc_device * __init at91_soc_init(const struct at91_soc *socs) +{ + struct soc_device_attribute *soc_dev_attr; + const struct at91_soc *soc; + struct soc_device *soc_dev; + u32 cidr, exid; + int ret; + + /* + * With SAMA5D2 and later SoCs, CIDR and EXID registers are no more + * in the dbgu device but in the chipid device whose purpose is only + * to expose these two registers. + */ + ret = at91_get_cidr_exid_from_dbgu(&cidr, &exid); + if (ret) + ret = at91_get_cidr_exid_from_chipid(&cidr, &exid); + if (ret) { + if (ret == -ENODEV) + pr_warn("Could not find identification node"); + return NULL; + } + + for (soc = socs; soc->name; soc++) { + if (soc->cidr_match != (cidr & AT91_CIDR_MATCH_MASK)) + continue; + + if (!(cidr & AT91_CIDR_EXT) || soc->exid_match == exid) + break; + } + + if (!soc->name) { + pr_warn("Could not find matching SoC description\n"); + return NULL; + } + + soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + if (!soc_dev_attr) + return NULL; + + soc_dev_attr->family = soc->family; + soc_dev_attr->soc_id = soc->name; + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%X", + AT91_CIDR_VERSION(cidr)); + soc_dev = soc_device_register(soc_dev_attr); + if (IS_ERR(soc_dev)) { + kfree(soc_dev_attr->revision); + kfree(soc_dev_attr); + pr_warn("Could not register SoC device\n"); + return NULL; + } + + if (soc->family) + pr_info("Detected SoC family: %s\n", soc->family); + pr_info("Detected SoC: %s, revision %X\n", soc->name, + AT91_CIDR_VERSION(cidr)); + + return soc_dev; +} + +static int __init atmel_soc_device_init(void) +{ + at91_soc_init(socs); + + return 0; +} +subsys_initcall(atmel_soc_device_init); diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h new file mode 100644 index 000000000000..228efded5085 --- /dev/null +++ b/drivers/soc/atmel/soc.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2015 Atmel + * + * Boris Brezillon <boris.brezillon@free-electrons.com + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ * + */ + +#ifndef __AT91_SOC_H +#define __AT91_SOC_H + +#include <linux/sys_soc.h> + +struct at91_soc { + u32 cidr_match; + u32 exid_match; + const char *name; + const char *family; +}; + +#define AT91_SOC(__cidr, __exid, __name, __family) \ + { \ + .cidr_match = (__cidr), \ + .exid_match = (__exid), \ + .name = (__name), \ + .family = (__family), \ + } + +struct soc_device * __init +at91_soc_init(const struct at91_soc *socs); + +#define AT91RM9200_CIDR_MATCH 0x09290780 + +#define AT91SAM9260_CIDR_MATCH 0x019803a0 +#define AT91SAM9261_CIDR_MATCH 0x019703a0 +#define AT91SAM9263_CIDR_MATCH 0x019607a0 +#define AT91SAM9G20_CIDR_MATCH 0x019905a0 +#define AT91SAM9RL64_CIDR_MATCH 0x019b03a0 +#define AT91SAM9G45_CIDR_MATCH 0x019b05a0 +#define AT91SAM9X5_CIDR_MATCH 0x019a05a0 +#define AT91SAM9N12_CIDR_MATCH 0x019a07a0 + +#define AT91SAM9M11_EXID_MATCH 0x00000001 +#define AT91SAM9M10_EXID_MATCH 0x00000002 +#define AT91SAM9G46_EXID_MATCH 0x00000003 +#define AT91SAM9G45_EXID_MATCH 0x00000004 + +#define AT91SAM9G15_EXID_MATCH 0x00000000 +#define AT91SAM9G35_EXID_MATCH 0x00000001 +#define AT91SAM9X35_EXID_MATCH 0x00000002 +#define AT91SAM9G25_EXID_MATCH 0x00000003 +#define AT91SAM9X25_EXID_MATCH 0x00000004 + +#define AT91SAM9CN12_EXID_MATCH 0x00000005 +#define AT91SAM9N12_EXID_MATCH 0x00000006 +#define AT91SAM9CN11_EXID_MATCH 0x00000009 + +#define AT91SAM9XE128_CIDR_MATCH 0x329973a0 +#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0 +#define AT91SAM9XE512_CIDR_MATCH 0x329aa3a0 + +#define SAMA5D2_CIDR_MATCH 0x0a5c08c0 +#define SAMA5D21CU_EXID_MATCH 0x0000005a +#define SAMA5D22CU_EXID_MATCH 0x00000059 +#define SAMA5D22CN_EXID_MATCH 0x00000069 +#define SAMA5D23CU_EXID_MATCH 0x00000058 +#define SAMA5D24CX_EXID_MATCH 0x00000004 +#define SAMA5D24CU_EXID_MATCH 0x00000014 +#define SAMA5D26CU_EXID_MATCH 0x00000012 +#define SAMA5D27CU_EXID_MATCH 0x00000011 +#define SAMA5D27CN_EXID_MATCH 0x00000021 +#define SAMA5D28CU_EXID_MATCH 0x00000010 +#define SAMA5D28CN_EXID_MATCH 0x00000020 + +#define SAMA5D3_CIDR_MATCH 0x0a5c07c0 +#define SAMA5D31_EXID_MATCH 0x00444300 +#define SAMA5D33_EXID_MATCH 0x00414300 +#define SAMA5D34_EXID_MATCH 0x00414301 +#define SAMA5D35_EXID_MATCH 0x00584300 +#define SAMA5D36_EXID_MATCH 0x00004301 + +#define SAMA5D4_CIDR_MATCH 0x0a5c07c0 +#define SAMA5D41_EXID_MATCH 0x00000001 +#define SAMA5D42_EXID_MATCH 0x00000002 +#define SAMA5D43_EXID_MATCH 0x00000003 +#define SAMA5D44_EXID_MATCH 0x00000004 + +#endif /* __AT91_SOC_H */ diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c index 94e7335553f4..b6195fdf0d00 100644 --- a/drivers/soc/bcm/brcmstb/common.c +++ b/drivers/soc/bcm/brcmstb/common.c @@ -41,6 +41,15 @@ bool soc_is_brcmstb(void) } static const struct of_device_id sun_top_ctrl_match[] = { + { .compatible = "brcm,bcm7125-sun-top-ctrl", }, + { .compatible = "brcm,bcm7346-sun-top-ctrl", }, + { .compatible = "brcm,bcm7358-sun-top-ctrl", }, + { .compatible = "brcm,bcm7360-sun-top-ctrl", }, + { .compatible = "brcm,bcm7362-sun-top-ctrl", }, + { .compatible = "brcm,bcm7420-sun-top-ctrl", }, + { .compatible = "brcm,bcm7425-sun-top-ctrl", }, + { .compatible = "brcm,bcm7429-sun-top-ctrl", }, + { .compatible = "brcm,bcm7425-sun-top-ctrl", }, { .compatible = "brcm,brcmstb-sun-top-ctrl", }, { } }; diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 2827a65aa25a..18eefc3f1abe 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -2024,8 +2024,7 @@ out: return ret; } -static int qman_query_fq_np(struct qman_fq *fq, - struct 
qm_mcr_queryfq_np *np) +int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np) { union qm_mc_command *mcc; union qm_mc_result *mcr; @@ -2051,6 +2050,7 @@ out: put_affine_portal(); return ret; } +EXPORT_SYMBOL(qman_query_fq_np); static int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd) diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c index f4e6e70de259..90bc40c48675 100644 --- a/drivers/soc/fsl/qbman/qman_ccsr.c +++ b/drivers/soc/fsl/qbman/qman_ccsr.c @@ -34,6 +34,8 @@ u16 qman_ip_rev; EXPORT_SYMBOL(qman_ip_rev); u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1; EXPORT_SYMBOL(qm_channel_pool1); +u16 qm_channel_caam = QMAN_CHANNEL_CAAM; +EXPORT_SYMBOL(qm_channel_caam); /* Register offsets */ #define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10)) @@ -720,8 +722,10 @@ static int fsl_qman_probe(struct platform_device *pdev) return -ENODEV; } - if ((qman_ip_rev & 0xff00) >= QMAN_REV30) + if ((qman_ip_rev & 0xff00) >= QMAN_REV30) { qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3; + qm_channel_caam = QMAN_CHANNEL_CAAM_REV3; + } ret = zero_priv_mem(dev, node, fqd_a, fqd_sz); WARN_ON(ret); diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h index 53685b59718e..5fe9faf6232e 100644 --- a/drivers/soc/fsl/qbman/qman_priv.h +++ b/drivers/soc/fsl/qbman/qman_priv.h @@ -33,6 +33,7 @@ #include "dpaa_sys.h" #include <soc/fsl/qman.h> +#include <linux/dma-mapping.h> #include <linux/iommu.h> #if defined(CONFIG_FSL_PAMU) @@ -89,67 +90,6 @@ static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); } -/* "Query FQ Non-Programmable Fields" */ - -struct qm_mcr_queryfq_np { - u8 verb; - u8 result; - u8 __reserved1; - u8 state; /* QM_MCR_NP_STATE_*** */ - u32 fqd_link; /* 24-bit, _res2[24-31] */ - u16 odp_seq; /* 14-bit, _res3[14-15] */ - u16 orp_nesn; /* 14-bit, _res4[14-15] */ - u16 orp_ea_hseq; /* 15-bit, _res5[15] */ - u16 orp_ea_tseq; /* 15-bit, _res6[15] */ - u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */ - u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */ - u32 pfdr_hptr; /* 24-bit, _res9[24-31] */ - u32 pfdr_tptr; /* 24-bit, _res10[24-31] */ - u8 __reserved2[5]; - u8 is; /* 1-bit, _res12[1-7] */ - u16 ics_surp; - u32 byte_cnt; - u32 frm_cnt; /* 24-bit, _res13[24-31] */ - u32 __reserved3; - u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */ - u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */ - u16 __reserved4; - u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */ - u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ - u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ -} __packed; - -#define QM_MCR_NP_STATE_FE 0x10 -#define QM_MCR_NP_STATE_R 0x08 -#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */ -#define QM_MCR_NP_STATE_OOS 0x00 -#define QM_MCR_NP_STATE_RETIRED 0x01 -#define QM_MCR_NP_STATE_TEN_SCHED 0x02 -#define QM_MCR_NP_STATE_TRU_SCHED 0x03 -#define QM_MCR_NP_STATE_PARKED 0x04 -#define QM_MCR_NP_STATE_ACTIVE 0x05 -#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */ -#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */ -#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */ -#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */ -#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */ - -enum qm_mcr_queryfq_np_masks { - qm_mcr_fqd_link_mask = BIT(24)-1, - qm_mcr_odp_seq_mask = BIT(14)-1, - qm_mcr_orp_nesn_mask = BIT(14)-1, - qm_mcr_orp_ea_hseq_mask = BIT(15)-1, - qm_mcr_orp_ea_tseq_mask = BIT(15)-1, - qm_mcr_orp_ea_hptr_mask = BIT(24)-1, - qm_mcr_orp_ea_tptr_mask = 
BIT(24)-1, - qm_mcr_pfdr_hptr_mask = BIT(24)-1, - qm_mcr_pfdr_tptr_mask = BIT(24)-1, - qm_mcr_is_mask = BIT(1)-1, - qm_mcr_frm_cnt_mask = BIT(24)-1, -}; -#define qm_mcr_np_get(np, field) \ - ((np)->field & (qm_mcr_##field##_mask)) - /* Congestion Groups */ /* @@ -271,42 +211,6 @@ const struct qm_portal_config *qman_destroy_affine_portal(void); */ int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd); -/* - * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use - * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use - * FQID(n) to fill in the frame queue ID. - */ -#define QM_VDQCR_PRECEDENCE_VDQCR 0x0 -#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000 -#define QM_VDQCR_EXACT 0x40000000 -#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000 -#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24) -#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f) -#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0) - -#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */ -#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */ -#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */ - -/* - * qman_volatile_dequeue - Issue a volatile dequeue command - * @fq: the frame queue object to dequeue from - * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options - * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set() - * - * Attempts to lock access to the portal's VDQCR volatile dequeue functionality. - * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and - * the VDQCR is already in use, otherwise returns non-zero for failure. If - * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once - * the VDQCR command has finished executing (ie. once the callback for the last - * DQRR entry resulting from the VDQCR command has been called). If not using - * the FINISH flag, completion can be determined either by detecting the - * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits - * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting - * for the QMAN_FQ_STATE_VDQCR bit to disappear. - */ -int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr); - int qman_alloc_fq_table(u32 num_fqids); /* QMan s/w corenet portal, low-level i/face */ diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig new file mode 100644 index 000000000000..357a5d8f8da0 --- /dev/null +++ b/drivers/soc/imx/Kconfig @@ -0,0 +1,9 @@ +menu "i.MX SoC drivers" + +config IMX7_PM_DOMAINS + bool "i.MX7 PM domains" + select PM_GENERIC_DOMAINS + depends on SOC_IMX7D || (COMPILE_TEST && OF) + default y if SOC_IMX7D + +endmenu diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile new file mode 100644 index 000000000000..5b6e396c1121 --- /dev/null +++ b/drivers/soc/imx/Makefile @@ -0,0 +1,2 @@ +obj-y += gpc.o +obj-$(CONFIG_IMX7_PM_DOMAINS) += gpcv2.o diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c new file mode 100644 index 000000000000..47e7aa963dbb --- /dev/null +++ b/drivers/soc/imx/gpc.c @@ -0,0 +1,489 @@ +/* + * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de> + * Copyright 2011-2013 Freescale Semiconductor, Inc. + * + * The code contained herein is licensed under the GNU General Public + * License. 
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#define GPC_CNTR 0x000 + +#define GPC_PGC_CTRL_OFFS 0x0 +#define GPC_PGC_PUPSCR_OFFS 0x4 +#define GPC_PGC_PDNSCR_OFFS 0x8 +#define GPC_PGC_SW2ISO_SHIFT 0x8 +#define GPC_PGC_SW_SHIFT 0x0 + +#define GPC_PGC_GPU_PDN 0x260 +#define GPC_PGC_GPU_PUPSCR 0x264 +#define GPC_PGC_GPU_PDNSCR 0x268 + +#define GPU_VPU_PUP_REQ BIT(1) +#define GPU_VPU_PDN_REQ BIT(0) + +#define GPC_CLK_MAX 6 + +#define PGC_DOMAIN_FLAG_NO_PD BIT(0) + +struct imx_pm_domain { + struct generic_pm_domain base; + struct regmap *regmap; + struct regulator *supply; + struct clk *clk[GPC_CLK_MAX]; + int num_clks; + unsigned int reg_offs; + signed char cntr_pdn_bit; + unsigned int ipg_rate_mhz; + unsigned int flags; +}; + +static inline struct imx_pm_domain * +to_imx_pm_domain(struct generic_pm_domain *genpd) +{ + return container_of(genpd, struct imx_pm_domain, base); +} + +static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd) +{ + struct imx_pm_domain *pd = to_imx_pm_domain(genpd); + int iso, iso2sw; + u32 val; + + if (pd->flags & PGC_DOMAIN_FLAG_NO_PD) + return -EBUSY; + + /* Read ISO and ISO2SW power down delays */ + regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); + iso = val & 0x3f; + iso2sw = (val >> 8) & 0x3f; + + /* Gate off domain when powered down */ + regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, + 0x1, 0x1); + + /* Request GPC to power down domain */ + val = BIT(pd->cntr_pdn_bit); + regmap_update_bits(pd->regmap, GPC_CNTR, val, val); + + /* Wait ISO + ISO2SW IPG clock cycles */ + udelay(DIV_ROUND_UP(iso + iso2sw, pd->ipg_rate_mhz)); + + if (pd->supply) + regulator_disable(pd->supply); + + return 0; +} + +static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd) +{ + struct imx_pm_domain *pd = to_imx_pm_domain(genpd); + int i, ret, sw, sw2iso; + u32 val; + + if (pd->supply) { + ret = regulator_enable(pd->supply); + if (ret) { + pr_err("%s: failed to enable regulator: %d\n", + __func__, ret); + return ret; + } + } + + /* Enable reset clocks for all devices in the domain */ + for (i = 0; i < pd->num_clks; i++) + clk_prepare_enable(pd->clk[i]); + + /* Gate off domain when powered down */ + regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS, + 0x1, 0x1); + + /* Read ISO and ISO2SW power up delays */ + regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val); + sw = val & 0x3f; + sw2iso = (val >> 8) & 0x3f; + + /* Request GPC to power up domain */ + val = BIT(pd->cntr_pdn_bit + 1); + regmap_update_bits(pd->regmap, GPC_CNTR, val, val); + + /* Wait ISO + ISO2SW IPG clock cycles */ + udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz)); + + /* Disable reset clocks for all devices in the domain */ + for (i = 0; i < pd->num_clks; i++) + clk_disable_unprepare(pd->clk[i]); + + return 0; +} + +static int imx_pgc_get_clocks(struct device *dev, struct imx_pm_domain *domain) +{ + int i, ret; + + for (i = 0; ; i++) { + struct clk *clk = of_clk_get(dev->of_node, i); + if (IS_ERR(clk)) + break; + if (i >= GPC_CLK_MAX) { + dev_err(dev, "more than %d clocks\n", GPC_CLK_MAX); + ret = -EINVAL; + goto clk_err; + } + 
domain->clk[i] = clk; + } + domain->num_clks = i; + + return 0; + +clk_err: + while (i--) + clk_put(domain->clk[i]); + + return ret; +} + +static void imx_pgc_put_clocks(struct imx_pm_domain *domain) +{ + int i; + + for (i = domain->num_clks - 1; i >= 0; i--) + clk_put(domain->clk[i]); +} + +static int imx_pgc_parse_dt(struct device *dev, struct imx_pm_domain *domain) +{ + /* try to get the domain supply regulator */ + domain->supply = devm_regulator_get_optional(dev, "power"); + if (IS_ERR(domain->supply)) { + if (PTR_ERR(domain->supply) == -ENODEV) + domain->supply = NULL; + else + return PTR_ERR(domain->supply); + } + + /* try to get all clocks needed for reset propagation */ + return imx_pgc_get_clocks(dev, domain); +} + +static int imx_pgc_power_domain_probe(struct platform_device *pdev) +{ + struct imx_pm_domain *domain = pdev->dev.platform_data; + struct device *dev = &pdev->dev; + int ret; + + /* if this PD is associated with a DT node try to parse it */ + if (dev->of_node) { + ret = imx_pgc_parse_dt(dev, domain); + if (ret) + return ret; + } + + /* initially power on the domain */ + if (domain->base.power_on) + domain->base.power_on(&domain->base); + + if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + pm_genpd_init(&domain->base, NULL, false); + ret = of_genpd_add_provider_simple(dev->of_node, &domain->base); + if (ret) + goto genpd_err; + } + + device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE); + + return 0; + +genpd_err: + pm_genpd_remove(&domain->base); + imx_pgc_put_clocks(domain); + + return ret; +} + +static int imx_pgc_power_domain_remove(struct platform_device *pdev) +{ + struct imx_pm_domain *domain = pdev->dev.platform_data; + + if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + of_genpd_del_provider(pdev->dev.of_node); + pm_genpd_remove(&domain->base); + imx_pgc_put_clocks(domain); + } + + return 0; +} + +static const struct platform_device_id imx_pgc_power_domain_id[] = { + { "imx-pgc-power-domain"}, + { }, +}; + +static struct platform_driver imx_pgc_power_domain_driver = { + .driver = { + .name = "imx-pgc-pd", + }, + .probe = imx_pgc_power_domain_probe, + .remove = imx_pgc_power_domain_remove, + .id_table = imx_pgc_power_domain_id, +}; +builtin_platform_driver(imx_pgc_power_domain_driver) + +#define GPC_PGC_DOMAIN_ARM 0 +#define GPC_PGC_DOMAIN_PU 1 +#define GPC_PGC_DOMAIN_DISPLAY 2 + +static struct genpd_power_state imx6_pm_domain_pu_state = { + .power_off_latency_ns = 25000, + .power_on_latency_ns = 2000000, +}; + +static struct imx_pm_domain imx_gpc_domains[] = { + { + .base = { + .name = "ARM", + }, + }, { + .base = { + .name = "PU", + .power_off = imx6_pm_domain_power_off, + .power_on = imx6_pm_domain_power_on, + .states = &imx6_pm_domain_pu_state, + .state_count = 1, + }, + .reg_offs = 0x260, + .cntr_pdn_bit = 0, + }, { + .base = { + .name = "DISPLAY", + .power_off = imx6_pm_domain_power_off, + .power_on = imx6_pm_domain_power_on, + }, + .reg_offs = 0x240, + .cntr_pdn_bit = 4, + } +}; + +struct imx_gpc_dt_data { + int num_domains; + bool err009619_present; +}; + +static const struct imx_gpc_dt_data imx6q_dt_data = { + .num_domains = 2, + .err009619_present = false, +}; + +static const struct imx_gpc_dt_data imx6qp_dt_data = { + .num_domains = 2, + .err009619_present = true, +}; + +static const struct imx_gpc_dt_data imx6sl_dt_data = { + .num_domains = 3, + .err009619_present = false, +}; + +static const struct of_device_id imx_gpc_dt_ids[] = { + { .compatible = "fsl,imx6q-gpc", .data = &imx6q_dt_data }, + { .compatible = "fsl,imx6qp-gpc", .data = &imx6qp_dt_data }, 
+ { .compatible = "fsl,imx6sl-gpc", .data = &imx6sl_dt_data }, + { } +}; + +static const struct regmap_config imx_gpc_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = 0x2ac, +}; + +static struct generic_pm_domain *imx_gpc_onecell_domains[] = { + &imx_gpc_domains[0].base, + &imx_gpc_domains[1].base, +}; + +static struct genpd_onecell_data imx_gpc_onecell_data = { + .domains = imx_gpc_onecell_domains, + .num_domains = 2, +}; + +static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap, + unsigned int num_domains) +{ + struct imx_pm_domain *domain; + int i, ret; + + for (i = 0; i < num_domains; i++) { + domain = &imx_gpc_domains[i]; + domain->regmap = regmap; + domain->ipg_rate_mhz = 66; + + if (i == 1) { + domain->supply = devm_regulator_get(dev, "pu"); + if (IS_ERR(domain->supply)) + return PTR_ERR(domain->supply);; + + ret = imx_pgc_get_clocks(dev, domain); + if (ret) + goto clk_err; + + domain->base.power_on(&domain->base); + } + } + + for (i = 0; i < num_domains; i++) + pm_genpd_init(&imx_gpc_domains[i].base, NULL, false); + + if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS)) { + ret = of_genpd_add_provider_onecell(dev->of_node, + &imx_gpc_onecell_data); + if (ret) + goto genpd_err; + } + + return 0; + +genpd_err: + for (i = 0; i < num_domains; i++) + pm_genpd_remove(&imx_gpc_domains[i].base); + imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]); +clk_err: + return ret; +} + +static int imx_gpc_probe(struct platform_device *pdev) +{ + const struct of_device_id *of_id = + of_match_device(imx_gpc_dt_ids, &pdev->dev); + const struct imx_gpc_dt_data *of_id_data = of_id->data; + struct device_node *pgc_node; + struct regmap *regmap; + struct resource *res; + void __iomem *base; + int ret; + + pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); + + /* bail out if DT too old and doesn't provide the necessary info */ + if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && + !pgc_node) + return 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base, + &imx_gpc_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(&pdev->dev, "failed to init regmap: %d\n", + ret); + return ret; + } + + /* Disable PU power down in normal operation if ERR009619 is present */ + if (of_id_data->err009619_present) + imx_gpc_domains[GPC_PGC_DOMAIN_PU].flags |= + PGC_DOMAIN_FLAG_NO_PD; + + if (!pgc_node) { + ret = imx_gpc_old_dt_init(&pdev->dev, regmap, + of_id_data->num_domains); + if (ret) + return ret; + } else { + struct imx_pm_domain *domain; + struct platform_device *pd_pdev; + struct device_node *np; + struct clk *ipg_clk; + unsigned int ipg_rate_mhz; + int domain_index; + + ipg_clk = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(ipg_clk)) + return PTR_ERR(ipg_clk); + ipg_rate_mhz = clk_get_rate(ipg_clk) / 1000000; + + for_each_child_of_node(pgc_node, np) { + ret = of_property_read_u32(np, "reg", &domain_index); + if (ret) { + of_node_put(np); + return ret; + } + if (domain_index >= of_id_data->num_domains) + continue; + + domain = &imx_gpc_domains[domain_index]; + domain->regmap = regmap; + domain->ipg_rate_mhz = ipg_rate_mhz; + + pd_pdev = platform_device_alloc("imx-pgc-power-domain", + domain_index); + if (!pd_pdev) { + of_node_put(np); + return -ENOMEM; + } + pd_pdev->dev.platform_data = domain; + pd_pdev->dev.parent = &pdev->dev; + pd_pdev->dev.of_node 
= np; + + ret = platform_device_add(pd_pdev); + if (ret) { + platform_device_put(pd_pdev); + of_node_put(np); + return ret; + } + } + } + + return 0; +} + +static int imx_gpc_remove(struct platform_device *pdev) +{ + int ret; + + /* + * If the old DT binding is used the toplevel driver needs to + * de-register the power domains + */ + if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { + of_genpd_del_provider(pdev->dev.of_node); + + ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); + if (ret) + return ret; + imx_pgc_put_clocks(&imx_gpc_domains[GPC_PGC_DOMAIN_PU]); + + ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_ARM].base); + if (ret) + return ret; + } + + return 0; +} + +static struct platform_driver imx_gpc_driver = { + .driver = { + .name = "imx-gpc", + .of_match_table = imx_gpc_dt_ids, + }, + .probe = imx_gpc_probe, + .remove = imx_gpc_remove, +}; +builtin_platform_driver(imx_gpc_driver) diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c new file mode 100644 index 000000000000..3039072911a5 --- /dev/null +++ b/drivers/soc/imx/gpcv2.c @@ -0,0 +1,363 @@ +/* + * Copyright 2017 Impinj, Inc + * Author: Andrey Smirnov <andrew.smirnov@gmail.com> + * + * Based on the code of analogus driver: + * + * Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de> + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <dt-bindings/power/imx7-power.h> + +#define GPC_LPCR_A7_BSC 0x000 + +#define GPC_PGC_CPU_MAPPING 0x0ec +#define USB_HSIC_PHY_A7_DOMAIN BIT(6) +#define USB_OTG2_PHY_A7_DOMAIN BIT(5) +#define USB_OTG1_PHY_A7_DOMAIN BIT(4) +#define PCIE_PHY_A7_DOMAIN BIT(3) +#define MIPI_PHY_A7_DOMAIN BIT(2) + +#define GPC_PU_PGC_SW_PUP_REQ 0x0f8 +#define GPC_PU_PGC_SW_PDN_REQ 0x104 +#define USB_HSIC_PHY_SW_Pxx_REQ BIT(4) +#define USB_OTG2_PHY_SW_Pxx_REQ BIT(3) +#define USB_OTG1_PHY_SW_Pxx_REQ BIT(2) +#define PCIE_PHY_SW_Pxx_REQ BIT(1) +#define MIPI_PHY_SW_Pxx_REQ BIT(0) + +#define GPC_M4_PU_PDN_FLG 0x1bc + + +#define PGC_MIPI 4 +#define PGC_PCIE 5 +#define PGC_USB_HSIC 8 +#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40) +#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc) + +#define GPC_PGC_CTRL_PCR BIT(0) + +struct imx7_pgc_domain { + struct generic_pm_domain genpd; + struct regmap *regmap; + struct regulator *regulator; + + unsigned int pgc; + + const struct { + u32 pxx; + u32 map; + } bits; + + const int voltage; + struct device *dev; +}; + +static int imx7_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, + bool on) +{ + struct imx7_pgc_domain *domain = container_of(genpd, + struct imx7_pgc_domain, + genpd); + unsigned int offset = on ? 
+ GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ; + const bool enable_power_control = !on; + const bool has_regulator = !IS_ERR(domain->regulator); + unsigned long deadline; + int ret = 0; + + regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING, + domain->bits.map, domain->bits.map); + + if (has_regulator && on) { + ret = regulator_enable(domain->regulator); + if (ret) { + dev_err(domain->dev, "failed to enable regulator\n"); + goto unmap; + } + } + + if (enable_power_control) + regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc), + GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR); + + regmap_update_bits(domain->regmap, offset, + domain->bits.pxx, domain->bits.pxx); + + /* + * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait + * for PUP_REQ/PDN_REQ bit to be cleared + */ + deadline = jiffies + msecs_to_jiffies(1); + while (true) { + u32 pxx_req; + + regmap_read(domain->regmap, offset, &pxx_req); + + if (!(pxx_req & domain->bits.pxx)) + break; + + if (time_after(jiffies, deadline)) { + dev_err(domain->dev, "falied to command PGC\n"); + ret = -ETIMEDOUT; + /* + * If we were in a process of enabling a + * domain and failed we might as well disable + * the regulator we just enabled. And if it + * was the opposite situation and we failed to + * power down -- keep the regulator on + */ + on = !on; + break; + } + + cpu_relax(); + } + + if (enable_power_control) + regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc), + GPC_PGC_CTRL_PCR, 0); + + if (has_regulator && !on) { + int err; + + err = regulator_disable(domain->regulator); + if (err) + dev_err(domain->dev, + "failed to disable regulator: %d\n", ret); + /* Preserve earlier error code */ + ret = ret ?: err; + } +unmap: + regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING, + domain->bits.map, 0); + return ret; +} + +static int imx7_gpc_pu_pgc_sw_pup_req(struct generic_pm_domain *genpd) +{ + return imx7_gpc_pu_pgc_sw_pxx_req(genpd, true); +} + +static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd) +{ + return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false); +} + +static struct imx7_pgc_domain imx7_pgc_domains[] = { + [IMX7_POWER_DOMAIN_MIPI_PHY] = { + .genpd = { + .name = "mipi-phy", + }, + .bits = { + .pxx = MIPI_PHY_SW_Pxx_REQ, + .map = MIPI_PHY_A7_DOMAIN, + }, + .voltage = 1000000, + .pgc = PGC_MIPI, + }, + + [IMX7_POWER_DOMAIN_PCIE_PHY] = { + .genpd = { + .name = "pcie-phy", + }, + .bits = { + .pxx = PCIE_PHY_SW_Pxx_REQ, + .map = PCIE_PHY_A7_DOMAIN, + }, + .voltage = 1000000, + .pgc = PGC_PCIE, + }, + + [IMX7_POWER_DOMAIN_USB_HSIC_PHY] = { + .genpd = { + .name = "usb-hsic-phy", + }, + .bits = { + .pxx = USB_HSIC_PHY_SW_Pxx_REQ, + .map = USB_HSIC_PHY_A7_DOMAIN, + }, + .voltage = 1200000, + .pgc = PGC_USB_HSIC, + }, +}; + +static int imx7_pgc_domain_probe(struct platform_device *pdev) +{ + struct imx7_pgc_domain *domain = pdev->dev.platform_data; + int ret; + + domain->dev = &pdev->dev; + + ret = pm_genpd_init(&domain->genpd, NULL, true); + if (ret) { + dev_err(domain->dev, "Failed to init power domain\n"); + return ret; + } + + domain->regulator = devm_regulator_get_optional(domain->dev, "power"); + if (IS_ERR(domain->regulator)) { + if (PTR_ERR(domain->regulator) != -ENODEV) { + dev_err(domain->dev, "Failed to get domain's regulator\n"); + return PTR_ERR(domain->regulator); + } + } else { + regulator_set_voltage(domain->regulator, + domain->voltage, domain->voltage); + } + + ret = of_genpd_add_provider_simple(domain->dev->of_node, + &domain->genpd); + if (ret) { + dev_err(domain->dev, "Failed to add genpd 
provider\n"); + pm_genpd_remove(&domain->genpd); + } + + return ret; +} + +static int imx7_pgc_domain_remove(struct platform_device *pdev) +{ + struct imx7_pgc_domain *domain = pdev->dev.platform_data; + + of_genpd_del_provider(domain->dev->of_node); + pm_genpd_remove(&domain->genpd); + + return 0; +} + +static const struct platform_device_id imx7_pgc_domain_id[] = { + { "imx7-pgc-domain", }, + { }, +}; + +static struct platform_driver imx7_pgc_domain_driver = { + .driver = { + .name = "imx7-pgc", + }, + .probe = imx7_pgc_domain_probe, + .remove = imx7_pgc_domain_remove, + .id_table = imx7_pgc_domain_id, +}; +builtin_platform_driver(imx7_pgc_domain_driver) + +static int imx_gpcv2_probe(struct platform_device *pdev) +{ + static const struct regmap_range yes_ranges[] = { + regmap_reg_range(GPC_LPCR_A7_BSC, + GPC_M4_PU_PDN_FLG), + regmap_reg_range(GPC_PGC_CTRL(PGC_MIPI), + GPC_PGC_SR(PGC_MIPI)), + regmap_reg_range(GPC_PGC_CTRL(PGC_PCIE), + GPC_PGC_SR(PGC_PCIE)), + regmap_reg_range(GPC_PGC_CTRL(PGC_USB_HSIC), + GPC_PGC_SR(PGC_USB_HSIC)), + }; + static const struct regmap_access_table access_table = { + .yes_ranges = yes_ranges, + .n_yes_ranges = ARRAY_SIZE(yes_ranges), + }; + static const struct regmap_config regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .rd_table = &access_table, + .wr_table = &access_table, + .max_register = SZ_4K, + }; + struct device *dev = &pdev->dev; + struct device_node *pgc_np, *np; + struct regmap *regmap; + struct resource *res; + void __iomem *base; + int ret; + + pgc_np = of_get_child_by_name(dev->of_node, "pgc"); + if (!pgc_np) { + dev_err(dev, "No power domains specified in DT\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio(dev, base, ®map_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(dev, "failed to init regmap (%d)\n", ret); + return ret; + } + + for_each_child_of_node(pgc_np, np) { + struct platform_device *pd_pdev; + struct imx7_pgc_domain *domain; + u32 domain_index; + + ret = of_property_read_u32(np, "reg", &domain_index); + if (ret) { + dev_err(dev, "Failed to read 'reg' property\n"); + of_node_put(np); + return ret; + } + + if (domain_index >= ARRAY_SIZE(imx7_pgc_domains)) { + dev_warn(dev, + "Domain index %d is out of bounds\n", + domain_index); + continue; + } + + domain = &imx7_pgc_domains[domain_index]; + domain->regmap = regmap; + domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; + domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; + + pd_pdev = platform_device_alloc("imx7-pgc-domain", + domain_index); + if (!pd_pdev) { + dev_err(dev, "Failed to allocate platform device\n"); + of_node_put(np); + return -ENOMEM; + } + + pd_pdev->dev.platform_data = domain; + pd_pdev->dev.parent = dev; + pd_pdev->dev.of_node = np; + + ret = platform_device_add(pd_pdev); + if (ret) { + platform_device_put(pd_pdev); + of_node_put(np); + return ret; + } + } + + return 0; +} + +static const struct of_device_id imx_gpcv2_dt_ids[] = { + { .compatible = "fsl,imx7d-gpc" }, + { } +}; + +static struct platform_driver imx_gpc_driver = { + .driver = { + .name = "imx-gpcv2", + .of_match_table = imx_gpcv2_dt_ids, + }, + .probe = imx_gpcv2_probe, +}; +builtin_platform_driver(imx_gpc_driver) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 78b1bb7bcf20..9fca977ef18d 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -33,17 +33,10 
@@ config QCOM_SMEM The driver provides an interface to items in a heap shared among all processors in a Qualcomm platform. -config QCOM_SMD - tristate "Qualcomm Shared Memory Driver (SMD)" - depends on QCOM_SMEM - help - Say y here to enable support for the Qualcomm Shared Memory Driver - providing communication channels to remote processors in Qualcomm - platforms. - config QCOM_SMD_RPM tristate "Qualcomm Resource Power Manager (RPM) over SMD" - depends on QCOM_SMD && OF + depends on ARCH_QCOM + depends on RPMSG && OF help If you say yes to this option, support will be included for the Resource Power Manager system found in the Qualcomm 8974 based @@ -76,7 +69,8 @@ config QCOM_SMSM config QCOM_WCNSS_CTRL tristate "Qualcomm WCNSS control driver" - depends on QCOM_SMD + depends on ARCH_QCOM + depends on RPMSG help Client driver for the WCNSS_CTRL SMD channel, used to download nv firmware to a newly booted WCNSS chip. diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 1f30260b06b8..414f0de274fa 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o obj-$(CONFIG_QCOM_PM) += spm.o -obj-$(CONFIG_QCOM_SMD) += smd.o obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o obj-$(CONFIG_QCOM_SMEM) += smem.o obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 6609d7e0edb0..c2346752b3ea 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -19,7 +19,7 @@ #include <linux/interrupt.h> #include <linux/slab.h> -#include <linux/soc/qcom/smd.h> +#include <linux/rpmsg.h> #include <linux/soc/qcom/smd-rpm.h> #define RPM_REQUEST_TIMEOUT (5 * HZ) @@ -32,7 +32,7 @@ * @ack_status: result of the rpm request */ struct qcom_smd_rpm { - struct qcom_smd_channel *rpm_channel; + struct rpmsg_endpoint *rpm_channel; struct device *dev; struct completion ack; @@ -133,7 +133,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, pkt->req.data_len = cpu_to_le32(count); memcpy(pkt->payload, buf, count); - ret = qcom_smd_send(rpm->rpm_channel, pkt, size); + ret = rpmsg_send(rpm->rpm_channel, pkt, size); if (ret) goto out; @@ -150,14 +150,16 @@ out: } EXPORT_SYMBOL(qcom_rpm_smd_write); -static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, - const void *data, - size_t count) +static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) { const struct qcom_rpm_header *hdr = data; size_t hdr_length = le32_to_cpu(hdr->length); const struct qcom_rpm_message *msg; - struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel); + struct qcom_smd_rpm *rpm = dev_get_drvdata(&rpdev->dev); const u8 *buf = data + sizeof(struct qcom_rpm_header); const u8 *end = buf + hdr_length; char msgbuf[32]; @@ -196,59 +198,57 @@ static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel, return 0; } -static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev) +static int qcom_smd_rpm_probe(struct rpmsg_device *rpdev) { struct qcom_smd_rpm *rpm; - rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL); + rpm = devm_kzalloc(&rpdev->dev, sizeof(*rpm), GFP_KERNEL); if (!rpm) return -ENOMEM; mutex_init(&rpm->lock); init_completion(&rpm->ack); - rpm->dev = &sdev->dev; - rpm->rpm_channel = sdev->channel; - qcom_smd_set_drvdata(sdev->channel, rpm); + rpm->dev = &rpdev->dev; + rpm->rpm_channel = rpdev->ept; + dev_set_drvdata(&rpdev->dev, rpm); - dev_set_drvdata(&sdev->dev, rpm); - 
- return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev); + return of_platform_populate(rpdev->dev.of_node, NULL, NULL, &rpdev->dev); } -static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev) +static void qcom_smd_rpm_remove(struct rpmsg_device *rpdev) { - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&rpdev->dev); } static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-apq8084" }, { .compatible = "qcom,rpm-msm8916" }, { .compatible = "qcom,rpm-msm8974" }, + { .compatible = "qcom,rpm-msm8996" }, {} }; MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); -static struct qcom_smd_driver qcom_smd_rpm_driver = { +static struct rpmsg_driver qcom_smd_rpm_driver = { .probe = qcom_smd_rpm_probe, .remove = qcom_smd_rpm_remove, .callback = qcom_smd_rpm_callback, - .driver = { + .drv = { .name = "qcom_smd_rpm", - .owner = THIS_MODULE, .of_match_table = qcom_smd_rpm_of_match, }, }; static int __init qcom_smd_rpm_init(void) { - return qcom_smd_driver_register(&qcom_smd_rpm_driver); + return register_rpmsg_driver(&qcom_smd_rpm_driver); } arch_initcall(qcom_smd_rpm_init); static void __exit qcom_smd_rpm_exit(void) { - qcom_smd_driver_unregister(&qcom_smd_rpm_driver); + unregister_rpmsg_driver(&qcom_smd_rpm_driver); } module_exit(qcom_smd_rpm_exit); diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c deleted file mode 100644 index 322034ab9d37..000000000000 --- a/drivers/soc/qcom/smd.c +++ /dev/null @@ -1,1560 +0,0 @@ -/* - * Copyright (c) 2015, Sony Mobile Communications AB. - * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/mfd/syscon.h> -#include <linux/module.h> -#include <linux/of_irq.h> -#include <linux/of_platform.h> -#include <linux/platform_device.h> -#include <linux/regmap.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/soc/qcom/smd.h> -#include <linux/soc/qcom/smem.h> -#include <linux/wait.h> - -/* - * The Qualcomm Shared Memory communication solution provides point-to-point - * channels for clients to send and receive streaming or packet based data. - * - * Each channel consists of a control item (channel info) and a ring buffer - * pair. The channel info carry information related to channel state, flow - * control and the offsets within the ring buffer. - * - * All allocated channels are listed in an allocation table, identifying the - * pair of items by name, type and remote processor. - * - * Upon creating a new channel the remote processor allocates channel info and - * ring buffer items from the smem heap and populate the allocation table. An - * interrupt is sent to the other end of the channel and a scan for new - * channels should be done. A channel never goes away, it will only change - * state. - * - * The remote processor signals it intent for bring up the communication - * channel by setting the state of its end of the channel to "opening" and - * sends out an interrupt. 
We detect this change and register a smd device to - * consume the channel. Upon finding a consumer we finish the handshake and the - * channel is up. - * - * Upon closing a channel, the remote processor will update the state of its - * end of the channel and signal us, we will then unregister any attached - * device and close our end of the channel. - * - * Devices attached to a channel can use the qcom_smd_send function to push - * data to the channel, this is done by copying the data into the tx ring - * buffer, updating the pointers in the channel info and signaling the remote - * processor. - * - * The remote processor does the equivalent when it transfer data and upon - * receiving the interrupt we check the channel info for new data and delivers - * this to the attached device. If the device is not ready to receive the data - * we leave it in the ring buffer for now. - */ - -struct smd_channel_info; -struct smd_channel_info_pair; -struct smd_channel_info_word; -struct smd_channel_info_word_pair; - -#define SMD_ALLOC_TBL_COUNT 2 -#define SMD_ALLOC_TBL_SIZE 64 - -/* - * This lists the various smem heap items relevant for the allocation table and - * smd channel entries. - */ -static const struct { - unsigned alloc_tbl_id; - unsigned info_base_id; - unsigned fifo_base_id; -} smem_items[SMD_ALLOC_TBL_COUNT] = { - { - .alloc_tbl_id = 13, - .info_base_id = 14, - .fifo_base_id = 338 - }, - { - .alloc_tbl_id = 266, - .info_base_id = 138, - .fifo_base_id = 202, - }, -}; - -/** - * struct qcom_smd_edge - representing a remote processor - * @dev: device for this edge - * @of_node: of_node handle for information related to this edge - * @edge_id: identifier of this edge - * @remote_pid: identifier of remote processor - * @irq: interrupt for signals on this edge - * @ipc_regmap: regmap handle holding the outgoing ipc register - * @ipc_offset: offset within @ipc_regmap of the register for ipc - * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap - * @channels: list of all channels detected on this edge - * @channels_lock: guard for modifications of @channels - * @allocated: array of bitmaps representing already allocated channels - * @smem_available: last available amount of smem triggering a channel scan - * @scan_work: work item for discovering new channels - * @state_work: work item for edge state changes - */ -struct qcom_smd_edge { - struct device dev; - - struct device_node *of_node; - unsigned edge_id; - unsigned remote_pid; - - int irq; - - struct regmap *ipc_regmap; - int ipc_offset; - int ipc_bit; - - struct list_head channels; - spinlock_t channels_lock; - - DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); - - unsigned smem_available; - - wait_queue_head_t new_channel_event; - - struct work_struct scan_work; - struct work_struct state_work; -}; - -#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev) - -/* - * SMD channel states. 
- */ -enum smd_channel_state { - SMD_CHANNEL_CLOSED, - SMD_CHANNEL_OPENING, - SMD_CHANNEL_OPENED, - SMD_CHANNEL_FLUSHING, - SMD_CHANNEL_CLOSING, - SMD_CHANNEL_RESET, - SMD_CHANNEL_RESET_OPENING -}; - -/** - * struct qcom_smd_channel - smd channel struct - * @edge: qcom_smd_edge this channel is living on - * @qsdev: reference to a associated smd client device - * @name: name of the channel - * @state: local state of the channel - * @remote_state: remote state of the channel - * @info: byte aligned outgoing/incoming channel info - * @info_word: word aligned outgoing/incoming channel info - * @tx_lock: lock to make writes to the channel mutually exclusive - * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR - * @tx_fifo: pointer to the outgoing ring buffer - * @rx_fifo: pointer to the incoming ring buffer - * @fifo_size: size of each ring buffer - * @bounce_buffer: bounce buffer for reading wrapped packets - * @cb: callback function registered for this channel - * @recv_lock: guard for rx info modifications and cb pointer - * @pkt_size: size of the currently handled packet - * @list: lite entry for @channels in qcom_smd_edge - */ -struct qcom_smd_channel { - struct qcom_smd_edge *edge; - - struct qcom_smd_device *qsdev; - - char *name; - enum smd_channel_state state; - enum smd_channel_state remote_state; - - struct smd_channel_info_pair *info; - struct smd_channel_info_word_pair *info_word; - - struct mutex tx_lock; - wait_queue_head_t fblockread_event; - - void *tx_fifo; - void *rx_fifo; - int fifo_size; - - void *bounce_buffer; - qcom_smd_cb_t cb; - - spinlock_t recv_lock; - - int pkt_size; - - void *drvdata; - - struct list_head list; -}; - -/* - * Format of the smd_info smem items, for byte aligned channels. - */ -struct smd_channel_info { - __le32 state; - u8 fDSR; - u8 fCTS; - u8 fCD; - u8 fRI; - u8 fHEAD; - u8 fTAIL; - u8 fSTATE; - u8 fBLOCKREADINTR; - __le32 tail; - __le32 head; -}; - -struct smd_channel_info_pair { - struct smd_channel_info tx; - struct smd_channel_info rx; -}; - -/* - * Format of the smd_info smem items, for word aligned channels. - */ -struct smd_channel_info_word { - __le32 state; - __le32 fDSR; - __le32 fCTS; - __le32 fCD; - __le32 fRI; - __le32 fHEAD; - __le32 fTAIL; - __le32 fSTATE; - __le32 fBLOCKREADINTR; - __le32 tail; - __le32 head; -}; - -struct smd_channel_info_word_pair { - struct smd_channel_info_word tx; - struct smd_channel_info_word rx; -}; - -#define GET_RX_CHANNEL_FLAG(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ - channel->info_word ? \ - le32_to_cpu(channel->info_word->rx.param) : \ - channel->info->rx.param; \ - }) - -#define GET_RX_CHANNEL_INFO(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ - le32_to_cpu(channel->info_word ? 
\ - channel->info_word->rx.param : \ - channel->info->rx.param); \ - }) - -#define SET_RX_CHANNEL_FLAG(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ - if (channel->info_word) \ - channel->info_word->rx.param = cpu_to_le32(value); \ - else \ - channel->info->rx.param = value; \ - }) - -#define SET_RX_CHANNEL_INFO(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ - if (channel->info_word) \ - channel->info_word->rx.param = cpu_to_le32(value); \ - else \ - channel->info->rx.param = cpu_to_le32(value); \ - }) - -#define GET_TX_CHANNEL_FLAG(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ - channel->info_word ? \ - le32_to_cpu(channel->info_word->tx.param) : \ - channel->info->tx.param; \ - }) - -#define GET_TX_CHANNEL_INFO(channel, param) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ - le32_to_cpu(channel->info_word ? \ - channel->info_word->tx.param : \ - channel->info->tx.param); \ - }) - -#define SET_TX_CHANNEL_FLAG(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ - if (channel->info_word) \ - channel->info_word->tx.param = cpu_to_le32(value); \ - else \ - channel->info->tx.param = value; \ - }) - -#define SET_TX_CHANNEL_INFO(channel, param, value) \ - ({ \ - BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ - if (channel->info_word) \ - channel->info_word->tx.param = cpu_to_le32(value); \ - else \ - channel->info->tx.param = cpu_to_le32(value); \ - }) - -/** - * struct qcom_smd_alloc_entry - channel allocation entry - * @name: channel name - * @cid: channel index - * @flags: channel flags and edge id - * @ref_count: reference count of the channel - */ -struct qcom_smd_alloc_entry { - u8 name[20]; - __le32 cid; - __le32 flags; - __le32 ref_count; -} __packed; - -#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff -#define SMD_CHANNEL_FLAGS_STREAM BIT(8) -#define SMD_CHANNEL_FLAGS_PACKET BIT(9) - -/* - * Each smd packet contains a 20 byte header, with the first 4 being the length - * of the packet. - */ -#define SMD_PACKET_HEADER_LEN 20 - -/* - * Signal the remote processor associated with 'channel'. 
- */ -static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) -{ - struct qcom_smd_edge *edge = channel->edge; - - regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit)); -} - -/* - * Initialize the tx channel info - */ -static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) -{ - SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_TX_CHANNEL_FLAG(channel, fDSR, 0); - SET_TX_CHANNEL_FLAG(channel, fCTS, 0); - SET_TX_CHANNEL_FLAG(channel, fCD, 0); - SET_TX_CHANNEL_FLAG(channel, fRI, 0); - SET_TX_CHANNEL_FLAG(channel, fHEAD, 0); - SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); - SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); - SET_TX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); - - qcom_smd_signal_channel(channel); - - channel->state = SMD_CHANNEL_CLOSED; - channel->pkt_size = 0; -} - -/* - * Set the callback for a channel, with appropriate locking - */ -static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel, - qcom_smd_cb_t cb) -{ - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - channel->cb = cb; - spin_unlock_irqrestore(&channel->recv_lock, flags); -}; - -/* - * Calculate the amount of data available in the rx fifo - */ -static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel) -{ - unsigned head; - unsigned tail; - - head = GET_RX_CHANNEL_INFO(channel, head); - tail = GET_RX_CHANNEL_INFO(channel, tail); - - return (head - tail) & (channel->fifo_size - 1); -} - -/* - * Set tx channel state and inform the remote processor - */ -static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, - int state) -{ - struct qcom_smd_edge *edge = channel->edge; - bool is_open = state == SMD_CHANNEL_OPENED; - - if (channel->state == state) - return; - - dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state); - - SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); - SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); - SET_TX_CHANNEL_FLAG(channel, fCD, is_open); - - SET_TX_CHANNEL_INFO(channel, state, state); - SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); - - channel->state = state; - qcom_smd_signal_channel(channel); -} - -/* - * Copy count bytes of data using 32bit accesses, if that's required. - */ -static void smd_copy_to_fifo(void __iomem *dst, - const void *src, - size_t count, - bool word_aligned) -{ - if (word_aligned) { - __iowrite32_copy(dst, src, count / sizeof(u32)); - } else { - memcpy_toio(dst, src, count); - } -} - -/* - * Copy count bytes of data using 32bit accesses, if that is required. - */ -static void smd_copy_from_fifo(void *dst, - const void __iomem *src, - size_t count, - bool word_aligned) -{ - if (word_aligned) { - __ioread32_copy(dst, src, count / sizeof(u32)); - } else { - memcpy_fromio(dst, src, count); - } -} - -/* - * Read count bytes of data from the rx fifo into buf, but don't advance the - * tail. - */ -static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, - void *buf, size_t count) -{ - bool word_aligned; - unsigned tail; - size_t len; - - word_aligned = channel->info_word; - tail = GET_RX_CHANNEL_INFO(channel, tail); - - len = min_t(size_t, count, channel->fifo_size - tail); - if (len) { - smd_copy_from_fifo(buf, - channel->rx_fifo + tail, - len, - word_aligned); - } - - if (len != count) { - smd_copy_from_fifo(buf + len, - channel->rx_fifo, - count - len, - word_aligned); - } - - return count; -} - -/* - * Advance the rx tail by count bytes. 
- */ -static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, - size_t count) -{ - unsigned tail; - - tail = GET_RX_CHANNEL_INFO(channel, tail); - tail += count; - tail &= (channel->fifo_size - 1); - SET_RX_CHANNEL_INFO(channel, tail, tail); -} - -/* - * Read out a single packet from the rx fifo and deliver it to the device - */ -static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) -{ - unsigned tail; - size_t len; - void *ptr; - int ret; - - if (!channel->cb) - return 0; - - tail = GET_RX_CHANNEL_INFO(channel, tail); - - /* Use bounce buffer if the data wraps */ - if (tail + channel->pkt_size >= channel->fifo_size) { - ptr = channel->bounce_buffer; - len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size); - } else { - ptr = channel->rx_fifo + tail; - len = channel->pkt_size; - } - - ret = channel->cb(channel, ptr, len); - if (ret < 0) - return ret; - - /* Only forward the tail if the client consumed the data */ - qcom_smd_channel_advance(channel, len); - - channel->pkt_size = 0; - - return 0; -} - -/* - * Per channel interrupt handling - */ -static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) -{ - bool need_state_scan = false; - int remote_state; - __le32 pktlen; - int avail; - int ret; - - /* Handle state changes */ - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state != channel->remote_state) { - channel->remote_state = remote_state; - need_state_scan = true; - } - /* Indicate that we have seen any state change */ - SET_RX_CHANNEL_FLAG(channel, fSTATE, 0); - - /* Signal waiting qcom_smd_send() about the interrupt */ - if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) - wake_up_interruptible(&channel->fblockread_event); - - /* Don't consume any data until we've opened the channel */ - if (channel->state != SMD_CHANNEL_OPENED) - goto out; - - /* Indicate that we've seen the new data */ - SET_RX_CHANNEL_FLAG(channel, fHEAD, 0); - - /* Consume data */ - for (;;) { - avail = qcom_smd_channel_get_rx_avail(channel); - - if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { - qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); - qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); - channel->pkt_size = le32_to_cpu(pktlen); - } else if (channel->pkt_size && avail >= channel->pkt_size) { - ret = qcom_smd_channel_recv_single(channel); - if (ret) - break; - } else { - break; - } - } - - /* Indicate that we have seen and updated tail */ - SET_RX_CHANNEL_FLAG(channel, fTAIL, 1); - - /* Signal the remote that we've consumed the data (if requested) */ - if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { - /* Ensure ordering of channel info updates */ - wmb(); - - qcom_smd_signal_channel(channel); - } - -out: - return need_state_scan; -} - -/* - * The edge interrupts are triggered by the remote processor on state changes, - * channel info updates or when new channels are created. 
- */ -static irqreturn_t qcom_smd_edge_intr(int irq, void *data) -{ - struct qcom_smd_edge *edge = data; - struct qcom_smd_channel *channel; - unsigned available; - bool kick_scanner = false; - bool kick_state = false; - - /* - * Handle state changes or data on each of the channels on this edge - */ - spin_lock(&edge->channels_lock); - list_for_each_entry(channel, &edge->channels, list) { - spin_lock(&channel->recv_lock); - kick_state |= qcom_smd_channel_intr(channel); - spin_unlock(&channel->recv_lock); - } - spin_unlock(&edge->channels_lock); - - /* - * Creating a new channel requires allocating an smem entry, so we only - * have to scan if the amount of available space in smem have changed - * since last scan. - */ - available = qcom_smem_get_free_space(edge->remote_pid); - if (available != edge->smem_available) { - edge->smem_available = available; - kick_scanner = true; - } - - if (kick_scanner) - schedule_work(&edge->scan_work); - if (kick_state) - schedule_work(&edge->state_work); - - return IRQ_HANDLED; -} - -/* - * Delivers any outstanding packets in the rx fifo, can be used after probe of - * the clients to deliver any packets that wasn't delivered before the client - * was setup. - */ -static void qcom_smd_channel_resume(struct qcom_smd_channel *channel) -{ - unsigned long flags; - - spin_lock_irqsave(&channel->recv_lock, flags); - qcom_smd_channel_intr(channel); - spin_unlock_irqrestore(&channel->recv_lock, flags); -} - -/* - * Calculate how much space is available in the tx fifo. - */ -static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel) -{ - unsigned head; - unsigned tail; - unsigned mask = channel->fifo_size - 1; - - head = GET_TX_CHANNEL_INFO(channel, head); - tail = GET_TX_CHANNEL_INFO(channel, tail); - - return mask - ((head - tail) & mask); -} - -/* - * Write count bytes of data into channel, possibly wrapping in the ring buffer - */ -static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, - const void *data, - size_t count) -{ - bool word_aligned; - unsigned head; - size_t len; - - word_aligned = channel->info_word; - head = GET_TX_CHANNEL_INFO(channel, head); - - len = min_t(size_t, count, channel->fifo_size - head); - if (len) { - smd_copy_to_fifo(channel->tx_fifo + head, - data, - len, - word_aligned); - } - - if (len != count) { - smd_copy_to_fifo(channel->tx_fifo, - data + len, - count - len, - word_aligned); - } - - head += count; - head &= (channel->fifo_size - 1); - SET_TX_CHANNEL_INFO(channel, head, head); - - return count; -} - -/** - * qcom_smd_send - write data to smd channel - * @channel: channel handle - * @data: buffer of data to write - * @len: number of bytes to write - * - * This is a blocking write of len bytes into the channel's tx ring buffer and - * signal the remote end. It will sleep until there is enough space available - * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid - * polling. 
- */ -int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) -{ - __le32 hdr[5] = { cpu_to_le32(len), }; - int tlen = sizeof(hdr) + len; - int ret; - - /* Word aligned channels only accept word size aligned data */ - if (channel->info_word && len % 4) - return -EINVAL; - - /* Reject packets that are too big */ - if (tlen >= channel->fifo_size) - return -EINVAL; - - ret = mutex_lock_interruptible(&channel->tx_lock); - if (ret) - return ret; - - while (qcom_smd_get_tx_avail(channel) < tlen) { - if (channel->state != SMD_CHANNEL_OPENED) { - ret = -EPIPE; - goto out; - } - - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0); - - ret = wait_event_interruptible(channel->fblockread_event, - qcom_smd_get_tx_avail(channel) >= tlen || - channel->state != SMD_CHANNEL_OPENED); - if (ret) - goto out; - - SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); - } - - SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); - - qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); - qcom_smd_write_fifo(channel, data, len); - - SET_TX_CHANNEL_FLAG(channel, fHEAD, 1); - - /* Ensure ordering of channel info updates */ - wmb(); - - qcom_smd_signal_channel(channel); - -out: - mutex_unlock(&channel->tx_lock); - - return ret; -} -EXPORT_SYMBOL(qcom_smd_send); - -static struct qcom_smd_device *to_smd_device(struct device *dev) -{ - return container_of(dev, struct qcom_smd_device, dev); -} - -static struct qcom_smd_driver *to_smd_driver(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - - return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver); -} - -static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver); - const struct qcom_smd_id *match = qsdrv->smd_match_table; - const char *name = qsdev->channel->name; - - if (match) { - while (match->name[0]) { - if (!strcmp(match->name, name)) - return 1; - match++; - } - } - - return of_driver_match_device(dev, drv); -} - -/* - * Helper for opening a channel - */ -static int qcom_smd_channel_open(struct qcom_smd_channel *channel, - qcom_smd_cb_t cb) -{ - size_t bb_size; - - /* - * Packets are maximum 4k, but reduce if the fifo is smaller - */ - bb_size = min(channel->fifo_size, SZ_4K); - channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); - if (!channel->bounce_buffer) - return -ENOMEM; - - qcom_smd_channel_set_callback(channel, cb); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); - qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); - - return 0; -} - -/* - * Helper for closing and resetting a channel - */ -static void qcom_smd_channel_close(struct qcom_smd_channel *channel) -{ - qcom_smd_channel_set_callback(channel, NULL); - - kfree(channel->bounce_buffer); - channel->bounce_buffer = NULL; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); - qcom_smd_channel_reset(channel); -} - -/* - * Probe the smd client. - * - * The remote side have indicated that it want the channel to be opened, so - * complete the state handshake and probe our client driver. 
- */ -static int qcom_smd_dev_probe(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; - int ret; - - ret = qcom_smd_channel_open(channel, qsdrv->callback); - if (ret) - return ret; - - ret = qsdrv->probe(qsdev); - if (ret) - goto err; - - qcom_smd_channel_resume(channel); - - return 0; - -err: - dev_err(&qsdev->dev, "probe failed\n"); - - qcom_smd_channel_close(channel); - return ret; -} - -/* - * Remove the smd client. - * - * The channel is going away, for some reason, so remove the smd client and - * reset the channel state. - */ -static int qcom_smd_dev_remove(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - struct qcom_smd_driver *qsdrv = to_smd_driver(dev); - struct qcom_smd_channel *channel = qsdev->channel; - - qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); - - /* - * Make sure we don't race with the code receiving data. - */ - qcom_smd_channel_set_callback(channel, NULL); - - /* Wake up any sleepers in qcom_smd_send() */ - wake_up_interruptible(&channel->fblockread_event); - - /* - * We expect that the client might block in remove() waiting for any - * outstanding calls to qcom_smd_send() to wake up and finish. - */ - if (qsdrv->remove) - qsdrv->remove(qsdev); - - /* The client is now gone, close the primary channel */ - qcom_smd_channel_close(channel); - channel->qsdev = NULL; - - return 0; -} - -static struct bus_type qcom_smd_bus = { - .name = "qcom_smd", - .match = qcom_smd_dev_match, - .probe = qcom_smd_dev_probe, - .remove = qcom_smd_dev_remove, -}; - -/* - * Release function for the qcom_smd_device object. - */ -static void qcom_smd_release_device(struct device *dev) -{ - struct qcom_smd_device *qsdev = to_smd_device(dev); - - kfree(qsdev); -} - -/* - * Finds the device_node for the smd child interested in this channel. - */ -static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, - const char *channel) -{ - struct device_node *child; - const char *name; - const char *key; - int ret; - - for_each_available_child_of_node(edge_node, child) { - key = "qcom,smd-channels"; - ret = of_property_read_string(child, key, &name); - if (ret) - continue; - - if (strcmp(name, channel) == 0) - return child; - } - - return NULL; -} - -/* - * Create a smd client device for channel that is being opened. - */ -static int qcom_smd_create_device(struct qcom_smd_channel *channel) -{ - struct qcom_smd_device *qsdev; - struct qcom_smd_edge *edge = channel->edge; - struct device_node *node; - int ret; - - if (channel->qsdev) - return -EEXIST; - - dev_dbg(&edge->dev, "registering '%s'\n", channel->name); - - qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); - if (!qsdev) - return -ENOMEM; - - node = qcom_smd_match_channel(edge->of_node, channel->name); - dev_set_name(&qsdev->dev, "%s.%s", - edge->of_node->name, - node ? node->name : channel->name); - - qsdev->dev.parent = &edge->dev; - qsdev->dev.bus = &qcom_smd_bus; - qsdev->dev.release = qcom_smd_release_device; - qsdev->dev.of_node = node; - - qsdev->channel = channel; - - channel->qsdev = qsdev; - - ret = device_register(&qsdev->dev); - if (ret) { - dev_err(&edge->dev, "device_register failed: %d\n", ret); - put_device(&qsdev->dev); - } - - return ret; -} - -/* - * Destroy a smd client device for a channel that's going away. 
- */ -static void qcom_smd_destroy_device(struct qcom_smd_channel *channel) -{ - struct device *dev; - - BUG_ON(!channel->qsdev); - - dev = &channel->qsdev->dev; - - device_unregister(dev); - of_node_put(dev->of_node); - put_device(dev); -} - -/** - * qcom_smd_driver_register - register a smd driver - * @qsdrv: qcom_smd_driver struct - */ -int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv) -{ - qsdrv->driver.bus = &qcom_smd_bus; - return driver_register(&qsdrv->driver); -} -EXPORT_SYMBOL(qcom_smd_driver_register); - -void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel) -{ - return channel->drvdata; -} -EXPORT_SYMBOL(qcom_smd_get_drvdata); - -void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data) -{ - channel->drvdata = data; -} -EXPORT_SYMBOL(qcom_smd_set_drvdata); - -/** - * qcom_smd_driver_unregister - unregister a smd driver - * @qsdrv: qcom_smd_driver struct - */ -void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv) -{ - driver_unregister(&qsdrv->driver); -} -EXPORT_SYMBOL(qcom_smd_driver_unregister); - -static struct qcom_smd_channel * -qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_channel *ret = NULL; - unsigned long flags; - unsigned state; - - spin_lock_irqsave(&edge->channels_lock, flags); - list_for_each_entry(channel, &edge->channels, list) { - if (strcmp(channel->name, name)) - continue; - - state = GET_RX_CHANNEL_INFO(channel, state); - if (state != SMD_CHANNEL_OPENING && - state != SMD_CHANNEL_OPENED) - continue; - - ret = channel; - break; - } - spin_unlock_irqrestore(&edge->channels_lock, flags); - - return ret; -} - -/** - * qcom_smd_open_channel() - claim additional channels on the same edge - * @sdev: smd_device handle - * @name: channel name - * @cb: callback method to use for incoming data - * - * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't - * ready. - * - * Any channels returned must be closed with a call to qcom_smd_close_channel() - */ -struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent, - const char *name, - qcom_smd_cb_t cb) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_device *sdev = parent->qsdev; - struct qcom_smd_edge *edge = parent->edge; - int ret; - - /* Wait up to HZ for the channel to appear */ - ret = wait_event_interruptible_timeout(edge->new_channel_event, - (channel = qcom_smd_find_channel(edge, name)) != NULL, - HZ); - if (!ret) - return ERR_PTR(-ETIMEDOUT); - - if (channel->state != SMD_CHANNEL_CLOSED) { - dev_err(&sdev->dev, "channel %s is busy\n", channel->name); - return ERR_PTR(-EBUSY); - } - - channel->qsdev = sdev; - ret = qcom_smd_channel_open(channel, cb); - if (ret) { - channel->qsdev = NULL; - return ERR_PTR(ret); - } - - return channel; -} -EXPORT_SYMBOL(qcom_smd_open_channel); - -/** - * qcom_smd_close_channel() - close an additionally opened channel - * @channel: channel handle, returned by qcom_smd_open_channel() - */ -void qcom_smd_close_channel(struct qcom_smd_channel *channel) -{ - qcom_smd_channel_close(channel); - channel->qsdev = NULL; -} -EXPORT_SYMBOL(qcom_smd_close_channel); - -/* - * Allocate the qcom_smd_channel object for a newly found smd channel, - * retrieving and validating the smem items involved. 
- */ -static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge, - unsigned smem_info_item, - unsigned smem_fifo_item, - char *name) -{ - struct qcom_smd_channel *channel; - size_t fifo_size; - size_t info_size; - void *fifo_base; - void *info; - int ret; - - channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL); - if (!channel) - return ERR_PTR(-ENOMEM); - - channel->edge = edge; - channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL); - if (!channel->name) - return ERR_PTR(-ENOMEM); - - mutex_init(&channel->tx_lock); - spin_lock_init(&channel->recv_lock); - init_waitqueue_head(&channel->fblockread_event); - - info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto free_name_and_channel; - } - - /* - * Use the size of the item to figure out which channel info struct to - * use. - */ - if (info_size == 2 * sizeof(struct smd_channel_info_word)) { - channel->info_word = info; - } else if (info_size == 2 * sizeof(struct smd_channel_info)) { - channel->info = info; - } else { - dev_err(&edge->dev, - "channel info of size %zu not supported\n", info_size); - ret = -EINVAL; - goto free_name_and_channel; - } - - fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); - if (IS_ERR(fifo_base)) { - ret = PTR_ERR(fifo_base); - goto free_name_and_channel; - } - - /* The channel consist of a rx and tx fifo of equal size */ - fifo_size /= 2; - - dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", - name, info_size, fifo_size); - - channel->tx_fifo = fifo_base; - channel->rx_fifo = fifo_base + fifo_size; - channel->fifo_size = fifo_size; - - qcom_smd_channel_reset(channel); - - return channel; - -free_name_and_channel: - devm_kfree(&edge->dev, channel->name); - devm_kfree(&edge->dev, channel); - - return ERR_PTR(ret); -} - -/* - * Scans the allocation table for any newly allocated channels, calls - * qcom_smd_create_channel() to create representations of these and add - * them to the edge's list of channels. 
- */ -static void qcom_channel_scan_worker(struct work_struct *work) -{ - struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work); - struct qcom_smd_alloc_entry *alloc_tbl; - struct qcom_smd_alloc_entry *entry; - struct qcom_smd_channel *channel; - unsigned long flags; - unsigned fifo_id; - unsigned info_id; - int tbl; - int i; - u32 eflags, cid; - - for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { - alloc_tbl = qcom_smem_get(edge->remote_pid, - smem_items[tbl].alloc_tbl_id, NULL); - if (IS_ERR(alloc_tbl)) - continue; - - for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { - entry = &alloc_tbl[i]; - eflags = le32_to_cpu(entry->flags); - if (test_bit(i, edge->allocated[tbl])) - continue; - - if (entry->ref_count == 0) - continue; - - if (!entry->name[0]) - continue; - - if (!(eflags & SMD_CHANNEL_FLAGS_PACKET)) - continue; - - if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) - continue; - - cid = le32_to_cpu(entry->cid); - info_id = smem_items[tbl].info_base_id + cid; - fifo_id = smem_items[tbl].fifo_base_id + cid; - - channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); - if (IS_ERR(channel)) - continue; - - spin_lock_irqsave(&edge->channels_lock, flags); - list_add(&channel->list, &edge->channels); - spin_unlock_irqrestore(&edge->channels_lock, flags); - - dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name); - set_bit(i, edge->allocated[tbl]); - - wake_up_interruptible(&edge->new_channel_event); - } - } - - schedule_work(&edge->state_work); -} - -/* - * This per edge worker scans smem for any new channels and register these. It - * then scans all registered channels for state changes that should be handled - * by creating or destroying smd client devices for the registered channels. - * - * LOCKING: edge->channels_lock only needs to cover the list operations, as the - * worker is killed before any channels are deallocated - */ -static void qcom_channel_state_worker(struct work_struct *work) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = container_of(work, - struct qcom_smd_edge, - state_work); - unsigned remote_state; - unsigned long flags; - - /* - * Register a device for any closed channel where the remote processor - * is showing interest in opening the channel. - */ - spin_lock_irqsave(&edge->channels_lock, flags); - list_for_each_entry(channel, &edge->channels, list) { - if (channel->state != SMD_CHANNEL_CLOSED) - continue; - - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state != SMD_CHANNEL_OPENING && - remote_state != SMD_CHANNEL_OPENED) - continue; - - spin_unlock_irqrestore(&edge->channels_lock, flags); - qcom_smd_create_device(channel); - spin_lock_irqsave(&edge->channels_lock, flags); - } - - /* - * Unregister the device for any channel that is opened where the - * remote processor is closing the channel. - */ - list_for_each_entry(channel, &edge->channels, list) { - if (channel->state != SMD_CHANNEL_OPENING && - channel->state != SMD_CHANNEL_OPENED) - continue; - - remote_state = GET_RX_CHANNEL_INFO(channel, state); - if (remote_state == SMD_CHANNEL_OPENING || - remote_state == SMD_CHANNEL_OPENED) - continue; - - spin_unlock_irqrestore(&edge->channels_lock, flags); - qcom_smd_destroy_device(channel); - spin_lock_irqsave(&edge->channels_lock, flags); - } - spin_unlock_irqrestore(&edge->channels_lock, flags); -} - -/* - * Parses an of_node describing an edge. 
- */ -static int qcom_smd_parse_edge(struct device *dev, - struct device_node *node, - struct qcom_smd_edge *edge) -{ - struct device_node *syscon_np; - const char *key; - int irq; - int ret; - - INIT_LIST_HEAD(&edge->channels); - spin_lock_init(&edge->channels_lock); - - INIT_WORK(&edge->scan_work, qcom_channel_scan_worker); - INIT_WORK(&edge->state_work, qcom_channel_state_worker); - - edge->of_node = of_node_get(node); - - key = "qcom,smd-edge"; - ret = of_property_read_u32(node, key, &edge->edge_id); - if (ret) { - dev_err(dev, "edge missing %s property\n", key); - return -EINVAL; - } - - edge->remote_pid = QCOM_SMEM_HOST_ANY; - key = "qcom,remote-pid"; - of_property_read_u32(node, key, &edge->remote_pid); - - syscon_np = of_parse_phandle(node, "qcom,ipc", 0); - if (!syscon_np) { - dev_err(dev, "no qcom,ipc node\n"); - return -ENODEV; - } - - edge->ipc_regmap = syscon_node_to_regmap(syscon_np); - if (IS_ERR(edge->ipc_regmap)) - return PTR_ERR(edge->ipc_regmap); - - key = "qcom,ipc"; - ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); - if (ret < 0) { - dev_err(dev, "no offset in %s\n", key); - return -EINVAL; - } - - ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); - if (ret < 0) { - dev_err(dev, "no bit in %s\n", key); - return -EINVAL; - } - - irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { - dev_err(dev, "required smd interrupt missing\n"); - return -EINVAL; - } - - ret = devm_request_irq(dev, irq, - qcom_smd_edge_intr, IRQF_TRIGGER_RISING, - node->name, edge); - if (ret) { - dev_err(dev, "failed to request smd irq\n"); - return ret; - } - - edge->irq = irq; - - return 0; -} - -/* - * Release function for an edge. - * Reset the state of each associated channel and free the edge context. - */ -static void qcom_smd_edge_release(struct device *dev) -{ - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge = to_smd_edge(dev); - - list_for_each_entry(channel, &edge->channels, list) { - SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); - SET_RX_CHANNEL_INFO(channel, head, 0); - SET_RX_CHANNEL_INFO(channel, tail, 0); - } - - kfree(edge); -} - -/** - * qcom_smd_register_edge() - register an edge based on an device_node - * @parent: parent device for the edge - * @node: device_node describing the edge - * - * Returns an edge reference, or negative ERR_PTR() on failure. 
- */ -struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, - struct device_node *node) -{ - struct qcom_smd_edge *edge; - int ret; - - edge = kzalloc(sizeof(*edge), GFP_KERNEL); - if (!edge) - return ERR_PTR(-ENOMEM); - - init_waitqueue_head(&edge->new_channel_event); - - edge->dev.parent = parent; - edge->dev.release = qcom_smd_edge_release; - dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name); - ret = device_register(&edge->dev); - if (ret) { - pr_err("failed to register smd edge\n"); - return ERR_PTR(ret); - } - - ret = qcom_smd_parse_edge(&edge->dev, node, edge); - if (ret) { - dev_err(&edge->dev, "failed to parse smd edge\n"); - goto unregister_dev; - } - - schedule_work(&edge->scan_work); - - return edge; - -unregister_dev: - put_device(&edge->dev); - return ERR_PTR(ret); -} -EXPORT_SYMBOL(qcom_smd_register_edge); - -static int qcom_smd_remove_device(struct device *dev, void *data) -{ - device_unregister(dev); - of_node_put(dev->of_node); - put_device(dev); - - return 0; -} - -/** - * qcom_smd_unregister_edge() - release an edge and its children - * @edge: edge reference acquired from qcom_smd_register_edge - */ -int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) -{ - int ret; - - disable_irq(edge->irq); - cancel_work_sync(&edge->scan_work); - cancel_work_sync(&edge->state_work); - - ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device); - if (ret) - dev_warn(&edge->dev, "can't remove smd device: %d\n", ret); - - device_unregister(&edge->dev); - - return 0; -} -EXPORT_SYMBOL(qcom_smd_unregister_edge); - -static int qcom_smd_probe(struct platform_device *pdev) -{ - struct device_node *node; - void *p; - - /* Wait for smem */ - p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL); - if (PTR_ERR(p) == -EPROBE_DEFER) - return PTR_ERR(p); - - for_each_available_child_of_node(pdev->dev.of_node, node) - qcom_smd_register_edge(&pdev->dev, node); - - return 0; -} - -static int qcom_smd_remove_edge(struct device *dev, void *data) -{ - struct qcom_smd_edge *edge = to_smd_edge(dev); - - return qcom_smd_unregister_edge(edge); -} - -/* - * Shut down all smd clients by making sure that each edge stops processing - * events and scanning for new channels, then call destroy on the devices. 
- */ -static int qcom_smd_remove(struct platform_device *pdev) -{ - int ret; - - ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge); - if (ret) - dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret); - - return ret; -} - -static const struct of_device_id qcom_smd_of_match[] = { - { .compatible = "qcom,smd" }, - {} -}; -MODULE_DEVICE_TABLE(of, qcom_smd_of_match); - -static struct platform_driver qcom_smd_driver = { - .probe = qcom_smd_probe, - .remove = qcom_smd_remove, - .driver = { - .name = "qcom-smd", - .of_match_table = qcom_smd_of_match, - }, -}; - -static int __init qcom_smd_init(void) -{ - int ret; - - ret = bus_register(&qcom_smd_bus); - if (ret) { - pr_err("failed to register smd bus: %d\n", ret); - return ret; - } - - return platform_driver_register(&qcom_smd_driver); -} -postcore_initcall(qcom_smd_init); - -static void __exit qcom_smd_exit(void) -{ - platform_driver_unregister(&qcom_smd_driver); - bus_unregister(&qcom_smd_bus); -} -module_exit(qcom_smd_exit); - -MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); -MODULE_DESCRIPTION("Qualcomm Shared Memory Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c index 520aedd29965..b9069184df19 100644 --- a/drivers/soc/qcom/wcnss_ctrl.c +++ b/drivers/soc/qcom/wcnss_ctrl.c @@ -14,10 +14,10 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/soc/qcom/smd.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/rpmsg.h> #include <linux/soc/qcom/wcnss_ctrl.h> #define WCNSS_REQUEST_TIMEOUT (5 * HZ) @@ -40,7 +40,7 @@ */ struct wcnss_ctrl { struct device *dev; - struct qcom_smd_channel *channel; + struct rpmsg_endpoint *channel; struct completion ack; struct completion cbc; @@ -122,11 +122,13 @@ struct wcnss_download_nv_resp { * * Handles any incoming packets from the remote WCNSS_CTRL service. 
*/ -static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel, - const void *data, - size_t count) +static int wcnss_ctrl_smd_callback(struct rpmsg_device *rpdev, + void *data, + int count, + void *priv, + u32 addr) { - struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel); + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); const struct wcnss_download_nv_resp *nvresp; const struct wcnss_version_resp *version; const struct wcnss_msg_hdr *hdr = data; @@ -180,7 +182,7 @@ static int wcnss_request_version(struct wcnss_ctrl *wcnss) msg.type = WCNSS_VERSION_REQ; msg.len = sizeof(msg); - ret = qcom_smd_send(wcnss->channel, &msg, sizeof(msg)); + ret = rpmsg_send(wcnss->channel, &msg, sizeof(msg)); if (ret < 0) return ret; @@ -238,7 +240,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc) memcpy(req->fragment, data, req->frag_size); - ret = qcom_smd_send(wcnss->channel, req, req->hdr.len); + ret = rpmsg_send(wcnss->channel, req, req->hdr.len); if (ret < 0) { dev_err(wcnss->dev, "failed to send smd packet\n"); goto release_fw; @@ -274,11 +276,16 @@ free_req: * @name: SMD channel name * @cb: callback to handle incoming data on the channel */ -struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb) +struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv) { + struct rpmsg_channel_info chinfo; struct wcnss_ctrl *_wcnss = wcnss; - return qcom_smd_open_channel(_wcnss->channel, name, cb); + strncpy(chinfo.name, name, sizeof(chinfo.name)); + chinfo.src = RPMSG_ADDR_ANY; + chinfo.dst = RPMSG_ADDR_ANY; + + return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo); } EXPORT_SYMBOL(qcom_wcnss_open_channel); @@ -306,35 +313,34 @@ static void wcnss_async_probe(struct work_struct *work) of_platform_populate(wcnss->dev->of_node, NULL, NULL, wcnss->dev); } -static int wcnss_ctrl_probe(struct qcom_smd_device *sdev) +static int wcnss_ctrl_probe(struct rpmsg_device *rpdev) { struct wcnss_ctrl *wcnss; - wcnss = devm_kzalloc(&sdev->dev, sizeof(*wcnss), GFP_KERNEL); + wcnss = devm_kzalloc(&rpdev->dev, sizeof(*wcnss), GFP_KERNEL); if (!wcnss) return -ENOMEM; - wcnss->dev = &sdev->dev; - wcnss->channel = sdev->channel; + wcnss->dev = &rpdev->dev; + wcnss->channel = rpdev->ept; init_completion(&wcnss->ack); init_completion(&wcnss->cbc); INIT_WORK(&wcnss->probe_work, wcnss_async_probe); - qcom_smd_set_drvdata(sdev->channel, wcnss); - dev_set_drvdata(&sdev->dev, wcnss); + dev_set_drvdata(&rpdev->dev, wcnss); schedule_work(&wcnss->probe_work); return 0; } -static void wcnss_ctrl_remove(struct qcom_smd_device *sdev) +static void wcnss_ctrl_remove(struct rpmsg_device *rpdev) { - struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(sdev->channel); + struct wcnss_ctrl *wcnss = dev_get_drvdata(&rpdev->dev); cancel_work_sync(&wcnss->probe_work); - of_platform_depopulate(&sdev->dev); + of_platform_depopulate(&rpdev->dev); } static const struct of_device_id wcnss_ctrl_of_match[] = { @@ -342,18 +348,18 @@ static const struct of_device_id wcnss_ctrl_of_match[] = { {} }; -static struct qcom_smd_driver wcnss_ctrl_driver = { +static struct rpmsg_driver wcnss_ctrl_driver = { .probe = wcnss_ctrl_probe, .remove = wcnss_ctrl_remove, .callback = wcnss_ctrl_smd_callback, - .driver = { + .drv = { .name = "qcom_wcnss_ctrl", .owner = THIS_MODULE, .of_match_table = wcnss_ctrl_of_match, }, }; -module_qcom_smd_driver(wcnss_ctrl_driver); +module_rpmsg_driver(wcnss_ctrl_driver); MODULE_DESCRIPTION("Qualcomm WCNSS 
control driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c index 5e7537c96f7b..7412666187b3 100644 --- a/drivers/soc/renesas/r8a7795-sysc.c +++ b/drivers/soc/renesas/r8a7795-sysc.c @@ -1,7 +1,7 @@ /* * Renesas R-Car H3 System Controller * - * Copyright (C) 2016 Glider bvba + * Copyright (C) 2016-2017 Glider bvba * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -10,12 +10,13 @@ #include <linux/bug.h> #include <linux/kernel.h> +#include <linux/sys_soc.h> #include <dt-bindings/power/r8a7795-sysc.h> #include "rcar-sysc.h" -static const struct rcar_sysc_area r8a7795_areas[] __initconst = { +static struct rcar_sysc_area r8a7795_areas[] __initdata = { { "always-on", 0, 0, R8A7795_PD_ALWAYS_ON, -1, PD_ALWAYS_ON }, { "ca57-scu", 0x1c0, 0, R8A7795_PD_CA57_SCU, R8A7795_PD_ALWAYS_ON, PD_SCU }, @@ -40,6 +41,7 @@ static const struct rcar_sysc_area r8a7795_areas[] __initconst = { { "a3vp", 0x340, 0, R8A7795_PD_A3VP, R8A7795_PD_ALWAYS_ON }, { "cr7", 0x240, 0, R8A7795_PD_CR7, R8A7795_PD_ALWAYS_ON }, { "a3vc", 0x380, 0, R8A7795_PD_A3VC, R8A7795_PD_ALWAYS_ON }, + /* A2VC0 exists on ES1.x only */ { "a2vc0", 0x3c0, 0, R8A7795_PD_A2VC0, R8A7795_PD_A3VC }, { "a2vc1", 0x3c0, 1, R8A7795_PD_A2VC1, R8A7795_PD_A3VC }, { "3dg-a", 0x100, 0, R8A7795_PD_3DG_A, R8A7795_PD_ALWAYS_ON }, @@ -50,7 +52,27 @@ static const struct rcar_sysc_area r8a7795_areas[] __initconst = { { "a3ir", 0x180, 0, R8A7795_PD_A3IR, R8A7795_PD_ALWAYS_ON }, }; + + /* + * Fixups for R-Car H3 revisions after ES1.x + */ + +static const struct soc_device_attribute r8a7795es1[] __initconst = { + { .soc_id = "r8a7795", .revision = "ES1.*" }, + { /* sentinel */ } +}; + +static int __init r8a7795_sysc_init(void) +{ + if (!soc_device_match(r8a7795es1)) + rcar_sysc_nullify(r8a7795_areas, ARRAY_SIZE(r8a7795_areas), + R8A7795_PD_A2VC0); + + return 0; +} + const struct rcar_sysc_info r8a7795_sysc_info __initconst = { + .init = r8a7795_sysc_init, .areas = r8a7795_areas, .num_areas = ARRAY_SIZE(r8a7795_areas), }; diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c index 225c35c79d9a..528a13742aeb 100644 --- a/drivers/soc/renesas/rcar-sysc.c +++ b/drivers/soc/renesas/rcar-sysc.c @@ -2,7 +2,7 @@ * R-Car SYSC Power management support * * Copyright (C) 2014 Magnus Damm - * Copyright (C) 2015-2016 Glider bvba + * Copyright (C) 2015-2017 Glider bvba * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file "COPYING" in the main directory of this archive @@ -334,6 +334,12 @@ static int __init rcar_sysc_pd_init(void) info = match->data; + if (info->init) { + error = info->init(); + if (error) + return error; + } + has_cpg_mstp = of_find_compatible_node(NULL, NULL, "renesas,cpg-mstp-clocks"); @@ -377,6 +383,11 @@ static int __init rcar_sysc_pd_init(void) const struct rcar_sysc_area *area = &info->areas[i]; struct rcar_sysc_pd *pd; + if (!area->name) { + /* Skip NULLified area */ + continue; + } + pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL); if (!pd) { error = -ENOMEM; @@ -406,6 +417,18 @@ out_put: } early_initcall(rcar_sysc_pd_init); +void __init rcar_sysc_nullify(struct rcar_sysc_area *areas, + unsigned int num_areas, u8 id) +{ + unsigned int i; + + for (i = 0; i < num_areas; i++) + if (areas[i].isr_bit == id) { + areas[i].name = NULL; + return; + } +} + void __init rcar_sysc_init(phys_addr_t base, u32 syscier) { u32 syscimr; diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index f6e842e2976e..07edb049a401 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -46,6 +46,7 @@ struct rcar_sysc_area { */ struct rcar_sysc_info { + int (*init)(void); /* Optional */ const struct rcar_sysc_area *areas; unsigned int num_areas; }; @@ -59,4 +60,13 @@ extern const struct rcar_sysc_info r8a7792_sysc_info; extern const struct rcar_sysc_info r8a7794_sysc_info; extern const struct rcar_sysc_info r8a7795_sysc_info; extern const struct rcar_sysc_info r8a7796_sysc_info; + + + /* + * Helpers for fixing up power area tables depending on SoC revision + */ + +extern void rcar_sysc_nullify(struct rcar_sysc_area *areas, + unsigned int num_areas, u8 id); + #endif /* __SOC_RENESAS_RCAR_SYSC_H__ */ diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index 330960312296..ca26f13d399c 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -80,11 +80,21 @@ static const struct renesas_soc soc_rmobile_a1 __initconst __maybe_unused = { .id = 0x40, }; +static const struct renesas_soc soc_rz_g1h __initconst __maybe_unused = { + .family = &fam_rzg, + .id = 0x45, +}; + static const struct renesas_soc soc_rz_g1m __initconst __maybe_unused = { .family = &fam_rzg, .id = 0x47, }; +static const struct renesas_soc soc_rz_g1n __initconst __maybe_unused = { + .family = &fam_rzg, + .id = 0x4b, +}; + static const struct renesas_soc soc_rz_g1e __initconst __maybe_unused = { .family = &fam_rzg, .id = 0x4c, @@ -150,9 +160,15 @@ static const struct of_device_id renesas_socs[] __initconst = { #ifdef CONFIG_ARCH_R8A7740 { .compatible = "renesas,r8a7740", .data = &soc_rmobile_a1 }, #endif +#ifdef CONFIG_ARCH_R8A7742 + { .compatible = "renesas,r8a7742", .data = &soc_rz_g1h }, +#endif #ifdef CONFIG_ARCH_R8A7743 { .compatible = "renesas,r8a7743", .data = &soc_rz_g1m }, #endif +#ifdef CONFIG_ARCH_R8A7744 + { .compatible = "renesas,r8a7744", .data = &soc_rz_g1n }, +#endif #ifdef CONFIG_ARCH_R8A7745 { .compatible = "renesas,r8a7745", .data = &soc_rz_g1e }, #endif @@ -254,4 +270,4 @@ static int __init renesas_soc_init(void) return 0; } -core_initcall(renesas_soc_init); +early_initcall(renesas_soc_init); diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index 245533907d1b..8b25bd55e648 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -8,7 +8,13 @@ if SOC_SAMSUNG config EXYNOS_PMU bool "Exynos PMU controller driver" if COMPILE_TEST - depends on (ARM 
&& ARCH_EXYNOS) || ((ARM || ARM64) && COMPILE_TEST) + depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST) + select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS + +# There is no need to enable these drivers for ARMv8 +config EXYNOS_PMU_ARM_DRIVERS + bool "Exynos PMU ARMv7-specific driver extensions" if COMPILE_TEST + depends on EXYNOS_PMU config EXYNOS_PM_DOMAINS bool "Exynos PM domains" if COMPILE_TEST diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile index 3619f2ecddaa..4d7694a4e7a4 100644 --- a/drivers/soc/samsung/Makefile +++ b/drivers/soc/samsung/Makefile @@ -1,3 +1,5 @@ -obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \ +obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o + +obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \ exynos5250-pmu.o exynos5420-pmu.o obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c index 56d9244ff981..bd4a76f27bc2 100644 --- a/drivers/soc/samsung/exynos-pmu.c +++ b/drivers/soc/samsung/exynos-pmu.c @@ -69,27 +69,37 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode) } /* + * Split the data between ARM architectures because it is relatively big + * and useless on other arch. + */ +#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS +#define exynos_pmu_data_arm_ptr(data) (&data) +#else +#define exynos_pmu_data_arm_ptr(data) NULL +#endif + +/* * PMU platform driver and devicetree bindings. */ static const struct of_device_id exynos_pmu_of_device_ids[] = { { .compatible = "samsung,exynos3250-pmu", - .data = &exynos3250_pmu_data, + .data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data), }, { .compatible = "samsung,exynos4210-pmu", - .data = &exynos4210_pmu_data, + .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data), }, { .compatible = "samsung,exynos4212-pmu", - .data = &exynos4212_pmu_data, + .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data), }, { .compatible = "samsung,exynos4412-pmu", - .data = &exynos4412_pmu_data, + .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data), }, { .compatible = "samsung,exynos5250-pmu", - .data = &exynos5250_pmu_data, + .data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data), }, { .compatible = "samsung,exynos5420-pmu", - .data = &exynos5420_pmu_data, + .data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data), }, { .compatible = "samsung,exynos5433-pmu", }, diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h index a469e366fead..40d4229abfb5 100644 --- a/drivers/soc/samsung/exynos-pmu.h +++ b/drivers/soc/samsung/exynos-pmu.h @@ -31,6 +31,8 @@ struct exynos_pmu_data { }; extern void __iomem *pmu_base_addr; + +#ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS /* list of all exported SoC specific data */ extern const struct exynos_pmu_data exynos3250_pmu_data; extern const struct exynos_pmu_data exynos4210_pmu_data; @@ -38,6 +40,7 @@ extern const struct exynos_pmu_data exynos4212_pmu_data; extern const struct exynos_pmu_data exynos4412_pmu_data; extern const struct exynos_pmu_data exynos5250_pmu_data; extern const struct exynos_pmu_data exynos5420_pmu_data; +#endif extern void pmu_raw_writel(u32 val, u32 offset); extern u32 pmu_raw_readl(u32 offset); diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index e5e124c07066..dcf088db40b6 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -12,6 +12,8 @@ config ARCH_TEGRA_2x_SOC select PINCTRL_TEGRA20 select PL310_ERRATA_727915 if CACHE_L2X0 select PL310_ERRATA_769419 if CACHE_L2X0 + select SOC_TEGRA_FLOWCTRL + 
select SOC_TEGRA_PMC select TEGRA_TIMER help Support for NVIDIA Tegra AP20 and T20 processors, based on the @@ -23,6 +25,8 @@ config ARCH_TEGRA_3x_SOC select ARM_ERRATA_764369 if SMP select PINCTRL_TEGRA30 select PL310_ERRATA_769419 if CACHE_L2X0 + select SOC_TEGRA_FLOWCTRL + select SOC_TEGRA_PMC select TEGRA_TIMER help Support for NVIDIA Tegra T30 processor family, based on the @@ -33,6 +37,8 @@ config ARCH_TEGRA_114_SOC select ARM_ERRATA_798181 if SMP select HAVE_ARM_ARCH_TIMER select PINCTRL_TEGRA114 + select SOC_TEGRA_FLOWCTRL + select SOC_TEGRA_PMC select TEGRA_TIMER help Support for NVIDIA Tegra T114 processor family, based on the @@ -42,6 +48,8 @@ config ARCH_TEGRA_124_SOC bool "Enable support for Tegra124 family" select HAVE_ARM_ARCH_TIMER select PINCTRL_TEGRA124 + select SOC_TEGRA_FLOWCTRL + select SOC_TEGRA_PMC select TEGRA_TIMER help Support for NVIDIA Tegra T124 processor family, based on the @@ -55,6 +63,8 @@ if ARM64 config ARCH_TEGRA_132_SOC bool "NVIDIA Tegra132 SoC" select PINCTRL_TEGRA124 + select SOC_TEGRA_FLOWCTRL + select SOC_TEGRA_PMC help Enable support for NVIDIA Tegra132 SoC, based on the Denver ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC, @@ -64,6 +74,8 @@ config ARCH_TEGRA_132_SOC config ARCH_TEGRA_210_SOC bool "NVIDIA Tegra210 SoC" select PINCTRL_TEGRA210 + select SOC_TEGRA_FLOWCTRL + select SOC_TEGRA_PMC help Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1, the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53 @@ -83,6 +95,7 @@ config ARCH_TEGRA_186_SOC select TEGRA_BPMP select TEGRA_HSP_MBOX select TEGRA_IVC + select SOC_TEGRA_PMC_TEGRA186 help Enable support for the NVIDIA Tegar186 SoC. The Tegra186 features a combination of Denver and Cortex-A57 CPU cores and a GPU based on @@ -93,3 +106,12 @@ config ARCH_TEGRA_186_SOC endif endif + +config SOC_TEGRA_FLOWCTRL + bool + +config SOC_TEGRA_PMC + bool + +config SOC_TEGRA_PMC_TEGRA186 + bool diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile index ae857ff7d53d..4f81dd55e5d1 100644 --- a/drivers/soc/tegra/Makefile +++ b/drivers/soc/tegra/Makefile @@ -1,4 +1,6 @@ obj-y += fuse/ obj-y += common.o -obj-y += pmc.o +obj-$(CONFIG_SOC_TEGRA_FLOWCTRL) += flowctrl.o +obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o +obj-$(CONFIG_SOC_TEGRA_PMC_TEGRA186) += pmc-tegra186.o diff --git a/drivers/soc/tegra/flowctrl.c b/drivers/soc/tegra/flowctrl.c new file mode 100644 index 000000000000..0e345c05fc65 --- /dev/null +++ b/drivers/soc/tegra/flowctrl.c @@ -0,0 +1,224 @@ +/* + * drivers/soc/tegra/flowctrl.c + * + * Functions and macros to control the flowcontroller + * + * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> + +#include <soc/tegra/common.h> +#include <soc/tegra/flowctrl.h> +#include <soc/tegra/fuse.h> + +static u8 flowctrl_offset_halt_cpu[] = { + FLOW_CTRL_HALT_CPU0_EVENTS, + FLOW_CTRL_HALT_CPU1_EVENTS, + FLOW_CTRL_HALT_CPU1_EVENTS + 8, + FLOW_CTRL_HALT_CPU1_EVENTS + 16, +}; + +static u8 flowctrl_offset_cpu_csr[] = { + FLOW_CTRL_CPU0_CSR, + FLOW_CTRL_CPU1_CSR, + FLOW_CTRL_CPU1_CSR + 8, + FLOW_CTRL_CPU1_CSR + 16, +}; + +static void __iomem *tegra_flowctrl_base; + +static void flowctrl_update(u8 offset, u32 value) +{ + if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base), + "Tegra flowctrl not initialised!\n")) + return; + + writel(value, tegra_flowctrl_base + offset); + + /* ensure the update has reached the flow controller */ + wmb(); + readl_relaxed(tegra_flowctrl_base + offset); +} + +u32 flowctrl_read_cpu_csr(unsigned int cpuid) +{ + u8 offset = flowctrl_offset_cpu_csr[cpuid]; + + if (WARN_ONCE(IS_ERR_OR_NULL(tegra_flowctrl_base), + "Tegra flowctrl not initialised!\n")) + return 0; + + return readl(tegra_flowctrl_base + offset); +} + +void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value) +{ + return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value); +} + +void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) +{ + return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value); +} + +void flowctrl_cpu_suspend_enter(unsigned int cpuid) +{ + unsigned int reg; + int i; + + reg = flowctrl_read_cpu_csr(cpuid); + switch (tegra_get_chip_id()) { + case TEGRA20: + /* clear wfe bitmap */ + reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP; + /* clear wfi bitmap */ + reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP; + /* pwr gating on wfe */ + reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid; + break; + case TEGRA30: + case TEGRA114: + case TEGRA124: + /* clear wfe bitmap */ + reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP; + /* clear wfi bitmap */ + reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP; + /* pwr gating on wfi */ + reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid; + break; + } + reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr flag */ + reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event flag */ + reg |= FLOW_CTRL_CSR_ENABLE; /* pwr gating */ + flowctrl_write_cpu_csr(cpuid, reg); + + for (i = 0; i < num_possible_cpus(); i++) { + if (i == cpuid) + continue; + reg = flowctrl_read_cpu_csr(i); + reg |= FLOW_CTRL_CSR_EVENT_FLAG; + reg |= FLOW_CTRL_CSR_INTR_FLAG; + flowctrl_write_cpu_csr(i, reg); + } +} + +void flowctrl_cpu_suspend_exit(unsigned int cpuid) +{ + unsigned int reg; + + /* Disable powergating via flow controller for CPU0 */ + reg = flowctrl_read_cpu_csr(cpuid); + switch (tegra_get_chip_id()) { + case TEGRA20: + /* clear wfe bitmap */ + reg &= ~TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP; + /* clear wfi bitmap */ + reg &= ~TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP; + break; + case TEGRA30: + case TEGRA114: + case TEGRA124: + /* clear wfe bitmap */ + reg &= ~TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP; + /* clear wfi bitmap */ + reg &= ~TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP; + break; + } + reg &= ~FLOW_CTRL_CSR_ENABLE; /* clear enable */ + reg |= FLOW_CTRL_CSR_INTR_FLAG; /* clear intr */ + reg |= FLOW_CTRL_CSR_EVENT_FLAG; /* clear event */ + flowctrl_write_cpu_csr(cpuid, reg); +} + +static int tegra_flowctrl_probe(struct platform_device *pdev) +{ + void __iomem *base = tegra_flowctrl_base; + struct resource *res; + + res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); + tegra_flowctrl_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(tegra_flowctrl_base)) + return PTR_ERR(base); + + iounmap(base); + + return 0; +} + +static const struct of_device_id tegra_flowctrl_match[] = { + { .compatible = "nvidia,tegra210-flowctrl" }, + { .compatible = "nvidia,tegra124-flowctrl" }, + { .compatible = "nvidia,tegra114-flowctrl" }, + { .compatible = "nvidia,tegra30-flowctrl" }, + { .compatible = "nvidia,tegra20-flowctrl" }, + { } +}; + +static struct platform_driver tegra_flowctrl_driver = { + .driver = { + .name = "tegra-flowctrl", + .suppress_bind_attrs = true, + .of_match_table = tegra_flowctrl_match, + }, + .probe = tegra_flowctrl_probe, +}; +builtin_platform_driver(tegra_flowctrl_driver); + +static int __init tegra_flowctrl_init(void) +{ + struct resource res; + struct device_node *np; + + if (!soc_is_tegra()) + return 0; + + np = of_find_matching_node(NULL, tegra_flowctrl_match); + if (np) { + if (of_address_to_resource(np, 0, &res) < 0) { + pr_err("failed to get flowctrl register\n"); + return -ENXIO; + } + of_node_put(np); + } else if (IS_ENABLED(CONFIG_ARM)) { + /* + * Hardcoded fallback for 32-bit Tegra + * devices if device tree node is missing. + */ + res.start = 0x60007000; + res.end = 0x60007fff; + res.flags = IORESOURCE_MEM; + } else { + /* + * At this point we're running on a Tegra, + * that doesn't support the flow controller + * (eg. Tegra186), so just return. + */ + return 0; + } + + tegra_flowctrl_base = ioremap_nocache(res.start, resource_size(&res)); + if (!tegra_flowctrl_base) + return -ENXIO; + + return 0; +} +early_initcall(tegra_flowctrl_init); diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index de2c1bfe28b5..7413f60fa855 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -18,7 +18,7 @@ #include <linux/clk.h> #include <linux/device.h> #include <linux/kobject.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_address.h> @@ -168,7 +168,7 @@ static struct platform_driver tegra_fuse_driver = { }, .probe = tegra_fuse_probe, }; -module_platform_driver(tegra_fuse_driver); +builtin_platform_driver(tegra_fuse_driver); bool __init tegra_fuse_read_spare(unsigned int spare) { diff --git a/drivers/soc/tegra/pmc-tegra186.c b/drivers/soc/tegra/pmc-tegra186.c new file mode 100644 index 000000000000..6f5c6f98ba92 --- /dev/null +++ b/drivers/soc/tegra/pmc-tegra186.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#define pr_fmt(fmt) "tegra-pmc: " fmt + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> + +#include <asm/system_misc.h> + +#define PMC_CNTRL 0x000 +#define PMC_CNTRL_MAIN_RST BIT(4) + +#define PMC_RST_STATUS 0x070 + +#define WAKE_AOWAKE_CTRL 0x4f4 +#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0) + +#define SCRATCH_SCRATCH0 0x2000 +#define SCRATCH_SCRATCH0_MODE_RECOVERY BIT(31) +#define SCRATCH_SCRATCH0_MODE_BOOTLOADER BIT(30) +#define SCRATCH_SCRATCH0_MODE_RCM BIT(1) +#define SCRATCH_SCRATCH0_MODE_MASK (SCRATCH_SCRATCH0_MODE_RECOVERY | \ + SCRATCH_SCRATCH0_MODE_BOOTLOADER | \ + SCRATCH_SCRATCH0_MODE_RCM) + +struct tegra_pmc { + struct device *dev; + void __iomem *regs; + void __iomem *wake; + void __iomem *aotag; + void __iomem *scratch; + + void (*system_restart)(enum reboot_mode mode, const char *cmd); + struct notifier_block restart; +}; + +static int tegra186_pmc_restart_notify(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct tegra_pmc *pmc = container_of(nb, struct tegra_pmc, restart); + const char *cmd = data; + u32 value; + + value = readl(pmc->scratch + SCRATCH_SCRATCH0); + value &= ~SCRATCH_SCRATCH0_MODE_MASK; + + if (cmd) { + if (strcmp(cmd, "recovery") == 0) + value |= SCRATCH_SCRATCH0_MODE_RECOVERY; + + if (strcmp(cmd, "bootloader") == 0) + value |= SCRATCH_SCRATCH0_MODE_BOOTLOADER; + + if (strcmp(cmd, "forced-recovery") == 0) + value |= SCRATCH_SCRATCH0_MODE_RCM; + } + + writel(value, pmc->scratch + SCRATCH_SCRATCH0); + + /* + * If available, call the system restart implementation that was + * registered earlier (typically PSCI). + */ + if (pmc->system_restart) { + pmc->system_restart(reboot_mode, cmd); + return NOTIFY_DONE; + } + + /* reset everything but SCRATCH0_SCRATCH0 and PMC_RST_STATUS */ + value = readl(pmc->regs + PMC_CNTRL); + value |= PMC_CNTRL_MAIN_RST; + writel(value, pmc->regs + PMC_CNTRL); + + return NOTIFY_DONE; +} + +static int tegra186_pmc_setup(struct tegra_pmc *pmc) +{ + struct device_node *np = pmc->dev->of_node; + bool invert; + u32 value; + + invert = of_property_read_bool(np, "nvidia,invert-interrupt"); + + value = readl(pmc->wake + WAKE_AOWAKE_CTRL); + + if (invert) + value |= WAKE_AOWAKE_CTRL_INTR_POLARITY; + else + value &= ~WAKE_AOWAKE_CTRL_INTR_POLARITY; + + writel(value, pmc->wake + WAKE_AOWAKE_CTRL); + + /* + * We need to hook any system restart implementation registered + * previously so we can write SCRATCH_SCRATCH0 before reset. 
+ */ + pmc->system_restart = arm_pm_restart; + arm_pm_restart = NULL; + + pmc->restart.notifier_call = tegra186_pmc_restart_notify; + pmc->restart.priority = 128; + + return register_restart_handler(&pmc->restart); +} + +static int tegra186_pmc_probe(struct platform_device *pdev) +{ + struct tegra_pmc *pmc; + struct resource *res; + + pmc = devm_kzalloc(&pdev->dev, sizeof(*pmc), GFP_KERNEL); + if (!pmc) + return -ENOMEM; + + pmc->dev = &pdev->dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmc"); + pmc->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->regs)) + return PTR_ERR(pmc->regs); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake"); + pmc->wake = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->wake)) + return PTR_ERR(pmc->wake); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag"); + pmc->aotag = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->aotag)) + return PTR_ERR(pmc->aotag); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch"); + pmc->scratch = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->scratch)) + return PTR_ERR(pmc->scratch); + + return tegra186_pmc_setup(pmc); +} + +static const struct of_device_id tegra186_pmc_of_match[] = { + { .compatible = "nvidia,tegra186-pmc" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, tegra186_pmc_of_match); + +static struct platform_driver tegra186_pmc_driver = { + .driver = { + .name = "tegra186-pmc", + .of_match_table = tegra186_pmc_of_match, + }, + .probe = tegra186_pmc_probe, +}; +builtin_platform_driver(tegra186_pmc_driver); diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index 3557c5e32a93..39e152abe6b9 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig @@ -38,4 +38,16 @@ config WKUP_M3_IPC to communicate and use the Wakeup M3 for PM features like suspend resume and boots it using wkup_m3_rproc driver. +config TI_SCI_PM_DOMAINS + tristate "TI SCI PM Domains Driver" + depends on TI_SCI_PROTOCOL + depends on PM_GENERIC_DOMAINS + help + Generic power domain implementation for TI device implementing + the TI SCI protocol. + + To compile this as a module, choose M here. The module will be + called ti_sci_pm_domains. Note this is needed early in boot before + rootfs may be available. + endif # SOC_TI diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile index 48ff3a79634f..7d572736c86e 100644 --- a/drivers/soc/ti/Makefile +++ b/drivers/soc/ti/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o +obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c new file mode 100644 index 000000000000..b0b283810e72 --- /dev/null +++ b/drivers/soc/ti/ti_sci_pm_domains.c @@ -0,0 +1,202 @@ +/* + * TI SCI Generic Power Domain Driver + * + * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + * J Keerthy <j-keerthy@ti.com> + * Dave Gerlach <d-gerlach@ti.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_genpd_dev_data: holds data needed for every device attached
+ *				 to this genpd
+ * @idx: index of the device that identifies it with the system
+ *	 control processor.
+ */
+struct ti_sci_genpd_dev_data {
+	int idx;
+};
+
+/**
+ * struct ti_sci_pm_domain: TI-specific data needed for the power domain
+ * @ti_sci: handle to the TI SCI protocol driver that provides ops to
+ *	    communicate with the system control processor.
+ * @dev: pointer to the driver's dev, used for devm allocations
+ * @pd: generic_pm_domain for use with the genpd framework
+ */
+struct ti_sci_pm_domain {
+	const struct ti_sci_handle *ti_sci;
+	struct device *dev;
+	struct generic_pm_domain pd;
+};
+
+#define genpd_to_ti_sci_pd(gpd) container_of(gpd, struct ti_sci_pm_domain, pd)
+
+/**
+ * ti_sci_dev_id(): get the prepopulated TI SCI id for a device
+ * @dev: pointer to device associated with this genpd
+ *
+ * Returns the TI SCI device ID stored for this device at attach time
+ * (taken from its power-domains specifier).
+ */
+static int ti_sci_dev_id(struct device *dev)
+{
+	struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev);
+	struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data;
+
+	return sci_dev_data->idx;
+}
+
+/**
+ * ti_sci_dev_to_sci_handle(): get pointer to ti_sci_handle
+ * @dev: pointer to device associated with this genpd
+ *
+ * Returns the ti_sci_handle to be used to communicate with the system
+ * control processor.
+ */ +static const struct ti_sci_handle *ti_sci_dev_to_sci_handle(struct device *dev) +{ + struct generic_pm_domain *pd = pd_to_genpd(dev->pm_domain); + struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(pd); + + return ti_sci_genpd->ti_sci; +} + +/** + * ti_sci_dev_start(): genpd device start hook called to turn device on + * @dev: pointer to device associated with this genpd to be powered on + */ +static int ti_sci_dev_start(struct device *dev) +{ + const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev); + int idx = ti_sci_dev_id(dev); + + return ti_sci->ops.dev_ops.get_device(ti_sci, idx); +} + +/** + * ti_sci_dev_stop(): genpd device stop hook called to turn device off + * @dev: pointer to device associated with this genpd to be powered off + */ +static int ti_sci_dev_stop(struct device *dev) +{ + const struct ti_sci_handle *ti_sci = ti_sci_dev_to_sci_handle(dev); + int idx = ti_sci_dev_id(dev); + + return ti_sci->ops.dev_ops.put_device(ti_sci, idx); +} + +static int ti_sci_pd_attach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + struct device_node *np = dev->of_node; + struct of_phandle_args pd_args; + struct ti_sci_pm_domain *ti_sci_genpd = genpd_to_ti_sci_pd(domain); + const struct ti_sci_handle *ti_sci = ti_sci_genpd->ti_sci; + struct ti_sci_genpd_dev_data *sci_dev_data; + struct generic_pm_domain_data *genpd_data; + int idx, ret = 0; + + ret = of_parse_phandle_with_args(np, "power-domains", + "#power-domain-cells", 0, &pd_args); + if (ret < 0) + return ret; + + if (pd_args.args_count != 1) + return -EINVAL; + + idx = pd_args.args[0]; + + /* + * Check the validity of the requested idx, if the index is not valid + * the PMMC will return a NAK here and we will not allocate it. + */ + ret = ti_sci->ops.dev_ops.is_valid(ti_sci, idx); + if (ret) + return -EINVAL; + + sci_dev_data = kzalloc(sizeof(*sci_dev_data), GFP_KERNEL); + if (!sci_dev_data) + return -ENOMEM; + + sci_dev_data->idx = idx; + + genpd_data = dev_gpd_data(dev); + genpd_data->data = sci_dev_data; + + return 0; +} + +static void ti_sci_pd_detach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + struct generic_pm_domain_data *genpd_data = dev_gpd_data(dev); + struct ti_sci_genpd_dev_data *sci_dev_data = genpd_data->data; + + kfree(sci_dev_data); + genpd_data->data = NULL; +} + +static const struct of_device_id ti_sci_pm_domain_matches[] = { + { .compatible = "ti,sci-pm-domain", }, + { }, +}; +MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches); + +static int ti_sci_pm_domain_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct ti_sci_pm_domain *ti_sci_pd; + int ret; + + ti_sci_pd = devm_kzalloc(dev, sizeof(*ti_sci_pd), GFP_KERNEL); + if (!ti_sci_pd) + return -ENOMEM; + + ti_sci_pd->ti_sci = devm_ti_sci_get_handle(dev); + if (IS_ERR(ti_sci_pd->ti_sci)) + return PTR_ERR(ti_sci_pd->ti_sci); + + ti_sci_pd->dev = dev; + + ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev; + ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev; + + ti_sci_pd->pd.dev_ops.start = ti_sci_dev_start; + ti_sci_pd->pd.dev_ops.stop = ti_sci_dev_stop; + + pm_genpd_init(&ti_sci_pd->pd, NULL, true); + + ret = of_genpd_add_provider_simple(np, &ti_sci_pd->pd); + + return ret; +} + +static struct platform_driver ti_sci_pm_domains_driver = { + .probe = ti_sci_pm_domain_probe, + .driver = { + .name = "ti_sci_pm_domains", + .of_match_table = ti_sci_pm_domain_matches, + }, +}; +module_platform_driver(ti_sci_pm_domains_driver); +MODULE_LICENSE("GPL 
v2"); +MODULE_DESCRIPTION("TI System Control Interface (SCI) Power Domain driver"); +MODULE_AUTHOR("Dave Gerlach"); diff --git a/drivers/soc/zte/zx296718_pm_domains.c b/drivers/soc/zte/zx296718_pm_domains.c index 5ed924fee855..4dc5d62ee81b 100644 --- a/drivers/soc/zte/zx296718_pm_domains.c +++ b/drivers/soc/zte/zx296718_pm_domains.c @@ -169,7 +169,6 @@ static const struct of_device_id zx296718_pm_domain_matches[] = { static struct platform_driver zx296718_pd_driver = { .driver = { .name = "zx296718-powerdomain", - .owner = THIS_MODULE, .of_match_table = zx296718_pm_domain_matches, }, .probe = zx296718_pd_probe, diff --git a/drivers/soc/zte/zx2967_pm_domains.c b/drivers/soc/zte/zx2967_pm_domains.c index 61c8d84bf315..c42aeaaa34ba 100644 --- a/drivers/soc/zte/zx2967_pm_domains.c +++ b/drivers/soc/zte/zx2967_pm_domains.c @@ -125,10 +125,8 @@ int zx2967_pd_probe(struct platform_device *pdev, res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pcubase = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pcubase)) { - dev_err(&pdev->dev, "ioremap fail.\n"); + if (IS_ERR(pcubase)) return PTR_ERR(pcubase); - } for (i = 0; i < domain_num; ++i) { zx_pm_domains[i]->power_on = zx2967_power_on; |