Diffstat (limited to 'drivers/soc')
37 files changed, 2999 insertions, 1366 deletions
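The bulk of the additions below introduce drivers/soc/apple/mailbox.c, a SoC-internal replacement for the old mailbox-framework driver, with its API declared in the new drivers/soc/apple/mailbox.h and consumed directly by rtkit.c. For orientation, a minimal consumer sketch of that API follows; it only uses functions and fields visible in the diff, but the device, channel name, payload and rx handler are hypothetical and not taken from any in-tree user.

        /* Minimal sketch of a hypothetical consumer of the new apple_mbox API. */
        #include <linux/err.h>
        #include <linux/printk.h>
        #include "mailbox.h"

        static void demo_rx(struct apple_mbox *mbox, struct apple_mbox_msg msg,
                            void *cookie)
        {
                /* msg0 carries the 64-bit payload, msg1 the 32-bit tag/endpoint. */
                pr_info("mbox rx: msg0=%llx msg1=%x\n",
                        (unsigned long long)msg.msg0, msg.msg1);
        }

        static int demo_attach(struct device *dev)
        {
                struct apple_mbox_msg msg = { .msg0 = 0x1234, .msg1 = 0 };
                struct apple_mbox *mbox;
                int ret;

                /* "mbox" is a made-up mbox-names entry; apple_mbox_get(dev, 0) works too. */
                mbox = apple_mbox_get_byname(dev, "mbox");
                if (IS_ERR(mbox))
                        return PTR_ERR(mbox);

                mbox->rx = demo_rx;
                mbox->cookie = dev;

                /* Resumes the mailbox and enables its recv-not-empty IRQ. */
                ret = apple_mbox_start(mbox);
                if (ret)
                        return ret;

                /* atomic=false: may sleep waiting for room in the A2I FIFO. */
                return apple_mbox_send(mbox, msg, false);
        }

This mirrors how rtkit.c is converted later in the diff: get the mailbox by name or index, set ->rx and ->cookie, then apple_mbox_start() before the first apple_mbox_send().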
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 10a9ff84ff41..5d924e946507 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -22,7 +22,6 @@ source "drivers/soc/qcom/Kconfig" source "drivers/soc/renesas/Kconfig" source "drivers/soc/rockchip/Kconfig" source "drivers/soc/samsung/Kconfig" -source "drivers/soc/sifive/Kconfig" source "drivers/soc/sunxi/Kconfig" source "drivers/soc/tegra/Kconfig" source "drivers/soc/ti/Kconfig" diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 0706a27d13be..ba8f5b5460e1 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -28,7 +28,6 @@ obj-y += qcom/ obj-y += renesas/ obj-y += rockchip/ obj-$(CONFIG_SOC_SAMSUNG) += samsung/ -obj-y += sifive/ obj-y += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-y += ti/ diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig index eff486a77337..6388cbe1e56b 100644 --- a/drivers/soc/apple/Kconfig +++ b/drivers/soc/apple/Kconfig @@ -4,9 +4,22 @@ if ARCH_APPLE || COMPILE_TEST menu "Apple SoC drivers" +config APPLE_MAILBOX + tristate "Apple SoC mailboxes" + depends on PM + depends on ARCH_APPLE || (64BIT && COMPILE_TEST) + default ARCH_APPLE + help + Apple SoCs have various co-processors required for certain + peripherals to work (NVMe, display controller, etc.). This + driver adds support for the mailbox controller used to + communicate with those. + + Say Y here if you have an Apple SoC. + config APPLE_RTKIT tristate "Apple RTKit co-processor IPC protocol" - depends on MAILBOX + depends on APPLE_MAILBOX depends on ARCH_APPLE || COMPILE_TEST default ARCH_APPLE help diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile index b241e6a65e5b..4d9ab8f3037b 100644 --- a/drivers/soc/apple/Makefile +++ b/drivers/soc/apple/Makefile @@ -1,4 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o +apple-mailbox-y = mailbox.o + obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o apple-rtkit-y = rtkit.o rtkit-crashlog.o diff --git a/drivers/soc/apple/mailbox.c b/drivers/soc/apple/mailbox.c new file mode 100644 index 000000000000..780199bf351e --- /dev/null +++ b/drivers/soc/apple/mailbox.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Apple mailbox driver + * + * Copyright The Asahi Linux Contributors + * + * This driver adds support for two mailbox variants (called ASC and M3 by + * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to + * exchange 64+32 bit messages between the main CPU and a co-processor. + * Various coprocessors implement different IPC protocols based on these simple + * messages and shared memory buffers. + * + * Both the main CPU and the co-processor see the same set of registers but + * the first FIFO (A2I) is always used to transfer messages from the application + * processor (us) to the I/O processor and the second one (I2A) for the + * other direction. 
+ */ + +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include "mailbox.h" + +#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16) +#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17) + +#define APPLE_ASC_MBOX_A2I_CONTROL 0x110 +#define APPLE_ASC_MBOX_A2I_SEND0 0x800 +#define APPLE_ASC_MBOX_A2I_SEND1 0x808 +#define APPLE_ASC_MBOX_A2I_RECV0 0x810 +#define APPLE_ASC_MBOX_A2I_RECV1 0x818 + +#define APPLE_ASC_MBOX_I2A_CONTROL 0x114 +#define APPLE_ASC_MBOX_I2A_SEND0 0x820 +#define APPLE_ASC_MBOX_I2A_SEND1 0x828 +#define APPLE_ASC_MBOX_I2A_RECV0 0x830 +#define APPLE_ASC_MBOX_I2A_RECV1 0x838 + +#define APPLE_M3_MBOX_CONTROL_FULL BIT(16) +#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17) + +#define APPLE_M3_MBOX_A2I_CONTROL 0x50 +#define APPLE_M3_MBOX_A2I_SEND0 0x60 +#define APPLE_M3_MBOX_A2I_SEND1 0x68 +#define APPLE_M3_MBOX_A2I_RECV0 0x70 +#define APPLE_M3_MBOX_A2I_RECV1 0x78 + +#define APPLE_M3_MBOX_I2A_CONTROL 0x80 +#define APPLE_M3_MBOX_I2A_SEND0 0x90 +#define APPLE_M3_MBOX_I2A_SEND1 0x98 +#define APPLE_M3_MBOX_I2A_RECV0 0xa0 +#define APPLE_M3_MBOX_I2A_RECV1 0xa8 + +#define APPLE_M3_MBOX_IRQ_ENABLE 0x48 +#define APPLE_M3_MBOX_IRQ_ACK 0x4c +#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0) +#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1) +#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2) +#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3) + +#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52) +#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48) +#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44) +#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40) +#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0) + +#define APPLE_MBOX_TX_TIMEOUT 500 + +struct apple_mbox_hw { + unsigned int control_full; + unsigned int control_empty; + + unsigned int a2i_control; + unsigned int a2i_send0; + unsigned int a2i_send1; + + unsigned int i2a_control; + unsigned int i2a_recv0; + unsigned int i2a_recv1; + + bool has_irq_controls; + unsigned int irq_enable; + unsigned int irq_ack; + unsigned int irq_bit_recv_not_empty; + unsigned int irq_bit_send_empty; +}; + +int apple_mbox_send(struct apple_mbox *mbox, const struct apple_mbox_msg msg, + bool atomic) +{ + unsigned long flags; + int ret; + u32 mbox_ctrl; + long t; + + spin_lock_irqsave(&mbox->tx_lock, flags); + mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control); + + while (mbox_ctrl & mbox->hw->control_full) { + if (atomic) { + ret = readl_poll_timeout_atomic( + mbox->regs + mbox->hw->a2i_control, mbox_ctrl, + !(mbox_ctrl & mbox->hw->control_full), 100, + APPLE_MBOX_TX_TIMEOUT * 1000); + + if (ret) { + spin_unlock_irqrestore(&mbox->tx_lock, flags); + return ret; + } + + break; + } + /* + * The interrupt is level triggered and will keep firing as long as the + * FIFO is empty. It will also keep firing if the FIFO was empty + * at any point in the past until it has been acknowledged at the + * mailbox level. By acknowledging it here we can ensure that we will + * only get the interrupt once the FIFO has been cleared again. + * If the FIFO is already empty before the ack it will fire again + * immediately after the ack. 
+ */ + if (mbox->hw->has_irq_controls) { + writel_relaxed(mbox->hw->irq_bit_send_empty, + mbox->regs + mbox->hw->irq_ack); + } + enable_irq(mbox->irq_send_empty); + reinit_completion(&mbox->tx_empty); + spin_unlock_irqrestore(&mbox->tx_lock, flags); + + t = wait_for_completion_interruptible_timeout( + &mbox->tx_empty, + msecs_to_jiffies(APPLE_MBOX_TX_TIMEOUT)); + if (t < 0) + return t; + else if (t == 0) + return -ETIMEDOUT; + + spin_lock_irqsave(&mbox->tx_lock, flags); + mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->a2i_control); + } + + writeq_relaxed(msg.msg0, mbox->regs + mbox->hw->a2i_send0); + writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg.msg1), + mbox->regs + mbox->hw->a2i_send1); + + spin_unlock_irqrestore(&mbox->tx_lock, flags); + + return 0; +} +EXPORT_SYMBOL(apple_mbox_send); + +static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data) +{ + struct apple_mbox *mbox = data; + + /* + * We don't need to acknowledge the interrupt at the mailbox level + * here even if supported by the hardware. It will keep firing but that + * doesn't matter since it's disabled at the main interrupt controller. + * apple_mbox_send will acknowledge it before enabling + * it at the main controller again. + */ + spin_lock(&mbox->tx_lock); + disable_irq_nosync(mbox->irq_send_empty); + complete(&mbox->tx_empty); + spin_unlock(&mbox->tx_lock); + + return IRQ_HANDLED; +} + +static int apple_mbox_poll_locked(struct apple_mbox *mbox) +{ + struct apple_mbox_msg msg; + int ret = 0; + + u32 mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control); + + while (!(mbox_ctrl & mbox->hw->control_empty)) { + msg.msg0 = readq_relaxed(mbox->regs + mbox->hw->i2a_recv0); + msg.msg1 = FIELD_GET( + APPLE_MBOX_MSG1_MSG, + readq_relaxed(mbox->regs + mbox->hw->i2a_recv1)); + + mbox->rx(mbox, msg, mbox->cookie); + ret++; + mbox_ctrl = readl_relaxed(mbox->regs + mbox->hw->i2a_control); + } + + /* + * The interrupt will keep firing even if there are no more messages + * unless we also acknowledge it at the mailbox level here. + * There's no race if a message comes in between the check in the while + * loop above and the ack below: If a new messages arrives inbetween + * those two the interrupt will just fire again immediately after the + * ack since it's level triggered. + */ + if (mbox->hw->has_irq_controls) { + writel_relaxed(mbox->hw->irq_bit_recv_not_empty, + mbox->regs + mbox->hw->irq_ack); + } + + return ret; +} + +static irqreturn_t apple_mbox_recv_irq(int irq, void *data) +{ + struct apple_mbox *mbox = data; + + spin_lock(&mbox->rx_lock); + apple_mbox_poll_locked(mbox); + spin_unlock(&mbox->rx_lock); + + return IRQ_HANDLED; +} + +int apple_mbox_poll(struct apple_mbox *mbox) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mbox->rx_lock, flags); + ret = apple_mbox_poll_locked(mbox); + spin_unlock_irqrestore(&mbox->rx_lock, flags); + + return ret; +} +EXPORT_SYMBOL(apple_mbox_poll); + +int apple_mbox_start(struct apple_mbox *mbox) +{ + int ret; + + if (mbox->active) + return 0; + + ret = pm_runtime_resume_and_get(mbox->dev); + if (ret) + return ret; + + /* + * Only some variants of this mailbox HW provide interrupt control + * at the mailbox level. We therefore need to handle enabling/disabling + * interrupts at the main interrupt controller anyway for hardware that + * doesn't. Just always keep the interrupts we care about enabled at + * the mailbox level so that both hardware revisions behave almost + * the same. 
+ */ + if (mbox->hw->has_irq_controls) { + writel_relaxed(mbox->hw->irq_bit_recv_not_empty | + mbox->hw->irq_bit_send_empty, + mbox->regs + mbox->hw->irq_enable); + } + + enable_irq(mbox->irq_recv_not_empty); + mbox->active = true; + return 0; +} +EXPORT_SYMBOL(apple_mbox_start); + +void apple_mbox_stop(struct apple_mbox *mbox) +{ + if (!mbox->active) + return; + + mbox->active = false; + disable_irq(mbox->irq_recv_not_empty); + pm_runtime_mark_last_busy(mbox->dev); + pm_runtime_put_autosuspend(mbox->dev); +} +EXPORT_SYMBOL(apple_mbox_stop); + +struct apple_mbox *apple_mbox_get(struct device *dev, int index) +{ + struct of_phandle_args args; + struct platform_device *pdev; + struct apple_mbox *mbox; + int ret; + + ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", + index, &args); + if (ret || !args.np) + return ERR_PTR(ret); + + pdev = of_find_device_by_node(args.np); + of_node_put(args.np); + + if (!pdev) + return ERR_PTR(EPROBE_DEFER); + + mbox = platform_get_drvdata(pdev); + if (!mbox) + return ERR_PTR(EPROBE_DEFER); + + if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER)) + return ERR_PTR(ENODEV); + + return mbox; +} +EXPORT_SYMBOL(apple_mbox_get); + +struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name) +{ + int index; + + index = of_property_match_string(dev->of_node, "mbox-names", name); + if (index < 0) + return ERR_PTR(index); + + return apple_mbox_get(dev, index); +} +EXPORT_SYMBOL(apple_mbox_get_byname); + +static int apple_mbox_probe(struct platform_device *pdev) +{ + int ret; + char *irqname; + struct apple_mbox *mbox; + struct device *dev = &pdev->dev; + + mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + mbox->dev = &pdev->dev; + mbox->hw = of_device_get_match_data(dev); + if (!mbox->hw) + return -EINVAL; + + mbox->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mbox->regs)) + return PTR_ERR(mbox->regs); + + mbox->irq_recv_not_empty = + platform_get_irq_byname(pdev, "recv-not-empty"); + if (mbox->irq_recv_not_empty < 0) + return -ENODEV; + + mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty"); + if (mbox->irq_send_empty < 0) + return -ENODEV; + + spin_lock_init(&mbox->rx_lock); + spin_lock_init(&mbox->tx_lock); + init_completion(&mbox->tx_empty); + + irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev)); + if (!irqname) + return -ENOMEM; + + ret = devm_request_irq(dev, mbox->irq_recv_not_empty, + apple_mbox_recv_irq, + IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox); + if (ret) + return ret; + + irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev)); + if (!irqname) + return -ENOMEM; + + ret = devm_request_irq(dev, mbox->irq_send_empty, + apple_mbox_send_empty_irq, + IRQF_NO_AUTOEN | IRQF_NO_SUSPEND, irqname, mbox); + if (ret) + return ret; + + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mbox); + return 0; +} + +static const struct apple_mbox_hw apple_mbox_asc_hw = { + .control_full = APPLE_ASC_MBOX_CONTROL_FULL, + .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY, + + .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL, + .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0, + .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1, + + .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL, + .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0, + .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1, + + .has_irq_controls = false, +}; + +static const struct apple_mbox_hw apple_mbox_m3_hw = { + .control_full = APPLE_M3_MBOX_CONTROL_FULL, + .control_empty = 
APPLE_M3_MBOX_CONTROL_EMPTY, + + .a2i_control = APPLE_M3_MBOX_A2I_CONTROL, + .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0, + .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1, + + .i2a_control = APPLE_M3_MBOX_I2A_CONTROL, + .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0, + .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1, + + .has_irq_controls = true, + .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE, + .irq_ack = APPLE_M3_MBOX_IRQ_ACK, + .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY, + .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY, +}; + +static const struct of_device_id apple_mbox_of_match[] = { + { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw }, + { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw }, + {} +}; +MODULE_DEVICE_TABLE(of, apple_mbox_of_match); + +static struct platform_driver apple_mbox_driver = { + .driver = { + .name = "apple-mailbox", + .of_match_table = apple_mbox_of_match, + }, + .probe = apple_mbox_probe, +}; +module_platform_driver(apple_mbox_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>"); +MODULE_DESCRIPTION("Apple Mailbox driver"); diff --git a/drivers/soc/apple/mailbox.h b/drivers/soc/apple/mailbox.h new file mode 100644 index 000000000000..f73a8913da95 --- /dev/null +++ b/drivers/soc/apple/mailbox.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* + * Apple mailbox message format + * + * Copyright The Asahi Linux Contributors + */ + +#ifndef _APPLE_MAILBOX_H_ +#define _APPLE_MAILBOX_H_ + +#include <linux/device.h> +#include <linux/types.h> + +/* encodes a single 96bit message sent over the single channel */ +struct apple_mbox_msg { + u64 msg0; + u32 msg1; +}; + +struct apple_mbox { + struct device *dev; + void __iomem *regs; + const struct apple_mbox_hw *hw; + bool active; + + int irq_recv_not_empty; + int irq_send_empty; + + spinlock_t rx_lock; + spinlock_t tx_lock; + + struct completion tx_empty; + + /** Receive callback for incoming messages */ + void (*rx)(struct apple_mbox *mbox, struct apple_mbox_msg msg, void *cookie); + void *cookie; +}; + +struct apple_mbox *apple_mbox_get(struct device *dev, int index); +struct apple_mbox *apple_mbox_get_byname(struct device *dev, const char *name); + +int apple_mbox_start(struct apple_mbox *mbox); +void apple_mbox_stop(struct apple_mbox *mbox); +int apple_mbox_poll(struct apple_mbox *mbox); +int apple_mbox_send(struct apple_mbox *mbox, struct apple_mbox_msg msg, + bool atomic); + +#endif diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h index 24bd619ec5e4..27c9fa745fd5 100644 --- a/drivers/soc/apple/rtkit-internal.h +++ b/drivers/soc/apple/rtkit-internal.h @@ -7,18 +7,17 @@ #ifndef _APPLE_RTKIT_INTERAL_H #define _APPLE_RTKIT_INTERAL_H -#include <linux/apple-mailbox.h> #include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/kernel.h> -#include <linux/mailbox_client.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/soc/apple/rtkit.h> #include <linux/workqueue.h> +#include "mailbox.h" #define APPLE_RTKIT_APP_ENDPOINT_START 0x20 #define APPLE_RTKIT_MAX_ENDPOINTS 0x100 @@ -28,10 +27,7 @@ struct apple_rtkit { const struct apple_rtkit_ops *ops; struct device *dev; - const char *mbox_name; - int mbox_idx; - struct mbox_client mbox_cl; - struct mbox_chan *mbox_chan; + struct apple_mbox *mbox; struct completion epmap_completion; struct completion iop_pwr_ack_completion; diff --git a/drivers/soc/apple/rtkit.c 
b/drivers/soc/apple/rtkit.c index d9f19dc99da5..e6d940292c9f 100644 --- a/drivers/soc/apple/rtkit.c +++ b/drivers/soc/apple/rtkit.c @@ -72,11 +72,6 @@ enum { #define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11 #define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12 -struct apple_rtkit_msg { - struct completion *completion; - struct apple_mbox_msg mbox_msg; -}; - struct apple_rtkit_rx_work { struct apple_rtkit *rtk; u8 ep; @@ -550,12 +545,12 @@ static void apple_rtkit_rx_work(struct work_struct *work) kfree(rtk_work); } -static void apple_rtkit_rx(struct mbox_client *cl, void *mssg) +static void apple_rtkit_rx(struct apple_mbox *mbox, struct apple_mbox_msg msg, + void *cookie) { - struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl); - struct apple_mbox_msg *msg = mssg; + struct apple_rtkit *rtk = cookie; struct apple_rtkit_rx_work *work; - u8 ep = msg->msg1; + u8 ep = msg.msg1; /* * The message was read from a MMIO FIFO and we have to make @@ -571,7 +566,7 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg) if (ep >= APPLE_RTKIT_APP_ENDPOINT_START && rtk->ops->recv_message_early && - rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0)) + rtk->ops->recv_message_early(rtk->cookie, ep, msg.msg0)) return; work = kzalloc(sizeof(*work), GFP_ATOMIC); @@ -580,30 +575,18 @@ static void apple_rtkit_rx(struct mbox_client *cl, void *mssg) work->rtk = rtk; work->ep = ep; - work->msg = msg->msg0; + work->msg = msg.msg0; INIT_WORK(&work->work, apple_rtkit_rx_work); queue_work(rtk->wq, &work->work); } -static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r) -{ - struct apple_rtkit_msg *msg = - container_of(mssg, struct apple_rtkit_msg, mbox_msg); - - if (r == -ETIME) - return; - - if (msg->completion) - complete(msg->completion); - kfree(msg); -} - int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, struct completion *completion, bool atomic) { - struct apple_rtkit_msg *msg; - int ret; - gfp_t flags; + struct apple_mbox_msg msg = { + .msg0 = message, + .msg1 = ep, + }; if (rtk->crashed) return -EINVAL; @@ -611,19 +594,6 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, !apple_rtkit_is_running(rtk)) return -EINVAL; - if (atomic) - flags = GFP_ATOMIC; - else - flags = GFP_KERNEL; - - msg = kzalloc(sizeof(*msg), flags); - if (!msg) - return -ENOMEM; - - msg->mbox_msg.msg0 = message; - msg->mbox_msg.msg1 = ep; - msg->completion = completion; - /* * The message will be sent with a MMIO write. 
We need the barrier * here to ensure any previous writes to buffers are visible to the @@ -631,51 +601,13 @@ int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message, */ dma_wmb(); - ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg); - if (ret < 0) { - kfree(msg); - return ret; - } - - return 0; + return apple_mbox_send(rtk->mbox, msg, atomic); } EXPORT_SYMBOL_GPL(apple_rtkit_send_message); -int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message, - unsigned long timeout, bool atomic) -{ - DECLARE_COMPLETION_ONSTACK(completion); - int ret; - long t; - - ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic); - if (ret < 0) - return ret; - - if (atomic) { - ret = mbox_flush(rtk->mbox_chan, timeout); - if (ret < 0) - return ret; - - if (try_wait_for_completion(&completion)) - return 0; - - return -ETIME; - } else { - t = wait_for_completion_interruptible_timeout( - &completion, msecs_to_jiffies(timeout)); - if (t < 0) - return t; - else if (t == 0) - return -ETIME; - return 0; - } -} -EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait); - int apple_rtkit_poll(struct apple_rtkit *rtk) { - return mbox_client_peek_data(rtk->mbox_chan); + return apple_mbox_poll(rtk->mbox); } EXPORT_SYMBOL_GPL(apple_rtkit_poll); @@ -697,20 +629,6 @@ int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint) } EXPORT_SYMBOL_GPL(apple_rtkit_start_ep); -static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk) -{ - if (rtk->mbox_name) - rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl, - rtk->mbox_name); - else - rtk->mbox_chan = - mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx); - - if (IS_ERR(rtk->mbox_chan)) - return PTR_ERR(rtk->mbox_chan); - return 0; -} - struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, const char *mbox_name, int mbox_idx, const struct apple_rtkit_ops *ops) @@ -736,13 +654,18 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS); set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints); - rtk->mbox_name = mbox_name; - rtk->mbox_idx = mbox_idx; - rtk->mbox_cl.dev = dev; - rtk->mbox_cl.tx_block = false; - rtk->mbox_cl.knows_txdone = false; - rtk->mbox_cl.rx_callback = &apple_rtkit_rx; - rtk->mbox_cl.tx_done = &apple_rtkit_tx_done; + if (mbox_name) + rtk->mbox = apple_mbox_get_byname(dev, mbox_name); + else + rtk->mbox = apple_mbox_get(dev, mbox_idx); + + if (IS_ERR(rtk->mbox)) { + ret = PTR_ERR(rtk->mbox); + goto free_rtk; + } + + rtk->mbox->rx = apple_rtkit_rx; + rtk->mbox->cookie = rtk; rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM, dev_name(rtk->dev)); @@ -751,7 +674,7 @@ struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, goto free_rtk; } - ret = apple_rtkit_request_mbox_chan(rtk); + ret = apple_mbox_start(rtk->mbox); if (ret) goto destroy_wq; @@ -782,7 +705,7 @@ static int apple_rtkit_wait_for_completion(struct completion *c) int apple_rtkit_reinit(struct apple_rtkit *rtk) { /* make sure we don't handle any messages while reinitializing */ - mbox_free_channel(rtk->mbox_chan); + apple_mbox_stop(rtk->mbox); flush_workqueue(rtk->wq); apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); @@ -806,7 +729,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk) rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF; rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF; - return apple_rtkit_request_mbox_chan(rtk); + return apple_mbox_start(rtk->mbox); } EXPORT_SYMBOL_GPL(apple_rtkit_reinit); @@ -962,7 +885,7 @@ 
EXPORT_SYMBOL_GPL(apple_rtkit_wake); void apple_rtkit_free(struct apple_rtkit *rtk) { - mbox_free_channel(rtk->mbox_chan); + apple_mbox_stop(rtk->mbox); destroy_workqueue(rtk->wq); apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c index 92ec76c03965..f498db9abe35 100644 --- a/drivers/soc/fsl/qe/qmc.c +++ b/drivers/soc/fsl/qe/qmc.c @@ -166,7 +166,7 @@ struct qmc_xfer_desc { union { void (*tx_complete)(void *context); - void (*rx_complete)(void *context, size_t length); + void (*rx_complete)(void *context, size_t length, unsigned int flags); }; void *context; }; @@ -175,9 +175,12 @@ struct qmc_chan { struct list_head list; unsigned int id; struct qmc *qmc; - void *__iomem s_param; + void __iomem *s_param; enum qmc_mode mode; + spinlock_t ts_lock; /* Protect timeslots */ + u64 tx_ts_mask_avail; u64 tx_ts_mask; + u64 rx_ts_mask_avail; u64 rx_ts_mask; bool is_reverse_data; @@ -203,9 +206,9 @@ struct qmc_chan { struct qmc { struct device *dev; struct tsa_serial *tsa_serial; - void *__iomem scc_regs; - void *__iomem scc_pram; - void *__iomem dpram; + void __iomem *scc_regs; + void __iomem *scc_pram; + void __iomem *dpram; u16 scc_pram_offset; cbd_t __iomem *bd_table; dma_addr_t bd_dma_addr; @@ -214,41 +217,47 @@ struct qmc { u16 __iomem *int_curr; dma_addr_t int_dma_addr; size_t int_size; + bool is_tsa_64rxtx; struct list_head chan_head; struct qmc_chan *chans[64]; }; -static inline void qmc_write16(void *__iomem addr, u16 val) +static void qmc_write16(void __iomem *addr, u16 val) { iowrite16be(val, addr); } -static inline u16 qmc_read16(void *__iomem addr) +static u16 qmc_read16(void __iomem *addr) { return ioread16be(addr); } -static inline void qmc_setbits16(void *__iomem addr, u16 set) +static void qmc_setbits16(void __iomem *addr, u16 set) { qmc_write16(addr, qmc_read16(addr) | set); } -static inline void qmc_clrbits16(void *__iomem addr, u16 clr) +static void qmc_clrbits16(void __iomem *addr, u16 clr) { qmc_write16(addr, qmc_read16(addr) & ~clr); } -static inline void qmc_write32(void *__iomem addr, u32 val) +static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set) +{ + qmc_write16(addr, (qmc_read16(addr) & ~clr) | set); +} + +static void qmc_write32(void __iomem *addr, u32 val) { iowrite32be(val, addr); } -static inline u32 qmc_read32(void *__iomem addr) +static u32 qmc_read32(void __iomem *addr) { return ioread32be(addr); } -static inline void qmc_setbits32(void *__iomem addr, u32 set) +static void qmc_setbits32(void __iomem *addr, u32 set) { qmc_write32(addr, qmc_read32(addr) | set); } @@ -257,6 +266,7 @@ static inline void qmc_setbits32(void *__iomem addr, u32 set) int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info) { struct tsa_serial_info tsa_info; + unsigned long flags; int ret; /* Retrieve info from the TSA related serial */ @@ -264,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info) if (ret) return ret; + spin_lock_irqsave(&chan->ts_lock, flags); + info->mode = chan->mode; info->rx_fs_rate = tsa_info.rx_fs_rate; info->rx_bit_rate = tsa_info.rx_bit_rate; @@ -272,10 +284,63 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info) info->tx_bit_rate = tsa_info.tx_bit_rate; info->nb_rx_ts = hweight64(chan->rx_ts_mask); + spin_unlock_irqrestore(&chan->ts_lock, flags); + return 0; } EXPORT_SYMBOL(qmc_chan_get_info); +int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info) +{ + unsigned long flags; + + 
spin_lock_irqsave(&chan->ts_lock, flags); + + ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail; + ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail; + ts_info->rx_ts_mask = chan->rx_ts_mask; + ts_info->tx_ts_mask = chan->tx_ts_mask; + + spin_unlock_irqrestore(&chan->ts_lock, flags); + + return 0; +} +EXPORT_SYMBOL(qmc_chan_get_ts_info); + +int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info) +{ + unsigned long flags; + int ret; + + /* Only a subset of available timeslots is allowed */ + if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask) + return -EINVAL; + if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask) + return -EINVAL; + + /* In case of common rx/tx table, rx/tx masks must be identical */ + if (chan->qmc->is_tsa_64rxtx) { + if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) + return -EINVAL; + } + + spin_lock_irqsave(&chan->ts_lock, flags); + + if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) || + (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) { + dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n"); + ret = -EBUSY; + } else { + chan->tx_ts_mask = ts_info->tx_ts_mask; + chan->rx_ts_mask = ts_info->rx_ts_mask; + ret = 0; + } + spin_unlock_irqrestore(&chan->ts_lock, flags); + + return ret; +} +EXPORT_SYMBOL(qmc_chan_set_ts_info); + int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param) { if (param->mode != chan->mode) @@ -318,7 +383,7 @@ int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length, { struct qmc_xfer_desc *xfer_desc; unsigned long flags; - cbd_t *__iomem bd; + cbd_t __iomem *bd; u16 ctrl; int ret; @@ -374,7 +439,7 @@ static void qmc_chan_write_done(struct qmc_chan *chan) void (*complete)(void *context); unsigned long flags; void *context; - cbd_t *__iomem bd; + cbd_t __iomem *bd; u16 ctrl; /* @@ -421,11 +486,12 @@ end: } int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length, - void (*complete)(void *context, size_t length), void *context) + void (*complete)(void *context, size_t length, unsigned int flags), + void *context) { struct qmc_xfer_desc *xfer_desc; unsigned long flags; - cbd_t *__iomem bd; + cbd_t __iomem *bd; u16 ctrl; int ret; @@ -454,6 +520,10 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length, xfer_desc->rx_complete = complete; xfer_desc->context = context; + /* Clear previous status flags */ + ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO | + QMC_BD_RX_AB | QMC_BD_RX_CR); + /* Activate the descriptor */ ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB); wmb(); /* Be sure to flush data before descriptor activation */ @@ -485,10 +555,10 @@ EXPORT_SYMBOL(qmc_chan_read_submit); static void qmc_chan_read_done(struct qmc_chan *chan) { - void (*complete)(void *context, size_t size); + void (*complete)(void *context, size_t size, unsigned int flags); struct qmc_xfer_desc *xfer_desc; unsigned long flags; - cbd_t *__iomem bd; + cbd_t __iomem *bd; void *context; u16 datalen; u16 ctrl; @@ -527,7 +597,23 @@ static void qmc_chan_read_done(struct qmc_chan *chan) if (complete) { spin_unlock_irqrestore(&chan->rx_lock, flags); - complete(context, datalen); + + /* + * Avoid conversion between internal hardware flags and + * the software API flags. 
+ * -> Be sure that the software API flags are consistent + * with the hardware flags + */ + BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L); + BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F); + BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG); + BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO); + BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB); + BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR); + + complete(context, datalen, + ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | + QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR)); spin_lock_irqsave(&chan->rx_lock, flags); } @@ -539,6 +625,155 @@ end: spin_unlock_irqrestore(&chan->rx_lock, flags); } +static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info, + bool enable) +{ + unsigned int i; + u16 curr; + u16 val; + + /* + * Use a common Tx/Rx 64 entries table. + * Tx and Rx related stuffs must be identical + */ + if (chan->tx_ts_mask != chan->rx_ts_mask) { + dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id); + return -EINVAL; + } + + val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id); + + /* Check entries based on Rx stuff*/ + for (i = 0; i < info->nb_rx_ts; i++) { + if (!(chan->rx_ts_mask & (((u64)1) << i))) + continue; + + curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2)); + if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) { + dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n", + chan->id, i); + return -EBUSY; + } + } + + /* Set entries based on Rx stuff*/ + for (i = 0; i < info->nb_rx_ts; i++) { + if (!(chan->rx_ts_mask & (((u64)1) << i))) + continue; + + qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), + ~QMC_TSA_WRAP, enable ? val : 0x0000); + } + + return 0; +} + +static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info, + bool enable) +{ + unsigned int i; + u16 curr; + u16 val; + + /* Use a Rx 32 entries table */ + + val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id); + + /* Check entries based on Rx stuff */ + for (i = 0; i < info->nb_rx_ts; i++) { + if (!(chan->rx_ts_mask & (((u64)1) << i))) + continue; + + curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2)); + if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) { + dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n", + chan->id, i); + return -EBUSY; + } + } + + /* Set entries based on Rx stuff */ + for (i = 0; i < info->nb_rx_ts; i++) { + if (!(chan->rx_ts_mask & (((u64)1) << i))) + continue; + + qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), + ~QMC_TSA_WRAP, enable ? 
val : 0x0000); + } + + return 0; +} + +static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info, + bool enable) +{ + unsigned int i; + u16 curr; + u16 val; + + /* Use a Tx 32 entries table */ + + val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id); + + /* Check entries based on Tx stuff */ + for (i = 0; i < info->nb_tx_ts; i++) { + if (!(chan->tx_ts_mask & (((u64)1) << i))) + continue; + + curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2)); + if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) { + dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n", + chan->id, i); + return -EBUSY; + } + } + + /* Set entries based on Tx stuff */ + for (i = 0; i < info->nb_tx_ts; i++) { + if (!(chan->tx_ts_mask & (((u64)1) << i))) + continue; + + qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), + ~QMC_TSA_WRAP, enable ? val : 0x0000); + } + + return 0; +} + +static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable) +{ + struct tsa_serial_info info; + int ret; + + /* Retrieve info from the TSA related serial */ + ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info); + if (ret) + return ret; + + /* Setup entries */ + if (chan->qmc->is_tsa_64rxtx) + return qmc_chan_setup_tsa_64rxtx(chan, &info, enable); + + return qmc_chan_setup_tsa_32tx(chan, &info, enable); +} + +static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable) +{ + struct tsa_serial_info info; + int ret; + + /* Retrieve info from the TSA related serial */ + ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info); + if (ret) + return ret; + + /* Setup entries */ + if (chan->qmc->is_tsa_64rxtx) + return qmc_chan_setup_tsa_64rxtx(chan, &info, enable); + + return qmc_chan_setup_tsa_32rx(chan, &info, enable); +} + static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode) { return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E); @@ -551,6 +786,12 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan) spin_lock_irqsave(&chan->rx_lock, flags); + if (chan->is_rx_stopped) { + /* The channel is already stopped -> simply return ok */ + ret = 0; + goto end; + } + /* Send STOP RECEIVE command */ ret = qmc_chan_command(chan, 0x0); if (ret) { @@ -561,6 +802,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan) chan->is_rx_stopped = true; + if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) { + ret = qmc_chan_setup_tsa_rx(chan, false); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n", + chan->id, ret); + goto end; + } + } + end: spin_unlock_irqrestore(&chan->rx_lock, flags); return ret; @@ -573,6 +823,12 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan) spin_lock_irqsave(&chan->tx_lock, flags); + if (chan->is_tx_stopped) { + /* The channel is already stopped -> simply return ok */ + ret = 0; + goto end; + } + /* Send STOP TRANSMIT command */ ret = qmc_chan_command(chan, 0x1); if (ret) { @@ -583,37 +839,114 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan) chan->is_tx_stopped = true; + if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) { + ret = qmc_chan_setup_tsa_tx(chan, false); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n", + chan->id, ret); + goto end; + } + } + end: spin_unlock_irqrestore(&chan->tx_lock, flags); return ret; } +static int qmc_chan_start_rx(struct qmc_chan *chan); + int qmc_chan_stop(struct qmc_chan *chan, int direction) { - int ret; + bool is_rx_rollback_needed = false; + unsigned long flags; + int ret = 0; + + 
spin_lock_irqsave(&chan->ts_lock, flags); if (direction & QMC_CHAN_READ) { + is_rx_rollback_needed = !chan->is_rx_stopped; ret = qmc_chan_stop_rx(chan); if (ret) - return ret; + goto end; } if (direction & QMC_CHAN_WRITE) { ret = qmc_chan_stop_tx(chan); - if (ret) - return ret; + if (ret) { + /* Restart rx if needed */ + if (is_rx_rollback_needed) + qmc_chan_start_rx(chan); + goto end; + } } - return 0; +end: + spin_unlock_irqrestore(&chan->ts_lock, flags); + return ret; } EXPORT_SYMBOL(qmc_chan_stop); -static void qmc_chan_start_rx(struct qmc_chan *chan) +static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan) +{ + struct tsa_serial_info info; + u16 first_rx, last_tx; + u16 trnsync; + int ret; + + /* Retrieve info from the TSA related serial */ + ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info); + if (ret) + return ret; + + /* Find the first Rx TS allocated to the channel */ + first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0; + + /* Find the last Tx TS allocated to the channel */ + last_tx = fls64(chan->tx_ts_mask); + + trnsync = 0; + if (info.nb_rx_ts) + trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2); + if (info.nb_tx_ts) + trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2); + + qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync); + + dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n", + chan->id, trnsync, + first_rx, info.nb_rx_ts, chan->rx_ts_mask, + last_tx, info.nb_tx_ts, chan->tx_ts_mask); + + return 0; +} + +static int qmc_chan_start_rx(struct qmc_chan *chan) { unsigned long flags; + int ret; spin_lock_irqsave(&chan->rx_lock, flags); + if (!chan->is_rx_stopped) { + /* The channel is already started -> simply return ok */ + ret = 0; + goto end; + } + + ret = qmc_chan_setup_tsa_rx(chan, true); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n", + chan->id, ret); + goto end; + } + + ret = qmc_setup_chan_trnsync(chan->qmc, chan); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", + chan->id, ret); + goto end; + } + /* Restart the receiver */ if (chan->mode == QMC_TRANSPARENT) qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080); @@ -624,15 +957,38 @@ static void qmc_chan_start_rx(struct qmc_chan *chan) chan->is_rx_stopped = false; +end: spin_unlock_irqrestore(&chan->rx_lock, flags); + return ret; } -static void qmc_chan_start_tx(struct qmc_chan *chan) +static int qmc_chan_start_tx(struct qmc_chan *chan) { unsigned long flags; + int ret; spin_lock_irqsave(&chan->tx_lock, flags); + if (!chan->is_tx_stopped) { + /* The channel is already started -> simply return ok */ + ret = 0; + goto end; + } + + ret = qmc_chan_setup_tsa_tx(chan, true); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n", + chan->id, ret); + goto end; + } + + ret = qmc_setup_chan_trnsync(chan->qmc, chan); + if (ret) { + dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n", + chan->id, ret); + goto end; + } + /* * Enable channel transmitter as it could be disabled if * qmc_chan_reset() was called. 
@@ -644,18 +1000,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan) chan->is_tx_stopped = false; +end: spin_unlock_irqrestore(&chan->tx_lock, flags); + return ret; } int qmc_chan_start(struct qmc_chan *chan, int direction) { - if (direction & QMC_CHAN_READ) - qmc_chan_start_rx(chan); + bool is_rx_rollback_needed = false; + unsigned long flags; + int ret = 0; - if (direction & QMC_CHAN_WRITE) - qmc_chan_start_tx(chan); + spin_lock_irqsave(&chan->ts_lock, flags); - return 0; + if (direction & QMC_CHAN_READ) { + is_rx_rollback_needed = chan->is_rx_stopped; + ret = qmc_chan_start_rx(chan); + if (ret) + goto end; + } + + if (direction & QMC_CHAN_WRITE) { + ret = qmc_chan_start_tx(chan); + if (ret) { + /* Restop rx if needed */ + if (is_rx_rollback_needed) + qmc_chan_stop_rx(chan); + goto end; + } + } + +end: + spin_unlock_irqrestore(&chan->ts_lock, flags); + return ret; } EXPORT_SYMBOL(qmc_chan_start); @@ -663,7 +1040,7 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan) { struct qmc_xfer_desc *xfer_desc; unsigned long flags; - cbd_t *__iomem bd; + cbd_t __iomem *bd; u16 ctrl; spin_lock_irqsave(&chan->rx_lock, flags); @@ -685,7 +1062,6 @@ static void qmc_chan_reset_rx(struct qmc_chan *chan) qmc_read16(chan->s_param + QMC_SPE_RBASE)); chan->rx_pending = 0; - chan->is_rx_stopped = false; spin_unlock_irqrestore(&chan->rx_lock, flags); } @@ -694,7 +1070,7 @@ static void qmc_chan_reset_tx(struct qmc_chan *chan) { struct qmc_xfer_desc *xfer_desc; unsigned long flags; - cbd_t *__iomem bd; + cbd_t __iomem *bd; u16 ctrl; spin_lock_irqsave(&chan->tx_lock, flags); @@ -741,10 +1117,7 @@ EXPORT_SYMBOL(qmc_chan_reset); static int qmc_check_chans(struct qmc *qmc) { struct tsa_serial_info info; - bool is_one_table = false; struct qmc_chan *chan; - u64 tx_ts_mask = 0; - u64 rx_ts_mask = 0; u64 tx_ts_assigned_mask; u64 rx_ts_assigned_mask; int ret; @@ -768,38 +1141,21 @@ static int qmc_check_chans(struct qmc *qmc) dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n"); return -EINVAL; } - is_one_table = true; } tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1; rx_ts_assigned_mask = info.nb_rx_ts == 64 ? 
U64_MAX : (((u64)1) << info.nb_rx_ts) - 1; list_for_each_entry(chan, &qmc->chan_head, list) { - if (chan->tx_ts_mask > tx_ts_assigned_mask) { - dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id); - return -EINVAL; - } - if (tx_ts_mask & chan->tx_ts_mask) { - dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id); + if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) { + dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id); return -EINVAL; } - if (chan->rx_ts_mask > rx_ts_assigned_mask) { - dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id); - return -EINVAL; - } - if (rx_ts_mask & chan->rx_ts_mask) { - dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id); + if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) { + dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id); return -EINVAL; } - - if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) { - dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id); - return -EINVAL; - } - - tx_ts_mask |= chan->tx_ts_mask; - rx_ts_mask |= chan->rx_ts_mask; } return 0; @@ -845,6 +1201,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np) } chan->id = chan_id; + spin_lock_init(&chan->ts_lock); spin_lock_init(&chan->rx_lock); spin_lock_init(&chan->tx_lock); @@ -855,7 +1212,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np) of_node_put(chan_np); return ret; } - chan->tx_ts_mask = ts_mask; + chan->tx_ts_mask_avail = ts_mask; + chan->tx_ts_mask = chan->tx_ts_mask_avail; ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask); if (ret) { @@ -864,7 +1222,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np) of_node_put(chan_np); return ret; } - chan->rx_ts_mask = ts_mask; + chan->rx_ts_mask_avail = ts_mask; + chan->rx_ts_mask = chan->rx_ts_mask_avail; mode = "transparent"; ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode); @@ -895,9 +1254,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np) return qmc_check_chans(qmc); } -static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info) +static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info) { - struct qmc_chan *chan; unsigned int i; u16 val; @@ -906,23 +1264,12 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i * Everything was previously checked, Tx and Rx related stuffs are * identical -> Used Rx related stuff to build the table */ + qmc->is_tsa_64rxtx = true; /* Invalidate all entries */ for (i = 0; i < 64; i++) qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000); - /* Set entries based on Rx stuff*/ - list_for_each_entry(chan, &qmc->chan_head, list) { - for (i = 0; i < info->nb_rx_ts; i++) { - if (!(chan->rx_ts_mask & (((u64)1) << i))) - continue; - - val = QMC_TSA_VALID | QMC_TSA_MASK | - QMC_TSA_CHANNEL(chan->id); - qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val); - } - } - /* Set Wrap bit on last entry */ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2), QMC_TSA_WRAP); @@ -937,9 +1284,8 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i return 0; } -static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info) +static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info) { - struct qmc_chan *chan; unsigned int i; u16 val; @@ -947,6 +1293,7 @@ static int 
qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info * Use a Tx 32 entries table and a Rx 32 entries table. * Everything was previously checked. */ + qmc->is_tsa_64rxtx = false; /* Invalidate all entries */ for (i = 0; i < 32; i++) { @@ -954,28 +1301,6 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000); } - /* Set entries based on Rx and Tx stuff*/ - list_for_each_entry(chan, &qmc->chan_head, list) { - /* Rx part */ - for (i = 0; i < info->nb_rx_ts; i++) { - if (!(chan->rx_ts_mask & (((u64)1) << i))) - continue; - - val = QMC_TSA_VALID | QMC_TSA_MASK | - QMC_TSA_CHANNEL(chan->id); - qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val); - } - /* Tx part */ - for (i = 0; i < info->nb_tx_ts; i++) { - if (!(chan->tx_ts_mask & (((u64)1) << i))) - continue; - - val = QMC_TSA_VALID | QMC_TSA_MASK | - QMC_TSA_CHANNEL(chan->id); - qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val); - } - } - /* Set Wrap bit on last entries */ qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2), QMC_TSA_WRAP); @@ -995,7 +1320,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info return 0; } -static int qmc_setup_tsa(struct qmc *qmc) +static int qmc_init_tsa(struct qmc *qmc) { struct tsa_serial_info info; int ret; @@ -1006,46 +1331,12 @@ static int qmc_setup_tsa(struct qmc *qmc) return ret; /* - * Setup one common 64 entries table or two 32 entries (one for Tx and - * one for Tx) according to assigned TS numbers. + * Initialize one common 64 entries table or two 32 entries (one for Tx + * and one for Tx) according to assigned TS numbers. */ return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ? - qmc_setup_tsa_64rxtx(qmc, &info) : - qmc_setup_tsa_32rx_32tx(qmc, &info); -} - -static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan) -{ - struct tsa_serial_info info; - u16 first_rx, last_tx; - u16 trnsync; - int ret; - - /* Retrieve info from the TSA related serial */ - ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info); - if (ret) - return ret; - - /* Find the first Rx TS allocated to the channel */ - first_rx = chan->rx_ts_mask ? 
__ffs64(chan->rx_ts_mask) + 1 : 0; - - /* Find the last Tx TS allocated to the channel */ - last_tx = fls64(chan->tx_ts_mask); - - trnsync = 0; - if (info.nb_rx_ts) - trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2); - if (info.nb_tx_ts) - trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2); - - qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync); - - dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n", - chan->id, trnsync, - first_rx, info.nb_rx_ts, chan->rx_ts_mask, - last_tx, info.nb_tx_ts, chan->tx_ts_mask); - - return 0; + qmc_init_tsa_64rxtx(qmc, &info) : + qmc_init_tsa_32rx_32tx(qmc, &info); } static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan) @@ -1367,7 +1658,7 @@ static int qmc_probe(struct platform_device *pdev) qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3); qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8); - ret = qmc_setup_tsa(qmc); + ret = qmc_init_tsa(qmc); if (ret) goto err_tsa_serial_disconnect; @@ -1405,8 +1696,16 @@ static int qmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, qmc); + /* Populate channel related devices */ + ret = devm_of_platform_populate(qmc->dev); + if (ret) + goto err_disable_txrx; + return 0; +err_disable_txrx: + qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0); + err_disable_intr: qmc_write16(qmc->scc_regs + SCC_SCCM, 0); @@ -1445,26 +1744,16 @@ static struct platform_driver qmc_driver = { }; module_platform_driver(qmc_driver); -struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name) +static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index) { - struct of_phandle_args out_args; struct platform_device *pdev; struct qmc_chan *qmc_chan; struct qmc *qmc; - int ret; - - ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, - &out_args); - if (ret < 0) - return ERR_PTR(ret); - if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) { - of_node_put(out_args.np); + if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np)) return ERR_PTR(-EINVAL); - } - pdev = of_find_device_by_node(out_args.np); - of_node_put(out_args.np); + pdev = of_find_device_by_node(qmc_np); if (!pdev) return ERR_PTR(-ENODEV); @@ -1474,17 +1763,12 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan return ERR_PTR(-EPROBE_DEFER); } - if (out_args.args_count != 1) { + if (chan_index >= ARRAY_SIZE(qmc->chans)) { platform_device_put(pdev); return ERR_PTR(-EINVAL); } - if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) { - platform_device_put(pdev); - return ERR_PTR(-EINVAL); - } - - qmc_chan = qmc->chans[out_args.args[0]]; + qmc_chan = qmc->chans[chan_index]; if (!qmc_chan) { platform_device_put(pdev); return ERR_PTR(-ENOENT); @@ -1492,8 +1776,44 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan return qmc_chan; } + +struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name) +{ + struct of_phandle_args out_args; + struct qmc_chan *qmc_chan; + int ret; + + ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, + &out_args); + if (ret < 0) + return ERR_PTR(ret); + + if (out_args.args_count != 1) { + of_node_put(out_args.np); + return ERR_PTR(-EINVAL); + } + + qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]); + of_node_put(out_args.np); + return qmc_chan; +} EXPORT_SYMBOL(qmc_chan_get_byphandle); +struct qmc_chan *qmc_chan_get_bychild(struct device_node *np) +{ + struct device_node 
*qmc_np; + u32 chan_index; + int ret; + + qmc_np = np->parent; + ret = of_property_read_u32(np, "reg", &chan_index); + if (ret) + return ERR_PTR(-EINVAL); + + return qmc_chan_get_from_qmc(qmc_np, chan_index); +} +EXPORT_SYMBOL(qmc_chan_get_bychild); + void qmc_chan_put(struct qmc_chan *chan) { put_device(chan->qmc->dev); @@ -1530,6 +1850,28 @@ struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev, } EXPORT_SYMBOL(devm_qmc_chan_get_byphandle); +struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev, + struct device_node *np) +{ + struct qmc_chan *qmc_chan; + struct qmc_chan **dr; + + dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL); + if (!dr) + return ERR_PTR(-ENOMEM); + + qmc_chan = qmc_chan_get_bychild(np); + if (!IS_ERR(qmc_chan)) { + *dr = qmc_chan; + devres_add(dev, dr); + } else { + devres_free(dr); + } + + return qmc_chan; +} +EXPORT_SYMBOL(devm_qmc_chan_get_bychild); + MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>"); MODULE_DESCRIPTION("CPM QMC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/fsl/qe/tsa.c b/drivers/soc/fsl/qe/tsa.c index 3f9981335590..6c5741cf5e9d 100644 --- a/drivers/soc/fsl/qe/tsa.c +++ b/drivers/soc/fsl/qe/tsa.c @@ -98,9 +98,9 @@ #define TSA_SIRP 0x10 struct tsa_entries_area { - void *__iomem entries_start; - void *__iomem entries_next; - void *__iomem last_entry; + void __iomem *entries_start; + void __iomem *entries_next; + void __iomem *last_entry; }; struct tsa_tdm { @@ -117,8 +117,8 @@ struct tsa_tdm { struct tsa { struct device *dev; - void *__iomem si_regs; - void *__iomem si_ram; + void __iomem *si_regs; + void __iomem *si_ram; resource_size_t si_ram_sz; spinlock_t lock; int tdms; /* TSA_TDMx ORed */ @@ -135,27 +135,27 @@ static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial) return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]); } -static inline void tsa_write32(void *__iomem addr, u32 val) +static inline void tsa_write32(void __iomem *addr, u32 val) { iowrite32be(val, addr); } -static inline void tsa_write8(void *__iomem addr, u32 val) +static inline void tsa_write8(void __iomem *addr, u32 val) { iowrite8(val, addr); } -static inline u32 tsa_read32(void *__iomem addr) +static inline u32 tsa_read32(void __iomem *addr) { return ioread32be(addr); } -static inline void tsa_clrbits32(void *__iomem addr, u32 clr) +static inline void tsa_clrbits32(void __iomem *addr, u32 clr) { tsa_write32(addr, tsa_read32(addr) & ~clr); } -static inline void tsa_clrsetbits32(void *__iomem addr, u32 clr, u32 set) +static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set) { tsa_write32(addr, (tsa_read32(addr) & ~clr) | set); } @@ -313,7 +313,7 @@ static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id) static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area, u32 count, u32 serial_id) { - void *__iomem addr; + void __iomem *addr; u32 left; u32 val; u32 cnt; diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c index e31791659560..9ff70b38e5e9 100644 --- a/drivers/soc/hisilicon/kunpeng_hccs.c +++ b/drivers/soc/hisilicon/kunpeng_hccs.c @@ -85,8 +85,10 @@ static int hccs_get_pcc_chan_id(struct hccs_dev *hdev) struct hccs_register_ctx ctx = {0}; acpi_status status; - if (!acpi_has_method(handle, METHOD_NAME__CRS)) + if (!acpi_has_method(handle, METHOD_NAME__CRS)) { + dev_err(hdev->dev, "No _CRS method.\n"); return -ENODEV; + } ctx.dev = hdev->dev; status = acpi_walk_resources(handle, METHOD_NAME__CRS, @@ -108,6 +110,14 @@ 
static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret) *(u8 *)msg, ret); } +static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg) +{ + struct hccs_mbox_client_info *cl_info = + container_of(cl, struct hccs_mbox_client_info, client); + + complete(&cl_info->done); +} + static void hccs_unregister_pcc_channel(struct hccs_dev *hdev) { struct hccs_mbox_client_info *cl_info = &hdev->cl_info; @@ -129,6 +139,9 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev) cl->tx_block = false; cl->knows_txdone = true; cl->tx_done = hccs_chan_tx_done; + cl->rx_callback = hdev->verspec_data->rx_callback; + init_completion(&cl_info->done); + pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id); if (IS_ERR(pcc_chan)) { dev_err(dev, "PPC channel request failed.\n"); @@ -145,17 +158,23 @@ static int hccs_register_pcc_channel(struct hccs_dev *hdev) */ cl_info->deadline_us = HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency; - if (cl_info->mbox_chan->mbox->txdone_irq) { + if (!hdev->verspec_data->has_txdone_irq && + cl_info->mbox_chan->mbox->txdone_irq) { dev_err(dev, "PCC IRQ in PCCT is enabled.\n"); rc = -EINVAL; goto err_mbx_channel_free; + } else if (hdev->verspec_data->has_txdone_irq && + !cl_info->mbox_chan->mbox->txdone_irq) { + dev_err(dev, "PCC IRQ in PCCT isn't supported.\n"); + rc = -EINVAL; + goto err_mbx_channel_free; } if (pcc_chan->shmem_base_addr) { cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr, pcc_chan->shmem_size); if (!cl_info->pcc_comm_addr) { - dev_err(dev, "Failed to ioremap PCC communication region for channel-%d.\n", + dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n", hdev->chan_id); rc = -ENOMEM; goto err_mbx_channel_free; @@ -170,7 +189,7 @@ out: return rc; } -static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev) +static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev) { struct hccs_mbox_client_info *cl_info = &hdev->cl_info; struct acpi_pcct_shared_memory __iomem *comm_base = @@ -192,30 +211,74 @@ static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev) return ret; } +static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev) +{ + struct hccs_mbox_client_info *cl_info = &hdev->cl_info; + + if (!wait_for_completion_timeout(&cl_info->done, + usecs_to_jiffies(cl_info->deadline_us))) { + dev_err(hdev->dev, "PCC command executed timeout!\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev, + u8 cmd, + struct hccs_desc *desc, + void __iomem *comm_space, + u16 space_size) +{ + struct acpi_pcct_shared_memory tmp = { + .signature = PCC_SIGNATURE | hdev->chan_id, + .command = cmd, + .status = 0, + }; + + memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp, + sizeof(struct acpi_pcct_shared_memory)); + + /* Copy the message to the PCC comm space */ + memcpy_toio(comm_space, (void *)desc, space_size); +} + +static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev, + u8 cmd, + struct hccs_desc *desc, + void __iomem *comm_space, + u16 space_size) +{ + struct acpi_pcct_ext_pcc_shared_memory tmp = { + .signature = PCC_SIGNATURE | hdev->chan_id, + .flags = PCC_CMD_COMPLETION_NOTIFY, + .length = HCCS_PCC_SHARE_MEM_BYTES, + .command = cmd, + }; + + memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp, + sizeof(struct acpi_pcct_ext_pcc_shared_memory)); + + /* Copy the message to the PCC comm space */ + memcpy_toio(comm_space, (void *)desc, space_size); +} + static int hccs_pcc_cmd_send(struct hccs_dev 
*hdev, u8 cmd, struct hccs_desc *desc) { + const struct hccs_verspecific_data *verspec_data = hdev->verspec_data; struct hccs_mbox_client_info *cl_info = &hdev->cl_info; - void __iomem *comm_space = cl_info->pcc_comm_addr + - sizeof(struct acpi_pcct_shared_memory); struct hccs_fw_inner_head *fw_inner_head; - struct acpi_pcct_shared_memory tmp = {0}; - u16 comm_space_size; + void __iomem *comm_space; + u16 space_size; int ret; - /* Write signature for this subspace */ - tmp.signature = PCC_SIGNATURE | hdev->chan_id; - /* Write to the shared command region */ - tmp.command = cmd; - /* Clear cmd complete bit */ - tmp.status = 0; - memcpy_toio(cl_info->pcc_comm_addr, (void *)&tmp, - sizeof(struct acpi_pcct_shared_memory)); - - /* Copy the message to the PCC comm space */ - comm_space_size = HCCS_PCC_SHARE_MEM_BYTES - - sizeof(struct acpi_pcct_shared_memory); - memcpy_toio(comm_space, (void *)desc, comm_space_size); + comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size; + space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size; + verspec_data->fill_pcc_shared_mem(hdev, cmd, desc, + comm_space, space_size); + if (verspec_data->has_txdone_irq) + reinit_completion(&cl_info->done); /* Ring doorbell */ ret = mbox_send_message(cl_info->mbox_chan, &cmd); @@ -225,13 +288,12 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd, goto end; } - /* Wait for completion */ - ret = hccs_check_chan_cmd_complete(hdev); + ret = verspec_data->wait_cmd_complete(hdev); if (ret) goto end; /* Copy response data */ - memcpy_fromio((void *)desc, comm_space, comm_space_size); + memcpy_fromio((void *)desc, comm_space, space_size); fw_inner_head = &desc->rsp.fw_inner_head; if (fw_inner_head->retStatus) { dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n", @@ -240,7 +302,10 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd, } end: - mbox_client_txdone(cl_info->mbox_chan, ret); + if (verspec_data->has_txdone_irq) + mbox_chan_txdone(cl_info->mbox_chan, ret); + else + mbox_client_txdone(cl_info->mbox_chan, ret); return ret; } @@ -527,7 +592,6 @@ out: static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev) { - struct device *dev = hdev->dev; struct hccs_chip_info *chip; struct hccs_die_info *die; @@ -1097,7 +1161,7 @@ static int hccs_create_hccs_dir(struct hccs_dev *hdev, int ret; ret = kobject_init_and_add(&port->kobj, &hccs_port_type, - &die->kobj, "hccs%d", port->port_id); + &die->kobj, "hccs%u", port->port_id); if (ret) { kobject_put(&port->kobj); return ret; @@ -1115,7 +1179,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev, u16 i; ret = kobject_init_and_add(&die->kobj, &hccs_die_type, - &chip->kobj, "die%d", die->die_id); + &chip->kobj, "die%u", die->die_id); if (ret) { kobject_put(&die->kobj); return ret; @@ -1125,7 +1189,7 @@ static int hccs_create_die_dir(struct hccs_dev *hdev, port = &die->ports[i]; ret = hccs_create_hccs_dir(hdev, die, port); if (ret) { - dev_err(hdev->dev, "create hccs%d dir failed.\n", + dev_err(hdev->dev, "create hccs%u dir failed.\n", port->port_id); goto err; } @@ -1147,7 +1211,7 @@ static int hccs_create_chip_dir(struct hccs_dev *hdev, u16 id; ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type, - &hdev->dev->kobj, "chip%d", chip->chip_id); + &hdev->dev->kobj, "chip%u", chip->chip_id); if (ret) { kobject_put(&chip->kobj); return ret; @@ -1178,7 +1242,7 @@ static int hccs_create_topo_dirs(struct hccs_dev *hdev) chip = &hdev->chips[id]; ret = hccs_create_chip_dir(hdev, chip); if (ret) { - 
dev_err(hdev->dev, "init chip%d dir failed!\n", id); + dev_err(hdev->dev, "init chip%u dir failed!\n", id); goto err; } } @@ -1212,6 +1276,11 @@ static int hccs_probe(struct platform_device *pdev) hdev->dev = &pdev->dev; platform_set_drvdata(pdev, hdev); + /* + * Here would never be failure as the driver and device has been matched. + */ + hdev->verspec_data = acpi_device_get_match_data(hdev->dev); + mutex_init(&hdev->lock); rc = hccs_get_pcc_chan_id(hdev); if (rc) @@ -1248,9 +1317,26 @@ static void hccs_remove(struct platform_device *pdev) hccs_unregister_pcc_channel(hdev); } +static const struct hccs_verspecific_data hisi04b1_verspec_data = { + .rx_callback = NULL, + .wait_cmd_complete = hccs_wait_cmd_complete_by_poll, + .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region, + .shared_mem_size = sizeof(struct acpi_pcct_shared_memory), + .has_txdone_irq = false, +}; + +static const struct hccs_verspecific_data hisi04b2_verspec_data = { + .rx_callback = hccs_pcc_rx_callback, + .wait_cmd_complete = hccs_wait_cmd_complete_by_irq, + .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region, + .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory), + .has_txdone_irq = true, +}; + static const struct acpi_device_id hccs_acpi_match[] = { - { "HISI04B1"}, - { ""}, + { "HISI04B1", (unsigned long)&hisi04b1_verspec_data}, + { "HISI04B2", (unsigned long)&hisi04b2_verspec_data}, + { } }; MODULE_DEVICE_TABLE(acpi, hccs_acpi_match); diff --git a/drivers/soc/hisilicon/kunpeng_hccs.h b/drivers/soc/hisilicon/kunpeng_hccs.h index 6012d2776028..c3adbc01b471 100644 --- a/drivers/soc/hisilicon/kunpeng_hccs.h +++ b/drivers/soc/hisilicon/kunpeng_hccs.h @@ -51,11 +51,26 @@ struct hccs_mbox_client_info { struct pcc_mbox_chan *pcc_chan; u64 deadline_us; void __iomem *pcc_comm_addr; + struct completion done; +}; + +struct hccs_desc; + +struct hccs_verspecific_data { + void (*rx_callback)(struct mbox_client *cl, void *mssg); + int (*wait_cmd_complete)(struct hccs_dev *hdev); + void (*fill_pcc_shared_mem)(struct hccs_dev *hdev, + u8 cmd, struct hccs_desc *desc, + void __iomem *comm_space, + u16 space_size); + u16 shared_mem_size; + bool has_txdone_irq; }; struct hccs_dev { struct device *dev; struct acpi_device *acpi_dev; + const struct hccs_verspecific_data *verspec_data; u64 caps; u8 chip_num; struct hccs_chip_info *chips; diff --git a/drivers/soc/mediatek/mt8188-mmsys.h b/drivers/soc/mediatek/mt8188-mmsys.h index 448cc3761b43..6bebf1a69fc0 100644 --- a/drivers/soc/mediatek/mt8188-mmsys.h +++ b/drivers/soc/mediatek/mt8188-mmsys.h @@ -3,6 +3,10 @@ #ifndef __SOC_MEDIATEK_MT8188_MMSYS_H #define __SOC_MEDIATEK_MT8188_MMSYS_H +#include <linux/soc/mediatek/mtk-mmsys.h> +#include <dt-bindings/reset/mt8188-resets.h> + +#define MT8188_VDO0_SW0_RST_B 0x190 #define MT8188_VDO0_OVL_MOUT_EN 0xf14 #define MT8188_MOUT_DISP_OVL0_TO_DISP_RDMA0 BIT(0) #define MT8188_MOUT_DISP_OVL0_TO_DISP_WDMA0 BIT(1) @@ -67,6 +71,136 @@ #define MT8188_SOUT_DSC_WRAP0_OUT_TO_VPP_MERGE BIT(18) #define MT8188_SOUT_DSC_WRAP0_OUT_TO_DISP_WDMA0 BIT(19) +#define MT8188_VDO1_SW0_RST_B 0x1d0 +#define MT8188_VDO1_HDR_TOP_CFG 0xd00 +#define MT8188_VDO1_MIXER_IN1_ALPHA 0xd30 +#define MT8188_VDO1_MIXER_IN1_PAD 0xd40 +#define MT8188_VDO1_MIXER_VSYNC_LEN 0xd5c +#define MT8188_VDO1_MERGE0_ASYNC_CFG_WD 0xe30 +#define MT8188_VDO1_HDRBE_ASYNC_CFG_WD 0xe70 +#define MT8188_VDO1_VPP_MERGE0_P0_SEL_IN 0xf04 +#define MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 1 +#define MT8188_VDO1_VPP_MERGE0_P1_SEL_IN 0xf08 +#define MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 1 
+#define MT8188_VDO1_DISP_DPI1_SEL_IN 0xf10 +#define MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT 0 +#define MT8188_VDO1_DISP_DP_INTF0_SEL_IN 0xf14 +#define MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT 0 +#define MT8188_VDO1_MERGE4_SOUT_SEL 0xf18 +#define MT8188_MERGE4_SOUT_TO_DPI1_SEL BIT(2) +#define MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL BIT(3) +#define MT8188_VDO1_MIXER_IN1_SEL_IN 0xf24 +#define MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT 1 +#define MT8188_VDO1_MIXER_IN2_SEL_IN 0xf28 +#define MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT 1 +#define MT8188_VDO1_MIXER_IN3_SEL_IN 0xf2c +#define MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT 1 +#define MT8188_VDO1_MIXER_IN4_SEL_IN 0xf30 +#define MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT 1 +#define MT8188_VDO1_MIXER_OUT_SOUT_SEL 0xf34 +#define MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL 1 +#define MT8188_VDO1_VPP_MERGE1_P0_SEL_IN 0xf3c +#define MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 1 +#define MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL 0xf40 +#define MT8188_SOUT_TO_MIXER_IN1_SEL 1 +#define MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL 0xf44 +#define MT8188_SOUT_TO_MIXER_IN2_SEL 1 +#define MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL 0xf48 +#define MT8188_SOUT_TO_MIXER_IN3_SEL 1 +#define MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL 0xf4c +#define MT8188_SOUT_TO_MIXER_IN4_SEL 1 +#define MT8188_VDO1_MERGE4_ASYNC_SEL_IN 0xf50 +#define MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT 1 +#define MT8188_VDO1_MIXER_IN1_SOUT_SEL 0xf58 +#define MT8188_MIXER_IN1_SOUT_TO_DISP_MIXER 0 +#define MT8188_VDO1_MIXER_IN2_SOUT_SEL 0xf5c +#define MT8188_MIXER_IN2_SOUT_TO_DISP_MIXER 0 +#define MT8188_VDO1_MIXER_IN3_SOUT_SEL 0xf60 +#define MT8188_MIXER_IN3_SOUT_TO_DISP_MIXER 0 +#define MT8188_VDO1_MIXER_IN4_SOUT_SEL 0xf64 +#define MT8188_MIXER_IN4_SOUT_TO_DISP_MIXER 0 +#define MT8188_VDO1_MIXER_SOUT_SEL_IN 0xf68 +#define MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER 0 + +static const u8 mmsys_mt8188_vdo0_rst_tb[] = { + [MT8188_VDO0_RST_DISP_OVL0] = MMSYS_RST_NR(0, 0), + [MT8188_VDO0_RST_FAKE_ENG0] = MMSYS_RST_NR(0, 2), + [MT8188_VDO0_RST_DISP_CCORR0] = MMSYS_RST_NR(0, 4), + [MT8188_VDO0_RST_DISP_MUTEX0] = MMSYS_RST_NR(0, 6), + [MT8188_VDO0_RST_DISP_GAMMA0] = MMSYS_RST_NR(0, 8), + [MT8188_VDO0_RST_DISP_DITHER0] = MMSYS_RST_NR(0, 10), + [MT8188_VDO0_RST_DISP_WDMA0] = MMSYS_RST_NR(0, 17), + [MT8188_VDO0_RST_DISP_RDMA0] = MMSYS_RST_NR(0, 19), + [MT8188_VDO0_RST_DSI0] = MMSYS_RST_NR(0, 21), + [MT8188_VDO0_RST_DSI1] = MMSYS_RST_NR(0, 22), + [MT8188_VDO0_RST_DSC_WRAP0] = MMSYS_RST_NR(0, 23), + [MT8188_VDO0_RST_VPP_MERGE0] = MMSYS_RST_NR(0, 24), + [MT8188_VDO0_RST_DP_INTF0] = MMSYS_RST_NR(0, 25), + [MT8188_VDO0_RST_DISP_AAL0] = MMSYS_RST_NR(0, 26), + [MT8188_VDO0_RST_INLINEROT0] = MMSYS_RST_NR(0, 27), + [MT8188_VDO0_RST_APB_BUS] = MMSYS_RST_NR(0, 28), + [MT8188_VDO0_RST_DISP_COLOR0] = MMSYS_RST_NR(0, 29), + [MT8188_VDO0_RST_MDP_WROT0] = MMSYS_RST_NR(0, 30), + [MT8188_VDO0_RST_DISP_RSZ0] = MMSYS_RST_NR(0, 31), +}; + +static const u8 mmsys_mt8188_vdo1_rst_tb[] = { + [MT8188_VDO1_RST_SMI_LARB2] = MMSYS_RST_NR(0, 0), + [MT8188_VDO1_RST_SMI_LARB3] = MMSYS_RST_NR(0, 1), + [MT8188_VDO1_RST_GALS] = MMSYS_RST_NR(0, 2), + [MT8188_VDO1_RST_FAKE_ENG0] = MMSYS_RST_NR(0, 3), + [MT8188_VDO1_RST_FAKE_ENG1] = MMSYS_RST_NR(0, 4), + [MT8188_VDO1_RST_MDP_RDMA0] = MMSYS_RST_NR(0, 5), + [MT8188_VDO1_RST_MDP_RDMA1] = MMSYS_RST_NR(0, 6), + [MT8188_VDO1_RST_MDP_RDMA2] = MMSYS_RST_NR(0, 7), + [MT8188_VDO1_RST_MDP_RDMA3] = MMSYS_RST_NR(0, 8), + [MT8188_VDO1_RST_VPP_MERGE0] = MMSYS_RST_NR(0, 9), + [MT8188_VDO1_RST_VPP_MERGE1] = 
MMSYS_RST_NR(0, 10), + [MT8188_VDO1_RST_VPP_MERGE2] = MMSYS_RST_NR(0, 11), + [MT8188_VDO1_RST_VPP_MERGE3] = MMSYS_RST_NR(1, 0), + [MT8188_VDO1_RST_VPP_MERGE4] = MMSYS_RST_NR(1, 1), + [MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 2), + [MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 3), + [MT8188_VDO1_RST_DISP_MUTEX] = MMSYS_RST_NR(1, 4), + [MT8188_VDO1_RST_MDP_RDMA4] = MMSYS_RST_NR(1, 5), + [MT8188_VDO1_RST_MDP_RDMA5] = MMSYS_RST_NR(1, 6), + [MT8188_VDO1_RST_MDP_RDMA6] = MMSYS_RST_NR(1, 7), + [MT8188_VDO1_RST_MDP_RDMA7] = MMSYS_RST_NR(1, 8), + [MT8188_VDO1_RST_DP_INTF1_MMCK] = MMSYS_RST_NR(1, 9), + [MT8188_VDO1_RST_DPI0_MM_CK] = MMSYS_RST_NR(1, 10), + [MT8188_VDO1_RST_DPI1_MM_CK] = MMSYS_RST_NR(1, 11), + [MT8188_VDO1_RST_MERGE0_DL_ASYNC] = MMSYS_RST_NR(1, 13), + [MT8188_VDO1_RST_MERGE1_DL_ASYNC] = MMSYS_RST_NR(1, 14), + [MT8188_VDO1_RST_MERGE2_DL_ASYNC] = MMSYS_RST_NR(1, 15), + [MT8188_VDO1_RST_MERGE3_DL_ASYNC] = MMSYS_RST_NR(1, 16), + [MT8188_VDO1_RST_MERGE4_DL_ASYNC] = MMSYS_RST_NR(1, 17), + [MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 18), + [MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC] = MMSYS_RST_NR(1, 19), + [MT8188_VDO1_RST_PADDING0] = MMSYS_RST_NR(1, 20), + [MT8188_VDO1_RST_PADDING1] = MMSYS_RST_NR(1, 21), + [MT8188_VDO1_RST_PADDING2] = MMSYS_RST_NR(1, 22), + [MT8188_VDO1_RST_PADDING3] = MMSYS_RST_NR(1, 23), + [MT8188_VDO1_RST_PADDING4] = MMSYS_RST_NR(1, 24), + [MT8188_VDO1_RST_PADDING5] = MMSYS_RST_NR(1, 25), + [MT8188_VDO1_RST_PADDING6] = MMSYS_RST_NR(1, 26), + [MT8188_VDO1_RST_PADDING7] = MMSYS_RST_NR(1, 27), + [MT8188_VDO1_RST_DISP_RSZ0] = MMSYS_RST_NR(1, 28), + [MT8188_VDO1_RST_DISP_RSZ1] = MMSYS_RST_NR(1, 29), + [MT8188_VDO1_RST_DISP_RSZ2] = MMSYS_RST_NR(1, 30), + [MT8188_VDO1_RST_DISP_RSZ3] = MMSYS_RST_NR(1, 31), + [MT8188_VDO1_RST_HDR_VDO_FE0] = MMSYS_RST_NR(2, 0), + [MT8188_VDO1_RST_HDR_GFX_FE0] = MMSYS_RST_NR(2, 1), + [MT8188_VDO1_RST_HDR_VDO_BE] = MMSYS_RST_NR(2, 2), + [MT8188_VDO1_RST_HDR_VDO_FE1] = MMSYS_RST_NR(2, 16), + [MT8188_VDO1_RST_HDR_GFX_FE1] = MMSYS_RST_NR(2, 17), + [MT8188_VDO1_RST_DISP_MIXER] = MMSYS_RST_NR(2, 18), + [MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC] = MMSYS_RST_NR(2, 19), + [MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC] = MMSYS_RST_NR(2, 20), + [MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC] = MMSYS_RST_NR(2, 21), + [MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC] = MMSYS_RST_NR(2, 22), + [MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC] = MMSYS_RST_NR(2, 23), +}; + static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = { { DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0, @@ -146,4 +280,80 @@ static const struct mtk_mmsys_routes mmsys_mt8188_routing_table[] = { }, }; +static const struct mtk_mmsys_routes mmsys_mt8188_vdo1_routing_table[] = { + { + DDP_COMPONENT_MDP_RDMA0, DDP_COMPONENT_MERGE1, + MT8188_VDO1_VPP_MERGE0_P0_SEL_IN, GENMASK(0, 0), + MT8188_VPP_MERGE0_P0_SEL_IN_FROM_MDP_RDMA0 + }, { + DDP_COMPONENT_MDP_RDMA1, DDP_COMPONENT_MERGE1, + MT8188_VDO1_VPP_MERGE0_P1_SEL_IN, GENMASK(0, 0), + MT8188_VPP_MERGE0_P1_SEL_IN_FROM_MDP_RDMA1 + }, { + DDP_COMPONENT_MDP_RDMA2, DDP_COMPONENT_MERGE2, + MT8188_VDO1_VPP_MERGE1_P0_SEL_IN, GENMASK(0, 0), + MT8188_VPP_MERGE1_P0_SEL_IN_FROM_MDP_RDMA2 + }, { + DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MERGE0_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8188_SOUT_TO_MIXER_IN1_SEL + }, { + DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MERGE1_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8188_SOUT_TO_MIXER_IN2_SEL + }, { + DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER, + 
MT8188_VDO1_MERGE2_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8188_SOUT_TO_MIXER_IN3_SEL + }, { + DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MERGE3_ASYNC_SOUT_SEL, GENMASK(1, 0), + MT8188_SOUT_TO_MIXER_IN4_SEL + }, { + DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5, + MT8188_VDO1_MIXER_OUT_SOUT_SEL, GENMASK(0, 0), + MT8188_MIXER_SOUT_TO_MERGE4_ASYNC_SEL + }, { + DDP_COMPONENT_MERGE1, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MIXER_IN1_SEL_IN, GENMASK(0, 0), + MT8188_MIXER_IN1_SEL_IN_FROM_MERGE0_ASYNC_SOUT + }, { + DDP_COMPONENT_MERGE2, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MIXER_IN2_SEL_IN, GENMASK(0, 0), + MT8188_MIXER_IN2_SEL_IN_FROM_MERGE1_ASYNC_SOUT + }, { + DDP_COMPONENT_MERGE3, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MIXER_IN3_SEL_IN, GENMASK(0, 0), + MT8188_MIXER_IN3_SEL_IN_FROM_MERGE2_ASYNC_SOUT + }, { + DDP_COMPONENT_MERGE4, DDP_COMPONENT_ETHDR_MIXER, + MT8188_VDO1_MIXER_IN4_SEL_IN, GENMASK(0, 0), + MT8188_MIXER_IN4_SEL_IN_FROM_MERGE3_ASYNC_SOUT + }, { + DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5, + MT8188_VDO1_MIXER_SOUT_SEL_IN, GENMASK(2, 0), + MT8188_MIXER_SOUT_SEL_IN_FROM_DISP_MIXER + }, { + DDP_COMPONENT_ETHDR_MIXER, DDP_COMPONENT_MERGE5, + MT8188_VDO1_MERGE4_ASYNC_SEL_IN, GENMASK(2, 0), + MT8188_MERGE4_ASYNC_SEL_IN_FROM_MIXER_OUT_SOUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1, + MT8188_VDO1_DISP_DPI1_SEL_IN, GENMASK(1, 0), + MT8188_DISP_DPI1_SEL_IN_FROM_VPP_MERGE4_MOUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DPI1, + MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(1, 0), + MT8188_MERGE4_SOUT_TO_DPI1_SEL + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1, + MT8188_VDO1_DISP_DP_INTF0_SEL_IN, GENMASK(1, 0), + MT8188_DISP_DP_INTF0_SEL_IN_FROM_VPP_MERGE4_MOUT + }, { + DDP_COMPONENT_MERGE5, DDP_COMPONENT_DP_INTF1, + MT8188_VDO1_MERGE4_SOUT_SEL, GENMASK(3, 0), + MT8188_MERGE4_SOUT_TO_DP_INTF0_SEL + } +}; + #endif /* __SOC_MEDIATEK_MT8188_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c index 88209102ff3b..f370f4ec4b88 100644 --- a/drivers/soc/mediatek/mtk-mmsys.c +++ b/drivers/soc/mediatek/mtk-mmsys.c @@ -87,6 +87,29 @@ static const struct mtk_mmsys_driver_data mt8188_vdosys0_driver_data = { .clk_driver = "clk-mt8188-vdo0", .routes = mmsys_mt8188_routing_table, .num_routes = ARRAY_SIZE(mmsys_mt8188_routing_table), + .sw0_rst_offset = MT8188_VDO0_SW0_RST_B, + .rst_tb = mmsys_mt8188_vdo0_rst_tb, + .num_resets = ARRAY_SIZE(mmsys_mt8188_vdo0_rst_tb), +}; + +static const struct mtk_mmsys_driver_data mt8188_vdosys1_driver_data = { + .clk_driver = "clk-mt8188-vdo1", + .routes = mmsys_mt8188_vdo1_routing_table, + .num_routes = ARRAY_SIZE(mmsys_mt8188_vdo1_routing_table), + .sw0_rst_offset = MT8188_VDO1_SW0_RST_B, + .rst_tb = mmsys_mt8188_vdo1_rst_tb, + .num_resets = ARRAY_SIZE(mmsys_mt8188_vdo1_rst_tb), + .vsync_len = 1, +}; + +static const struct mtk_mmsys_driver_data mt8188_vppsys0_driver_data = { + .clk_driver = "clk-mt8188-vpp0", + .is_vppsys = true, +}; + +static const struct mtk_mmsys_driver_data mt8188_vppsys1_driver_data = { + .clk_driver = "clk-mt8188-vpp1", + .is_vppsys = true, }; static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = { @@ -169,6 +192,10 @@ void mtk_mmsys_ddp_connect(struct device *dev, if (cur == routes[i].from_comp && next == routes[i].to_comp) mtk_mmsys_update_bits(mmsys, routes[i].addr, routes[i].mask, routes[i].val, NULL); + + if (mmsys->data->vsync_len) + mtk_mmsys_update_bits(mmsys, MT8188_VDO1_MIXER_VSYNC_LEN, GENMASK(31, 0), + mmsys->data->vsync_len, NULL); } 
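The hunk above extends mtk_mmsys_ddp_connect() so that, besides applying every matching (from, to) route as a masked register update, it also programs the MT8188 mixer VSYNC length whenever the per-SoC driver data carries a non-zero vsync_len. As a rough, stand-alone illustration of that pattern (not the driver itself), the user-space sketch below walks a tiny route table and performs the same read-modify-write; the register offsets, component IDs and fake register file are invented for illustration, and only the masked-update idea mirrors the code in the hunk.

/* Illustrative user-space model of the route-table + masked-update pattern;
 * the component IDs, offsets and register file here are made up. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum comp { COMP_RDMA0, COMP_MERGE1 };

struct route {
	enum comp from, to;
	uint32_t addr;	/* selection register offset (hypothetical) */
	uint32_t mask;	/* bits owned by this mux input */
	uint32_t val;	/* value routing 'from' into 'to' */
};

static const struct route routes[] = {
	{ COMP_RDMA0, COMP_MERGE1, 0xf04, 0x1, 0x1 },
};

static uint32_t regs[0x1000 / 4];	/* fake MMIO space */

/* Read-modify-write, equivalent in spirit to mtk_mmsys_update_bits(). */
static void update_bits(uint32_t addr, uint32_t mask, uint32_t val)
{
	uint32_t cur = regs[addr / 4];

	regs[addr / 4] = (cur & ~mask) | (val & mask);
}

static void connect(enum comp cur, enum comp next, uint32_t vsync_len)
{
	for (size_t i = 0; i < sizeof(routes) / sizeof(routes[0]); i++)
		if (routes[i].from == cur && routes[i].to == next)
			update_bits(routes[i].addr, routes[i].mask,
				    routes[i].val);

	/* MT8188 vdosys1 additionally programs the mixer VSYNC length. */
	if (vsync_len)
		update_bits(0xd5c /* MIXER_VSYNC_LEN offset, per the header */,
			    0xffffffff, vsync_len);
}

int main(void)
{
	connect(COMP_RDMA0, COMP_MERGE1, 1);
	printf("0xf04 = 0x%x, 0xd5c = 0x%x\n",
	       regs[0xf04 / 4], regs[0xd5c / 4]);
	return 0;
}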
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_connect); @@ -302,6 +329,15 @@ static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned l u32 offset; u32 reg; + if (mmsys->data->rst_tb) { + if (id >= mmsys->data->num_resets) { + dev_err(rcdev->dev, "Invalid reset ID: %lu (>=%u)\n", + id, mmsys->data->num_resets); + return -EINVAL; + } + id = mmsys->data->rst_tb[id]; + } + offset = (id / MMSYS_SW_RESET_PER_REG) * sizeof(u32); id = id % MMSYS_SW_RESET_PER_REG; reg = mmsys->data->sw0_rst_offset + offset; @@ -429,6 +465,9 @@ static const struct of_device_id of_match_mtk_mmsys[] = { { .compatible = "mediatek,mt8183-mmsys", .data = &mt8183_mmsys_driver_data }, { .compatible = "mediatek,mt8186-mmsys", .data = &mt8186_mmsys_driver_data }, { .compatible = "mediatek,mt8188-vdosys0", .data = &mt8188_vdosys0_driver_data }, + { .compatible = "mediatek,mt8188-vdosys1", .data = &mt8188_vdosys1_driver_data }, + { .compatible = "mediatek,mt8188-vppsys0", .data = &mt8188_vppsys0_driver_data }, + { .compatible = "mediatek,mt8188-vppsys1", .data = &mt8188_vppsys1_driver_data }, { .compatible = "mediatek,mt8192-mmsys", .data = &mt8192_mmsys_driver_data }, /* "mediatek,mt8195-mmsys" compatible is deprecated */ { .compatible = "mediatek,mt8195-mmsys", .data = &mt8195_vdosys0_driver_data }, diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h index 6725403d2e3a..d370192737ca 100644 --- a/drivers/soc/mediatek/mtk-mmsys.h +++ b/drivers/soc/mediatek/mtk-mmsys.h @@ -78,6 +78,8 @@ #define DSI_SEL_IN_RDMA 0x1 #define DSI_SEL_IN_MASK 0x1 +#define MMSYS_RST_NR(bank, bit) (((bank) * 32) + (bit)) + struct mtk_mmsys_routes { u32 from_comp; u32 to_comp; @@ -86,13 +88,43 @@ struct mtk_mmsys_routes { u32 val; }; +/** + * struct mtk_mmsys_driver_data - Settings of the mmsys + * @clk_driver: Clock driver name that the mmsys is using + * (defined in drivers/clk/mediatek/clk-*.c). + * @routes: Routing table of the mmsys. + * It provides mux settings from one module to another. + * @num_routes: Array size of the routes. + * @sw0_rst_offset: Register offset for the reset control. + * @num_resets: Number of reset bits that are defined + * @is_vppsys: Whether the mmsys is VPPSYS (Video Processing Pipe) + * or VDOSYS (Video). Only VDOSYS needs to be added to drm driver. + * @vsync_len: VSYNC length of the MIXER. + * VSYNC is usually triggered by the connector, so its length is a + * fixed value when the frame rate is decided, but ETHDR and + * MIXER generate their own VSYNC due to hardware design, therefore + * MIXER has to sync with ETHDR by adjusting VSYNC length. + * On MT8195, there is no such setting so we use the gap between + * falling edge and rising edge of SOF (Start of Frame) signal to + * do the job, but since MT8188, VSYNC_LEN setting is introduced to + * solve the problem and is given 0x40 (ticks) as the default value. + * Please notice that this value has to be set to 1 (minimum) if + * ETHDR is bypassed, otherwise MIXER could wait too long and causing + * underflow. + * + * Each MMSYS (multi-media system) may have different settings, they may use + * different clock sources, mux settings, reset control ...etc., and these + * differences are all stored here. 
+ */ struct mtk_mmsys_driver_data { const char *clk_driver; const struct mtk_mmsys_routes *routes; const unsigned int num_routes; const u16 sw0_rst_offset; + const u8 *rst_tb; const u32 num_resets; const bool is_vppsys; + const u8 vsync_len; }; /* diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 9d9f5ae578ac..73c256d3950b 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -133,6 +133,30 @@ #define MT8188_MUTEX_MOD_DISP_POSTMASK0 24 #define MT8188_MUTEX_MOD2_DISP_PWM0 33 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA0 0 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA1 1 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA2 2 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA3 3 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA4 4 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA5 5 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA6 6 +#define MT8188_MUTEX_MOD_DISP1_MDP_RDMA7 7 +#define MT8188_MUTEX_MOD_DISP1_PADDING0 8 +#define MT8188_MUTEX_MOD_DISP1_PADDING1 9 +#define MT8188_MUTEX_MOD_DISP1_PADDING2 10 +#define MT8188_MUTEX_MOD_DISP1_PADDING3 11 +#define MT8188_MUTEX_MOD_DISP1_PADDING4 12 +#define MT8188_MUTEX_MOD_DISP1_PADDING5 13 +#define MT8188_MUTEX_MOD_DISP1_PADDING6 14 +#define MT8188_MUTEX_MOD_DISP1_PADDING7 15 +#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE0 20 +#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE1 21 +#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE2 22 +#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE3 23 +#define MT8188_MUTEX_MOD_DISP1_VPP_MERGE4 24 +#define MT8188_MUTEX_MOD_DISP1_DISP_MIXER 30 +#define MT8188_MUTEX_MOD_DISP1_DP_INTF1 39 + #define MT8195_MUTEX_MOD_DISP_OVL0 0 #define MT8195_MUTEX_MOD_DISP_WDMA0 1 #define MT8195_MUTEX_MOD_DISP_RDMA0 2 @@ -264,6 +288,7 @@ #define MT8183_MUTEX_SOF_DPI0 2 #define MT8188_MUTEX_SOF_DSI0 1 #define MT8188_MUTEX_SOF_DP_INTF0 3 +#define MT8188_MUTEX_SOF_DP_INTF1 4 #define MT8195_MUTEX_SOF_DSI0 1 #define MT8195_MUTEX_SOF_DSI1 2 #define MT8195_MUTEX_SOF_DP_INTF0 3 @@ -275,6 +300,7 @@ #define MT8183_MUTEX_EOF_DPI0 (MT8183_MUTEX_SOF_DPI0 << 6) #define MT8188_MUTEX_EOF_DSI0 (MT8188_MUTEX_SOF_DSI0 << 7) #define MT8188_MUTEX_EOF_DP_INTF0 (MT8188_MUTEX_SOF_DP_INTF0 << 7) +#define MT8188_MUTEX_EOF_DP_INTF1 (MT8188_MUTEX_SOF_DP_INTF1 << 7) #define MT8195_MUTEX_EOF_DSI0 (MT8195_MUTEX_SOF_DSI0 << 7) #define MT8195_MUTEX_EOF_DSI1 (MT8195_MUTEX_SOF_DSI1 << 7) #define MT8195_MUTEX_EOF_DP_INTF0 (MT8195_MUTEX_SOF_DP_INTF0 << 7) @@ -445,6 +471,29 @@ static const unsigned int mt8188_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_DSI0] = MT8188_MUTEX_MOD_DISP_DSI0, [DDP_COMPONENT_PWM0] = MT8188_MUTEX_MOD2_DISP_PWM0, [DDP_COMPONENT_DP_INTF0] = MT8188_MUTEX_MOD_DISP_DP_INTF0, + [DDP_COMPONENT_DP_INTF1] = MT8188_MUTEX_MOD_DISP1_DP_INTF1, + [DDP_COMPONENT_ETHDR_MIXER] = MT8188_MUTEX_MOD_DISP1_DISP_MIXER, + [DDP_COMPONENT_MDP_RDMA0] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA0, + [DDP_COMPONENT_MDP_RDMA1] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA1, + [DDP_COMPONENT_MDP_RDMA2] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA2, + [DDP_COMPONENT_MDP_RDMA3] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA3, + [DDP_COMPONENT_MDP_RDMA4] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA4, + [DDP_COMPONENT_MDP_RDMA5] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA5, + [DDP_COMPONENT_MDP_RDMA6] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA6, + [DDP_COMPONENT_MDP_RDMA7] = MT8188_MUTEX_MOD_DISP1_MDP_RDMA7, + [DDP_COMPONENT_PADDING0] = MT8188_MUTEX_MOD_DISP1_PADDING0, + [DDP_COMPONENT_PADDING1] = MT8188_MUTEX_MOD_DISP1_PADDING1, + [DDP_COMPONENT_PADDING2] = MT8188_MUTEX_MOD_DISP1_PADDING2, + [DDP_COMPONENT_PADDING3] = MT8188_MUTEX_MOD_DISP1_PADDING3, + 
[DDP_COMPONENT_PADDING4] = MT8188_MUTEX_MOD_DISP1_PADDING4, + [DDP_COMPONENT_PADDING5] = MT8188_MUTEX_MOD_DISP1_PADDING5, + [DDP_COMPONENT_PADDING6] = MT8188_MUTEX_MOD_DISP1_PADDING6, + [DDP_COMPONENT_PADDING7] = MT8188_MUTEX_MOD_DISP1_PADDING7, + [DDP_COMPONENT_MERGE1] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE0, + [DDP_COMPONENT_MERGE2] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE1, + [DDP_COMPONENT_MERGE3] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE2, + [DDP_COMPONENT_MERGE4] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE3, + [DDP_COMPONENT_MERGE5] = MT8188_MUTEX_MOD_DISP1_VPP_MERGE4, }; static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -605,6 +654,8 @@ static const unsigned int mt8188_mutex_sof[DDP_MUTEX_SOF_MAX] = { MT8188_MUTEX_SOF_DSI0 | MT8188_MUTEX_EOF_DSI0, [MUTEX_SOF_DP_INTF0] = MT8188_MUTEX_SOF_DP_INTF0 | MT8188_MUTEX_EOF_DP_INTF0, + [MUTEX_SOF_DP_INTF1] = + MT8188_MUTEX_SOF_DP_INTF1 | MT8188_MUTEX_EOF_DP_INTF1, }; static const unsigned int mt8195_mutex_sof[DDP_MUTEX_SOF_MAX] = { diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c index f31e3bedff50..c832f5c670bc 100644 --- a/drivers/soc/mediatek/mtk-svs.c +++ b/drivers/soc/mediatek/mtk-svs.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2022 MediaTek Inc. + * Copyright (C) 2022 Collabora Ltd. + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> */ #include <linux/bitfield.h> @@ -32,16 +34,6 @@ #include <linux/spinlock.h> #include <linux/thermal.h> -/* svs bank 1-line software id */ -#define SVSB_CPU_LITTLE BIT(0) -#define SVSB_CPU_BIG BIT(1) -#define SVSB_CCI BIT(2) -#define SVSB_GPU BIT(3) - -/* svs bank 2-line type */ -#define SVSB_LOW BIT(8) -#define SVSB_HIGH BIT(9) - /* svs bank mode support */ #define SVSB_MODE_ALL_DISABLE 0 #define SVSB_MODE_INIT01 BIT(1) @@ -128,6 +120,13 @@ #define SVSB_VOPS_FLD_VOP2_6 GENMASK(23, 16) #define SVSB_VOPS_FLD_VOP3_7 GENMASK(31, 24) +/* SVS Thermal Coefficients */ +#define SVSB_TS_COEFF_MT8195 250460 +#define SVSB_TS_COEFF_MT8186 204650 + +/* Algo helpers */ +#define FUSE_DATA_NOT_VALID U32_MAX + /* svs bank related setting */ #define BITS8 8 #define MAX_OPP_ENTRIES 16 @@ -175,6 +174,36 @@ static DEFINE_SPINLOCK(svs_lock); #endif /** + * enum svsb_sw_id - SVS Bank Software ID + * @SVSB_SWID_CPU_LITTLE: CPU little cluster Bank + * @SVSB_SWID_CPU_BIG: CPU big cluster Bank + * @SVSB_SWID_CCI: Cache Coherent Interconnect Bank + * @SVSB_SWID_GPU: GPU Bank + * @SVSB_SWID_MAX: Total number of Banks + */ +enum svsb_sw_id { + SVSB_SWID_CPU_LITTLE, + SVSB_SWID_CPU_BIG, + SVSB_SWID_CCI, + SVSB_SWID_GPU, + SVSB_SWID_MAX +}; + +/** + * enum svsb_type - SVS Bank 2-line: Type and Role + * @SVSB_TYPE_NONE: One-line type Bank - Global role + * @SVSB_TYPE_LOW: Two-line type Bank - Low bank role + * @SVSB_TYPE_HIGH: Two-line type Bank - High bank role + * @SVSB_TYPE_MAX: Total number of bank types + */ +enum svsb_type { + SVSB_TYPE_NONE, + SVSB_TYPE_LOW, + SVSB_TYPE_HIGH, + SVSB_TYPE_MAX +}; + +/** * enum svsb_phase - svs bank phase enumeration * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition * @SVSB_PHASE_INIT01: svs bank basic init for data calibration @@ -256,60 +285,88 @@ enum svs_reg_index { }; static const u32 svs_regs_v2[] = { - [DESCHAR] = 0xc00, - [TEMPCHAR] = 0xc04, - [DETCHAR] = 0xc08, - [AGECHAR] = 0xc0c, - [DCCONFIG] = 0xc10, - [AGECONFIG] = 0xc14, - [FREQPCT30] = 0xc18, - [FREQPCT74] = 0xc1c, - [LIMITVALS] = 0xc20, - [VBOOT] = 0xc24, - [DETWINDOW] = 0xc28, - [CONFIG] = 0xc2c, - [TSCALCS] = 0xc30, - [RUNCONFIG] = 0xc34, - 
[SVSEN] = 0xc38, - [INIT2VALS] = 0xc3c, - [DCVALUES] = 0xc40, - [AGEVALUES] = 0xc44, - [VOP30] = 0xc48, - [VOP74] = 0xc4c, - [TEMP] = 0xc50, - [INTSTS] = 0xc54, - [INTSTSRAW] = 0xc58, - [INTEN] = 0xc5c, - [CHKINT] = 0xc60, - [CHKSHIFT] = 0xc64, - [STATUS] = 0xc68, - [VDESIGN30] = 0xc6c, - [VDESIGN74] = 0xc70, - [DVT30] = 0xc74, - [DVT74] = 0xc78, - [AGECOUNT] = 0xc7c, - [SMSTATE0] = 0xc80, - [SMSTATE1] = 0xc84, - [CTL0] = 0xc88, - [DESDETSEC] = 0xce0, - [TEMPAGESEC] = 0xce4, - [CTRLSPARE0] = 0xcf0, - [CTRLSPARE1] = 0xcf4, - [CTRLSPARE2] = 0xcf8, - [CTRLSPARE3] = 0xcfc, - [CORESEL] = 0xf00, - [THERMINTST] = 0xf04, - [INTST] = 0xf08, - [THSTAGE0ST] = 0xf0c, - [THSTAGE1ST] = 0xf10, - [THSTAGE2ST] = 0xf14, - [THAHBST0] = 0xf18, - [THAHBST1] = 0xf1c, - [SPARE0] = 0xf20, - [SPARE1] = 0xf24, - [SPARE2] = 0xf28, - [SPARE3] = 0xf2c, - [THSLPEVEB] = 0xf30, + [DESCHAR] = 0x00, + [TEMPCHAR] = 0x04, + [DETCHAR] = 0x08, + [AGECHAR] = 0x0c, + [DCCONFIG] = 0x10, + [AGECONFIG] = 0x14, + [FREQPCT30] = 0x18, + [FREQPCT74] = 0x1c, + [LIMITVALS] = 0x20, + [VBOOT] = 0x24, + [DETWINDOW] = 0x28, + [CONFIG] = 0x2c, + [TSCALCS] = 0x30, + [RUNCONFIG] = 0x34, + [SVSEN] = 0x38, + [INIT2VALS] = 0x3c, + [DCVALUES] = 0x40, + [AGEVALUES] = 0x44, + [VOP30] = 0x48, + [VOP74] = 0x4c, + [TEMP] = 0x50, + [INTSTS] = 0x54, + [INTSTSRAW] = 0x58, + [INTEN] = 0x5c, + [CHKINT] = 0x60, + [CHKSHIFT] = 0x64, + [STATUS] = 0x68, + [VDESIGN30] = 0x6c, + [VDESIGN74] = 0x70, + [DVT30] = 0x74, + [DVT74] = 0x78, + [AGECOUNT] = 0x7c, + [SMSTATE0] = 0x80, + [SMSTATE1] = 0x84, + [CTL0] = 0x88, + [DESDETSEC] = 0xe0, + [TEMPAGESEC] = 0xe4, + [CTRLSPARE0] = 0xf0, + [CTRLSPARE1] = 0xf4, + [CTRLSPARE2] = 0xf8, + [CTRLSPARE3] = 0xfc, + [CORESEL] = 0x300, + [THERMINTST] = 0x304, + [INTST] = 0x308, + [THSTAGE0ST] = 0x30c, + [THSTAGE1ST] = 0x310, + [THSTAGE2ST] = 0x314, + [THAHBST0] = 0x318, + [THAHBST1] = 0x31c, + [SPARE0] = 0x320, + [SPARE1] = 0x324, + [SPARE2] = 0x328, + [SPARE3] = 0x32c, + [THSLPEVEB] = 0x330, +}; + +static const char * const svs_swid_names[SVSB_SWID_MAX] = { + "SVSB_CPU_LITTLE", "SVSB_CPU_BIG", "SVSB_CCI", "SVSB_GPU" +}; + +static const char * const svs_type_names[SVSB_TYPE_MAX] = { + "", "_LOW", "_HIGH" +}; + +enum svs_fusemap_dev { + BDEV_BDES, + BDEV_MDES, + BDEV_MTDES, + BDEV_DCBDET, + BDEV_DCMDET, + BDEV_MAX +}; + +enum svs_fusemap_glb { + GLB_FT_PGM, + GLB_VMIN, + GLB_MAX +}; + +struct svs_fusemap { + s8 index; + u8 ofst; }; /** @@ -317,88 +374,124 @@ static const u32 svs_regs_v2[] = { * @base: svs platform register base * @dev: svs platform device * @main_clk: main clock for svs bank - * @pbank: svs bank pointer needing to be protected by spin_lock section * @banks: svs banks that svs platform supports * @rst: svs platform reset control * @efuse_max: total number of svs efuse * @tefuse_max: total number of thermal efuse * @regs: svs platform registers map - * @bank_max: total number of svs banks * @efuse: svs efuse data received from NVMEM framework * @tefuse: thermal efuse data received from NVMEM framework + * @ts_coeff: thermal sensors coefficient + * @bank_max: total number of svs banks */ struct svs_platform { void __iomem *base; struct device *dev; struct clk *main_clk; - struct svs_bank *pbank; struct svs_bank *banks; struct reset_control *rst; size_t efuse_max; size_t tefuse_max; const u32 *regs; - u32 bank_max; u32 *efuse; u32 *tefuse; + u32 ts_coeff; + u16 bank_max; }; struct svs_platform_data { char *name; struct svs_bank *banks; - bool (*efuse_parsing)(struct svs_platform *svsp); + bool (*efuse_parsing)(struct 
svs_platform *svsp, const struct svs_platform_data *pdata); int (*probe)(struct svs_platform *svsp); + const struct svs_fusemap *glb_fuse_map; const u32 *regs; - u32 bank_max; + u32 ts_coeff; + u16 bank_max; +}; + +/** + * struct svs_bank_pdata - SVS Bank immutable config parameters + * @dev_fuse_map: Bank fuse map data + * @buck_name: Regulator name + * @tzone_name: Thermal zone name + * @age_config: Bank age configuration + * @ctl0: TS-x selection + * @dc_config: Bank dc configuration + * @int_st: Bank interrupt identification + * @turn_freq_base: Reference frequency for 2-line turn point + * @tzone_htemp: Thermal zone high temperature threshold + * @tzone_ltemp: Thermal zone low temperature threshold + * @volt_step: Bank voltage step + * @volt_base: Bank voltage base + * @tzone_htemp_voffset: Thermal zone high temperature voltage offset + * @tzone_ltemp_voffset: Thermal zone low temperature voltage offset + * @chk_shift: Bank chicken shift + * @cpu_id: CPU core ID for SVS CPU bank use only + * @opp_count: Bank opp count + * @vboot: Voltage request for bank init01 only + * @vco: Bank VCO value + * @sw_id: Bank software identification + * @type: SVS Bank Type (1 or 2-line) and Role (high/low) + * @set_freq_pct: function pointer to set bank frequency percent table + * @get_volts: function pointer to get bank voltages + */ +struct svs_bank_pdata { + const struct svs_fusemap *dev_fuse_map; + char *buck_name; + char *tzone_name; + u32 age_config; + u32 ctl0; + u32 dc_config; + u32 int_st; + u32 turn_freq_base; + u32 tzone_htemp; + u32 tzone_ltemp; + u32 volt_step; + u32 volt_base; + u16 tzone_htemp_voffset; + u16 tzone_ltemp_voffset; + u8 chk_shift; + u8 cpu_id; + u8 opp_count; + u8 vboot; + u8 vco; + u8 sw_id; + u8 type; + + /* Callbacks */ + void (*set_freq_pct)(struct svs_platform *svsp, struct svs_bank *svsb); + void (*get_volts)(struct svs_platform *svsp, struct svs_bank *svsb); }; /** * struct svs_bank - svs bank representation + * @pdata: SVS Bank immutable config parameters * @dev: bank device * @opp_dev: device for opp table/buck control * @init_completion: the timeout completion for bank init * @buck: regulator used by opp_dev * @tzd: thermal zone device for getting temperature * @lock: mutex lock to protect voltage update process - * @set_freq_pct: function pointer to set bank frequency percent table - * @get_volts: function pointer to get bank voltages * @name: bank name - * @buck_name: regulator name - * @tzone_name: thermal zone name * @phase: bank current phase * @volt_od: bank voltage overdrive * @reg_data: bank register data in different phase for debug purpose * @pm_runtime_enabled_count: bank pm runtime enabled count - * @mode_support: bank mode support. 
+ * @mode_support: bank mode support * @freq_base: reference frequency for bank init - * @turn_freq_base: refenrece frequency for 2-line turn point - * @vboot: voltage request for bank init01 only * @opp_dfreq: default opp frequency table * @opp_dvolt: default opp voltage table * @freq_pct: frequency percent table for bank init * @volt: bank voltage table - * @volt_step: bank voltage step - * @volt_base: bank voltage base * @volt_flags: bank voltage flags * @vmax: bank voltage maximum * @vmin: bank voltage minimum - * @age_config: bank age configuration * @age_voffset_in: bank age voltage offset - * @dc_config: bank dc configuration * @dc_voffset_in: bank dc voltage offset * @dvt_fixed: bank dvt fixed value - * @vco: bank VCO value - * @chk_shift: bank chicken shift * @core_sel: bank selection - * @opp_count: bank opp count - * @int_st: bank interrupt identification - * @sw_id: bank software identification - * @cpu_id: cpu core id for SVS CPU bank use only - * @ctl0: TS-x selection * @temp: bank temperature - * @tzone_htemp: thermal zone high temperature threshold - * @tzone_htemp_voffset: thermal zone high temperature voltage offset - * @tzone_ltemp: thermal zone low temperature threshold - * @tzone_ltemp_voffset: thermal zone low temperature voltage offset * @bts: svs efuse data * @mts: svs efuse data * @bdes: svs efuse data @@ -408,70 +501,48 @@ struct svs_platform_data { * @dcmdet: svs efuse data * @turn_pt: 2-line turn point tells which opp_volt calculated by high/low bank * @vbin_turn_pt: voltage bin turn point helps know which svsb_volt should be overridden - * @type: bank type to represent it is 2-line (high/low) bank or 1-line bank * - * Svs bank will generate suitalbe voltages by below general math equation + * Svs bank will generate suitable voltages by below general math equation * and provide these voltages to opp voltage table. 
* * opp_volt[i] = (volt[i] * volt_step) + volt_base; */ struct svs_bank { + const struct svs_bank_pdata pdata; struct device *dev; struct device *opp_dev; struct completion init_completion; struct regulator *buck; struct thermal_zone_device *tzd; - struct mutex lock; /* lock to protect voltage update process */ - void (*set_freq_pct)(struct svs_platform *svsp); - void (*get_volts)(struct svs_platform *svsp); + struct mutex lock; + int pm_runtime_enabled_count; + short int volt_od; char *name; - char *buck_name; - char *tzone_name; enum svsb_phase phase; - s32 volt_od; u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX]; - u32 pm_runtime_enabled_count; - u32 mode_support; - u32 freq_base; - u32 turn_freq_base; - u32 vboot; + u8 mode_support; u32 opp_dfreq[MAX_OPP_ENTRIES]; u32 opp_dvolt[MAX_OPP_ENTRIES]; u32 freq_pct[MAX_OPP_ENTRIES]; u32 volt[MAX_OPP_ENTRIES]; - u32 volt_step; - u32 volt_base; u32 volt_flags; - u32 vmax; - u32 vmin; - u32 age_config; - u32 age_voffset_in; - u32 dc_config; - u32 dc_voffset_in; - u32 dvt_fixed; - u32 vco; - u32 chk_shift; - u32 core_sel; - u32 opp_count; - u32 int_st; - u32 sw_id; - u32 cpu_id; - u32 ctl0; - u32 temp; - u32 tzone_htemp; - u32 tzone_htemp_voffset; - u32 tzone_ltemp; - u32 tzone_ltemp_voffset; - u32 bts; - u32 mts; - u32 bdes; - u32 mdes; - u32 mtdes; - u32 dcbdet; - u32 dcmdet; + u32 freq_base; u32 turn_pt; u32 vbin_turn_pt; - u32 type; + u32 core_sel; + u32 temp; + u16 age_voffset_in; + u16 dc_voffset_in; + u8 dvt_fixed; + u8 vmax; + u8 vmin; + u16 bts; + u16 mts; + u16 bdes; + u16 mdes; + u8 mtdes; + u8 dcbdet; + u8 dcmdet; }; static u32 percent(u32 numerator, u32 denominator) @@ -494,10 +565,8 @@ static void svs_writel_relaxed(struct svs_platform *svsp, u32 val, writel_relaxed(val, svsp->base + svsp->regs[rg_i]); } -static void svs_switch_bank(struct svs_platform *svsp) +static void svs_switch_bank(struct svs_platform *svsp, struct svs_bank *svsb) { - struct svs_bank *svsb = svsp->pbank; - svs_writel_relaxed(svsp, svsb->core_sel, CORESEL); } @@ -515,10 +584,11 @@ static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step, static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) { + const struct svs_bank_pdata *bdata = &svsb->pdata; struct dev_pm_opp *opp; u32 i, opp_u_volt; - for (i = 0; i < svsb->opp_count; i++) { + for (i = 0; i < bdata->opp_count; i++) { opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, svsb->opp_dfreq[i], true); @@ -530,8 +600,8 @@ static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) opp_u_volt = dev_pm_opp_get_voltage(opp); svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt, - svsb->volt_step, - svsb->volt_base); + bdata->volt_step, + bdata->volt_base); dev_pm_opp_put(opp); } @@ -541,6 +611,7 @@ static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) { int ret = -EPERM, tzone_temp = 0; + const struct svs_bank_pdata *bdata = &svsb->pdata; u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop; mutex_lock(&svsb->lock); @@ -549,15 +620,15 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) * 2-line bank updates its corresponding opp volts. * 1-line bank updates all opp volts. 
*/ - if (svsb->type == SVSB_HIGH) { + if (bdata->type == SVSB_TYPE_HIGH) { opp_start = 0; opp_stop = svsb->turn_pt; - } else if (svsb->type == SVSB_LOW) { + } else if (bdata->type == SVSB_TYPE_LOW) { opp_start = svsb->turn_pt; - opp_stop = svsb->opp_count; + opp_stop = bdata->opp_count; } else { opp_start = 0; - opp_stop = svsb->opp_count; + opp_stop = bdata->opp_count; } /* Get thermal effect */ @@ -566,20 +637,20 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND && svsb->temp < SVSB_TEMP_LOWER_BOUND)) { dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n", - svsb->tzone_name, ret, svsb->temp); + bdata->tzone_name, ret, svsb->temp); svsb->phase = SVSB_PHASE_ERROR; } - if (tzone_temp >= svsb->tzone_htemp) - temp_voffset += svsb->tzone_htemp_voffset; - else if (tzone_temp <= svsb->tzone_ltemp) - temp_voffset += svsb->tzone_ltemp_voffset; + if (tzone_temp >= bdata->tzone_htemp) + temp_voffset += bdata->tzone_htemp_voffset; + else if (tzone_temp <= bdata->tzone_ltemp) + temp_voffset += bdata->tzone_ltemp_voffset; /* 2-line bank update all opp volts when running mon mode */ - if (svsb->phase == SVSB_PHASE_MON && (svsb->type == SVSB_HIGH || - svsb->type == SVSB_LOW)) { + if (svsb->phase == SVSB_PHASE_MON && (bdata->type == SVSB_TYPE_HIGH || + bdata->type == SVSB_TYPE_LOW)) { opp_start = 0; - opp_stop = svsb->opp_count; + opp_stop = bdata->opp_count; } } @@ -596,8 +667,8 @@ static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) case SVSB_PHASE_MON: svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin); opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, - svsb->volt_step, - svsb->volt_base); + bdata->volt_step, + bdata->volt_base); break; default: dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase); @@ -632,8 +703,7 @@ static void svs_bank_disable_and_restore_default_volts(struct svs_platform *svsp return; spin_lock_irqsave(&svs_lock, flags); - svsp->pbank = svsb; - svs_switch_bank(svsp); + svs_switch_bank(svsp, svsb); svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN); svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS); spin_unlock_irqrestore(&svs_lock, flags); @@ -760,7 +830,7 @@ static int svs_status_debug_show(struct seq_file *m, void *v) svsb->name, tzone_temp, svsb->vbin_turn_pt, svsb->turn_pt); - for (i = 0; i < svsb->opp_count; i++) { + for (i = 0; i < svsb->pdata.opp_count; i++) { opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, svsb->opp_dfreq[i], true); if (IS_ERR(opp)) { @@ -865,12 +935,12 @@ static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx) return DIV_ROUND_UP(vx, 100); } -static void svs_get_bank_volts_v3(struct svs_platform *svsp) +static void svs_get_bank_volts_v3(struct svs_platform *svsp, struct svs_bank *svsb) { - struct svs_bank *svsb = svsp->pbank; + const struct svs_bank_pdata *bdata = &svsb->pdata; u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt; u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0; - u32 middle_index = (svsb->opp_count / 2); + u32 middle_index = (bdata->opp_count / 2); if (svsb->phase == SVSB_PHASE_MON && svsb->volt_flags & SVSB_MON_VOLT_IGNORE) @@ -881,7 +951,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) /* Target is to set svsb->volt[] by algorithm */ if (turn_pt < middle_index) { - if (svsb->type == SVSB_HIGH) { + if (bdata->type == SVSB_TYPE_HIGH) { /* volt[0] ~ volt[turn_pt - 1] */ for (i = 0; i < turn_pt; i++) { b_sft = BITS8 * (shift_byte % REG_BYTES); @@ -890,12 +960,12 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) 
svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); shift_byte++; } - } else if (svsb->type == SVSB_LOW) { + } else if (bdata->type == SVSB_TYPE_LOW) { /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */ - j = svsb->opp_count - 7; + j = bdata->opp_count - 7; svsb->volt[turn_pt] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30); shift_byte++; - for (i = j; i < svsb->opp_count; i++) { + for (i = j; i < bdata->opp_count; i++) { b_sft = BITS8 * (shift_byte % REG_BYTES); vop = (shift_byte < REG_BYTES) ? &vop30 : &vop74; @@ -912,7 +982,7 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) svsb->freq_pct[i]); } } else { - if (svsb->type == SVSB_HIGH) { + if (bdata->type == SVSB_TYPE_HIGH) { /* volt[0] + volt[j] ~ volt[turn_pt - 1] */ j = turn_pt - 7; svsb->volt[0] = FIELD_GET(SVSB_VOPS_FLD_VOP0_4, vop30); @@ -932,9 +1002,9 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) svsb->volt[0], svsb->volt[j], svsb->freq_pct[i]); - } else if (svsb->type == SVSB_LOW) { + } else if (bdata->type == SVSB_TYPE_LOW) { /* volt[turn_pt] ~ volt[opp_count - 1] */ - for (i = turn_pt; i < svsb->opp_count; i++) { + for (i = turn_pt; i < bdata->opp_count; i++) { b_sft = BITS8 * (shift_byte % REG_BYTES); vop = (shift_byte < REG_BYTES) ? &vop30 : &vop74; @@ -944,12 +1014,12 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) } } - if (svsb->type == SVSB_HIGH) { + if (bdata->type == SVSB_TYPE_HIGH) { opp_start = 0; opp_stop = svsb->turn_pt; - } else if (svsb->type == SVSB_LOW) { + } else if (bdata->type == SVSB_TYPE_LOW) { opp_start = svsb->turn_pt; - opp_stop = svsb->opp_count; + opp_stop = bdata->opp_count; } for (i = opp_start; i < opp_stop; i++) @@ -959,11 +1029,11 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) /* For voltage bin support */ if (svsb->opp_dfreq[0] > svsb->freq_base) { svsb->volt[0] = svs_opp_volt_to_bank_volt(svsb->opp_dvolt[0], - svsb->volt_step, - svsb->volt_base); + bdata->volt_step, + bdata->volt_base); /* Find voltage bin turn point */ - for (i = 0; i < svsb->opp_count; i++) { + for (i = 0; i < bdata->opp_count; i++) { if (svsb->opp_dfreq[i] <= svsb->freq_base) { svsb->vbin_turn_pt = i; break; @@ -980,15 +1050,15 @@ static void svs_get_bank_volts_v3(struct svs_platform *svsp) } } -static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) +static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp, struct svs_bank *svsb) { - struct svs_bank *svsb = svsp->pbank; + const struct svs_bank_pdata *bdata = &svsb->pdata; u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0; u32 b_sft, shift_byte = 0, turn_pt; - u32 middle_index = (svsb->opp_count / 2); + u32 middle_index = (bdata->opp_count / 2); - for (i = 0; i < svsb->opp_count; i++) { - if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) { + for (i = 0; i < bdata->opp_count; i++) { + if (svsb->opp_dfreq[i] <= bdata->turn_freq_base) { svsb->turn_pt = i; break; } @@ -998,11 +1068,11 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */ if (turn_pt < middle_index) { - if (svsb->type == SVSB_HIGH) { + if (bdata->type == SVSB_TYPE_HIGH) { /* * If we don't handle this situation, - * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0" - * and this leads SVSB_LOW to work abnormally. + * SVSB_TYPE_HIGH's FREQPCT74 / FREQPCT30 would keep "0" + * and this leads SVSB_TYPE_LOW to work abnormally. 
*/ if (turn_pt == 0) freq_pct30 = svsb->freq_pct[0]; @@ -1015,15 +1085,15 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) *freq_pct |= (svsb->freq_pct[i] << b_sft); shift_byte++; } - } else if (svsb->type == SVSB_LOW) { + } else if (bdata->type == SVSB_TYPE_LOW) { /* * freq_pct[turn_pt] + * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1] */ freq_pct30 = svsb->freq_pct[turn_pt]; shift_byte++; - j = svsb->opp_count - 7; - for (i = j; i < svsb->opp_count; i++) { + j = bdata->opp_count - 7; + for (i = j; i < bdata->opp_count; i++) { b_sft = BITS8 * (shift_byte % REG_BYTES); freq_pct = (shift_byte < REG_BYTES) ? &freq_pct30 : &freq_pct74; @@ -1032,7 +1102,7 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) } } } else { - if (svsb->type == SVSB_HIGH) { + if (bdata->type == SVSB_TYPE_HIGH) { /* * freq_pct[0] + * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1] @@ -1047,9 +1117,9 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) *freq_pct |= (svsb->freq_pct[i] << b_sft); shift_byte++; } - } else if (svsb->type == SVSB_LOW) { + } else if (bdata->type == SVSB_TYPE_LOW) { /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */ - for (i = turn_pt; i < svsb->opp_count; i++) { + for (i = turn_pt; i < bdata->opp_count; i++) { b_sft = BITS8 * (shift_byte % REG_BYTES); freq_pct = (shift_byte < REG_BYTES) ? &freq_pct30 : &freq_pct74; @@ -1063,9 +1133,9 @@ static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) svs_writel_relaxed(svsp, freq_pct30, FREQPCT30); } -static void svs_get_bank_volts_v2(struct svs_platform *svsp) +static void svs_get_bank_volts_v2(struct svs_platform *svsp, struct svs_bank *svsb) { - struct svs_bank *svsb = svsp->pbank; + const struct svs_bank_pdata *bdata = &svsb->pdata; u32 temp, i; temp = svs_readl_relaxed(svsp, VOP74); @@ -1093,17 +1163,17 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp) svsb->volt[14], svsb->freq_pct[15]); - for (i = 0; i < svsb->opp_count; i++) + for (i = 0; i < bdata->opp_count; i++) svsb->volt[i] += svsb->volt_od; /* For voltage bin support */ if (svsb->opp_dfreq[0] > svsb->freq_base) { svsb->volt[0] = svs_opp_volt_to_bank_volt(svsb->opp_dvolt[0], - svsb->volt_step, - svsb->volt_base); + bdata->volt_step, + bdata->volt_base); /* Find voltage bin turn point */ - for (i = 0; i < svsb->opp_count; i++) { + for (i = 0; i < bdata->opp_count; i++) { if (svsb->opp_dfreq[i] <= svsb->freq_base) { svsb->vbin_turn_pt = i; break; @@ -1120,9 +1190,8 @@ static void svs_get_bank_volts_v2(struct svs_platform *svsp) } } -static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp) +static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp, struct svs_bank *svsb) { - struct svs_bank *svsb = svsp->pbank; u32 freqpct74_val, freqpct30_val; freqpct74_val = FIELD_PREP(SVSB_FREQPCTS_FLD_PCT0_4, svsb->freq_pct[8]) | @@ -1140,18 +1209,20 @@ static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp) } static void svs_set_bank_phase(struct svs_platform *svsp, + unsigned int bank_idx, enum svsb_phase target_phase) { - struct svs_bank *svsb = svsp->pbank; + struct svs_bank *svsb = &svsp->banks[bank_idx]; + const struct svs_bank_pdata *bdata = &svsb->pdata; u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs; - svs_switch_bank(svsp); + svs_switch_bank(svsp, svsb); des_char = FIELD_PREP(SVSB_DESCHAR_FLD_BDES, svsb->bdes) | FIELD_PREP(SVSB_DESCHAR_FLD_MDES, svsb->mdes); svs_writel_relaxed(svsp, des_char, DESCHAR); - temp_char = FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, svsb->vco) | + temp_char = 
FIELD_PREP(SVSB_TEMPCHAR_FLD_VCO, bdata->vco) | FIELD_PREP(SVSB_TEMPCHAR_FLD_MTDES, svsb->mtdes) | FIELD_PREP(SVSB_TEMPCHAR_FLD_DVT_FIXED, svsb->dvt_fixed); svs_writel_relaxed(svsp, temp_char, TEMPCHAR); @@ -1160,11 +1231,11 @@ static void svs_set_bank_phase(struct svs_platform *svsp, FIELD_PREP(SVSB_DETCHAR_FLD_DCMDET, svsb->dcmdet); svs_writel_relaxed(svsp, det_char, DETCHAR); - svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG); - svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG); + svs_writel_relaxed(svsp, bdata->dc_config, DCCONFIG); + svs_writel_relaxed(svsp, bdata->age_config, AGECONFIG); svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG); - svsb->set_freq_pct(svsp); + bdata->set_freq_pct(svsp, svsb); limit_vals = FIELD_PREP(SVSB_LIMITVALS_FLD_DTLO, SVSB_VAL_DTLO) | FIELD_PREP(SVSB_LIMITVALS_FLD_DTHI, SVSB_VAL_DTHI) | @@ -1174,13 +1245,13 @@ static void svs_set_bank_phase(struct svs_platform *svsp, svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW); svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG); - svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT); - svs_writel_relaxed(svsp, svsb->ctl0, CTL0); + svs_writel_relaxed(svsp, bdata->chk_shift, CHKSHIFT); + svs_writel_relaxed(svsp, bdata->ctl0, CTL0); svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS); switch (target_phase) { case SVSB_PHASE_INIT01: - svs_writel_relaxed(svsp, svsb->vboot, VBOOT); + svs_writel_relaxed(svsp, bdata->vboot, VBOOT); svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); svs_writel_relaxed(svsp, SVSB_PTPEN_INIT01, SVSEN); break; @@ -1206,18 +1277,20 @@ static void svs_set_bank_phase(struct svs_platform *svsp, } static inline void svs_save_bank_register_data(struct svs_platform *svsp, + unsigned short bank_idx, enum svsb_phase phase) { - struct svs_bank *svsb = svsp->pbank; + struct svs_bank *svsb = &svsp->banks[bank_idx]; enum svs_reg_index rg_i; for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++) svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i); } -static inline void svs_error_isr_handler(struct svs_platform *svsp) +static inline void svs_error_isr_handler(struct svs_platform *svsp, + unsigned short bank_idx) { - struct svs_bank *svsb = svsp->pbank; + struct svs_bank *svsb = &svsp->banks[bank_idx]; dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n", __func__, svs_readl_relaxed(svsp, CORESEL)); @@ -1229,27 +1302,29 @@ static inline void svs_error_isr_handler(struct svs_platform *svsp) svs_readl_relaxed(svsp, SMSTATE1)); dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP)); - svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR); + svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_ERROR); svsb->phase = SVSB_PHASE_ERROR; svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN); svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS); } -static inline void svs_init01_isr_handler(struct svs_platform *svsp) +static inline void svs_init01_isr_handler(struct svs_platform *svsp, + unsigned short bank_idx) { - struct svs_bank *svsb = svsp->pbank; + struct svs_bank *svsb = &svsp->banks[bank_idx]; + u32 val; dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n", __func__, svs_readl_relaxed(svsp, VDESIGN74), svs_readl_relaxed(svsp, VDESIGN30), svs_readl_relaxed(svsp, DCVALUES)); - svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01); + svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_INIT01); svsb->phase = SVSB_PHASE_INIT01; - svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) & - GENMASK(15, 0)) + 1; + val = ~(svs_readl_relaxed(svsp, DCVALUES) & GENMASK(15, 0)) + 1; + 
svsb->dc_voffset_in = val & GENMASK(15, 0); if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE || (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT && svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY)) @@ -1263,32 +1338,36 @@ static inline void svs_init01_isr_handler(struct svs_platform *svsp) svsb->core_sel &= ~SVSB_DET_CLK_EN; } -static inline void svs_init02_isr_handler(struct svs_platform *svsp) +static inline void svs_init02_isr_handler(struct svs_platform *svsp, + unsigned short bank_idx) { - struct svs_bank *svsb = svsp->pbank; + struct svs_bank *svsb = &svsp->banks[bank_idx]; + const struct svs_bank_pdata *bdata = &svsb->pdata; dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n", __func__, svs_readl_relaxed(svsp, VOP74), svs_readl_relaxed(svsp, VOP30), svs_readl_relaxed(svsp, DCVALUES)); - svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02); + svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_INIT02); svsb->phase = SVSB_PHASE_INIT02; - svsb->get_volts(svsp); + bdata->get_volts(svsp, svsb); svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN); svs_writel_relaxed(svsp, SVSB_INTSTS_F0_COMPLETE, INTSTS); } -static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp) +static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp, + unsigned short bank_idx) { - struct svs_bank *svsb = svsp->pbank; + struct svs_bank *svsb = &svsp->banks[bank_idx]; + const struct svs_bank_pdata *bdata = &svsb->pdata; - svs_save_bank_register_data(svsp, SVSB_PHASE_MON); + svs_save_bank_register_data(svsp, bank_idx, SVSB_PHASE_MON); svsb->phase = SVSB_PHASE_MON; - svsb->get_volts(svsp); + bdata->get_volts(svsp, svsb); svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0); svs_writel_relaxed(svsp, SVSB_INTSTS_FLD_MONVOP, INTSTS); @@ -1297,37 +1376,38 @@ static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp) static irqreturn_t svs_isr(int irq, void *data) { struct svs_platform *svsp = data; + const struct svs_bank_pdata *bdata; struct svs_bank *svsb = NULL; unsigned long flags; u32 idx, int_sts, svs_en; for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name); spin_lock_irqsave(&svs_lock, flags); - svsp->pbank = svsb; /* Find out which svs bank fires interrupt */ - if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) { + if (bdata->int_st & svs_readl_relaxed(svsp, INTST)) { spin_unlock_irqrestore(&svs_lock, flags); continue; } - svs_switch_bank(svsp); + svs_switch_bank(svsp, svsb); int_sts = svs_readl_relaxed(svsp, INTSTS); svs_en = svs_readl_relaxed(svsp, SVSEN); if (int_sts == SVSB_INTSTS_F0_COMPLETE && svs_en == SVSB_PTPEN_INIT01) - svs_init01_isr_handler(svsp); + svs_init01_isr_handler(svsp, idx); else if (int_sts == SVSB_INTSTS_F0_COMPLETE && svs_en == SVSB_PTPEN_INIT02) - svs_init02_isr_handler(svsp); + svs_init02_isr_handler(svsp, idx); else if (int_sts & SVSB_INTSTS_FLD_MONVOP) - svs_mon_mode_isr_handler(svsp); + svs_mon_mode_isr_handler(svsp, idx); else - svs_error_isr_handler(svsp); + svs_error_isr_handler(svsp, idx); spin_unlock_irqrestore(&svs_lock, flags); break; @@ -1342,20 +1422,35 @@ static irqreturn_t svs_isr(int irq, void *data) return IRQ_HANDLED; } +static bool svs_mode_available(struct svs_platform *svsp, u8 mode) +{ + int i; + + for (i = 0; i < svsp->bank_max; i++) + if (svsp->banks[i].mode_support & mode) + return true; + return false; +} + static int svs_init01(struct svs_platform *svsp) { + const struct svs_bank_pdata *bdata; struct svs_bank *svsb; unsigned 
long flags, time_left; bool search_done; int ret = 0, r; u32 opp_freq, opp_vboot, buck_volt, idx, i; + if (!svs_mode_available(svsp, SVSB_MODE_INIT01)) + return 0; + /* Keep CPUs' core power on for svs_init01 initialization */ cpuidle_pause_and_lock(); /* Svs bank init01 preparation - power enable */ for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; if (!(svsb->mode_support & SVSB_MODE_INIT01)) continue; @@ -1363,7 +1458,7 @@ static int svs_init01(struct svs_platform *svsp) ret = regulator_enable(svsb->buck); if (ret) { dev_err(svsb->dev, "%s enable fail: %d\n", - svsb->buck_name, ret); + bdata->buck_name, ret); goto svs_init01_resume_cpuidle; } @@ -1393,6 +1488,7 @@ static int svs_init01(struct svs_platform *svsp) */ for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; if (!(svsb->mode_support & SVSB_MODE_INIT01)) continue; @@ -1402,11 +1498,11 @@ static int svs_init01(struct svs_platform *svsp) * fix to that freq until svs_init01 is done. */ search_done = false; - opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, - svsb->volt_step, - svsb->volt_base); + opp_vboot = svs_bank_volt_to_opp_volt(bdata->vboot, + bdata->volt_step, + bdata->volt_base); - for (i = 0; i < svsb->opp_count; i++) { + for (i = 0; i < bdata->opp_count; i++) { opp_freq = svsb->opp_dfreq[i]; if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) { ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, @@ -1438,13 +1534,14 @@ static int svs_init01(struct svs_platform *svsp) /* Svs bank init01 begins */ for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; if (!(svsb->mode_support & SVSB_MODE_INIT01)) continue; - opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, - svsb->volt_step, - svsb->volt_base); + opp_vboot = svs_bank_volt_to_opp_volt(bdata->vboot, + bdata->volt_step, + bdata->volt_base); buck_volt = regulator_get_voltage(svsb->buck); if (buck_volt != opp_vboot) { @@ -1456,8 +1553,7 @@ static int svs_init01(struct svs_platform *svsp) } spin_lock_irqsave(&svs_lock, flags); - svsp->pbank = svsb; - svs_set_bank_phase(svsp, SVSB_PHASE_INIT01); + svs_set_bank_phase(svsp, idx, SVSB_PHASE_INIT01); spin_unlock_irqrestore(&svs_lock, flags); time_left = wait_for_completion_timeout(&svsb->init_completion, @@ -1472,11 +1568,12 @@ static int svs_init01(struct svs_platform *svsp) svs_init01_finish: for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; if (!(svsb->mode_support & SVSB_MODE_INIT01)) continue; - for (i = 0; i < svsb->opp_count; i++) { + for (i = 0; i < bdata->opp_count; i++) { r = dev_pm_opp_enable(svsb->opp_dev, svsb->opp_dfreq[i]); if (r) @@ -1502,7 +1599,7 @@ svs_init01_finish: r = regulator_disable(svsb->buck); if (r) dev_err(svsb->dev, "%s disable fail: %d\n", - svsb->buck_name, r); + bdata->buck_name, r); } svs_init01_resume_cpuidle: @@ -1513,11 +1610,15 @@ svs_init01_resume_cpuidle: static int svs_init02(struct svs_platform *svsp) { + const struct svs_bank_pdata *bdata; struct svs_bank *svsb; unsigned long flags, time_left; int ret; u32 idx; + if (!svs_mode_available(svsp, SVSB_MODE_INIT02)) + return 0; + for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; @@ -1526,8 +1627,7 @@ static int svs_init02(struct svs_platform *svsp) reinit_completion(&svsb->init_completion); spin_lock_irqsave(&svs_lock, flags); - svsp->pbank = svsb; - svs_set_bank_phase(svsp, SVSB_PHASE_INIT02); + svs_set_bank_phase(svsp, idx, SVSB_PHASE_INIT02); 
spin_unlock_irqrestore(&svs_lock, flags); time_left = wait_for_completion_timeout(&svsb->init_completion, @@ -1546,11 +1646,12 @@ static int svs_init02(struct svs_platform *svsp) */ for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; if (!(svsb->mode_support & SVSB_MODE_INIT02)) continue; - if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + if (bdata->type == SVSB_TYPE_HIGH || bdata->type == SVSB_TYPE_LOW) { if (svs_sync_bank_volts_from_opp(svsb)) { dev_err(svsb->dev, "sync volt fail\n"); ret = -EPERM; @@ -1583,8 +1684,7 @@ static void svs_mon_mode(struct svs_platform *svsp) continue; spin_lock_irqsave(&svs_lock, flags); - svsp->pbank = svsb; - svs_set_bank_phase(svsp, SVSB_PHASE_MON); + svs_set_bank_phase(svsp, idx, SVSB_PHASE_MON); spin_unlock_irqrestore(&svs_lock, flags); } } @@ -1609,12 +1709,12 @@ static int svs_start(struct svs_platform *svsp) static int svs_suspend(struct device *dev) { struct svs_platform *svsp = dev_get_drvdata(dev); - struct svs_bank *svsb; int ret; u32 idx; for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; + struct svs_bank *svsb = &svsp->banks[idx]; + svs_bank_disable_and_restore_default_volts(svsp, svsb); } @@ -1665,6 +1765,7 @@ out_of_resume: static int svs_bank_resource_setup(struct svs_platform *svsp) { + const struct svs_bank_pdata *bdata; struct svs_bank *svsb; struct dev_pm_opp *opp; unsigned long freq; @@ -1675,35 +1776,23 @@ static int svs_bank_resource_setup(struct svs_platform *svsp) for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; - switch (svsb->sw_id) { - case SVSB_CPU_LITTLE: - svsb->name = "SVSB_CPU_LITTLE"; - break; - case SVSB_CPU_BIG: - svsb->name = "SVSB_CPU_BIG"; - break; - case SVSB_CCI: - svsb->name = "SVSB_CCI"; - break; - case SVSB_GPU: - if (svsb->type == SVSB_HIGH) - svsb->name = "SVSB_GPU_HIGH"; - else if (svsb->type == SVSB_LOW) - svsb->name = "SVSB_GPU_LOW"; - else - svsb->name = "SVSB_GPU"; - break; - default: - dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + if (bdata->sw_id >= SVSB_SWID_MAX || bdata->type >= SVSB_TYPE_MAX) { + dev_err(svsb->dev, "unknown bank sw_id or type\n"); return -EINVAL; } - svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), - GFP_KERNEL); + svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), GFP_KERNEL); if (!svsb->dev) return -ENOMEM; + svsb->name = devm_kasprintf(svsp->dev, GFP_KERNEL, "%s%s", + svs_swid_names[bdata->sw_id], + svs_type_names[bdata->type]); + if (!svsb->name) + return -ENOMEM; + ret = dev_set_name(svsb->dev, "%s", svsb->name); if (ret) return ret; @@ -1721,32 +1810,32 @@ static int svs_bank_resource_setup(struct svs_platform *svsp) if (svsb->mode_support & SVSB_MODE_INIT01) { svsb->buck = devm_regulator_get_optional(svsb->opp_dev, - svsb->buck_name); + bdata->buck_name); if (IS_ERR(svsb->buck)) { dev_err(svsb->dev, "cannot get \"%s-supply\"\n", - svsb->buck_name); + bdata->buck_name); return PTR_ERR(svsb->buck); } } - if (!IS_ERR_OR_NULL(svsb->tzone_name)) { - svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name); + if (!IS_ERR_OR_NULL(bdata->tzone_name)) { + svsb->tzd = thermal_zone_get_zone_by_name(bdata->tzone_name); if (IS_ERR(svsb->tzd)) { dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n", - svsb->tzone_name); + bdata->tzone_name); return PTR_ERR(svsb->tzd); } } count = dev_pm_opp_get_opp_count(svsb->opp_dev); - if (svsb->opp_count != count) { + if (bdata->opp_count != count) { dev_err(svsb->dev, "opp_count not \"%u\" but get 
\"%d\"?\n", - svsb->opp_count, count); + bdata->opp_count, count); return count; } - for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) { + for (i = 0, freq = ULONG_MAX; i < bdata->opp_count; i++, freq--) { opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq); if (IS_ERR(opp)) { dev_err(svsb->dev, "cannot find freq = %ld\n", @@ -1780,8 +1869,6 @@ static int svs_get_efuse_data(struct svs_platform *svsp, *svsp_efuse = nvmem_cell_read(cell, svsp_efuse_max); if (IS_ERR(*svsp_efuse)) { - dev_err(svsp->dev, "cannot read \"%s\" efuse: %ld\n", - nvmem_cell_name, PTR_ERR(*svsp_efuse)); nvmem_cell_put(cell); return PTR_ERR(*svsp_efuse); } @@ -1792,139 +1879,91 @@ static int svs_get_efuse_data(struct svs_platform *svsp, return 0; } -static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp) +static u32 svs_get_fuse_val(u32 *fuse_array, const struct svs_fusemap *fmap, u8 nbits) { - struct svs_bank *svsb; - u32 idx, i, vmin, golden_temp; - int ret; - - for (i = 0; i < svsp->efuse_max; i++) - if (svsp->efuse[i]) - dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", - i, svsp->efuse[i]); - - if (!svsp->efuse[9]) { - dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n"); - return false; - } - - /* Svs efuse parsing */ - vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0); - - for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; + u32 val; - if (vmin == 0x1) - svsb->vmin = 0x1e; - - if (svsb->type == SVSB_LOW) { - svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0); - svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0); - } else if (svsb->type == SVSB_HIGH) { - svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0); - svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0); - } + if (fmap->index < 0) + return FUSE_DATA_NOT_VALID; - svsb->vmax += svsb->dvt_fixed; - } + val = fuse_array[fmap->index] >> fmap->ofst; + val &= GENMASK(nbits - 1, 0); - ret = svs_get_efuse_data(svsp, "t-calibration-data", - &svsp->tefuse, &svsp->tefuse_max); - if (ret) - return false; + return val; +} - for (i = 0; i < svsp->tefuse_max; i++) - if (svsp->tefuse[i] != 0) - break; +static bool svs_is_available(struct svs_platform *svsp) +{ + int i, num_populated = 0; - if (i == svsp->tefuse_max) - golden_temp = 50; /* All thermal efuse data are 0 */ - else - golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0); + /* If at least two fuse arrays are populated, SVS is calibrated */ + for (i = 0; i < svsp->efuse_max; i++) { + if (svsp->efuse[i]) + num_populated++; - for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; - svsb->mts = 500; - svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4; + if (num_populated > 1) + return true; } - return true; + return false; } -static bool svs_mt8188_efuse_parsing(struct svs_platform *svsp) +static bool svs_common_parse_efuse(struct svs_platform *svsp, + const struct svs_platform_data *pdata) { - struct svs_bank *svsb; - u32 idx, i, golden_temp; - int ret; - - for (i = 0; i < svsp->efuse_max; i++) - if (svsp->efuse[i]) - dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", - i, svsp->efuse[i]); + const struct svs_fusemap *gfmap = pdata->glb_fuse_map; + struct svs_fusemap tfm = { 0, 24 }; + u32 golden_temp, val; + u8 ft_pgm, vmin; + int i; - if 
(!svsp->efuse[5]) { - dev_notice(svsp->dev, "svs_efuse[5] = 0x0?\n"); + if (!svs_is_available(svsp)) return false; - } - /* Svs efuse parsing */ - for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; + /* Get golden temperature from SVS-Thermal calibration */ + val = svs_get_fuse_val(svsp->tefuse, &tfm, 8); - if (svsb->type == SVSB_LOW) { - svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0); - svsb->bdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[5] >> 24) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[15] >> 16) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[15] >> 24) & GENMASK(7, 0); - } else if (svsb->type == SVSB_HIGH) { - svsb->mtdes = svsp->efuse[4] & GENMASK(7, 0); - svsb->bdes = (svsp->efuse[4] >> 16) & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[4] >> 24) & GENMASK(7, 0); - svsb->dcbdet = svsp->efuse[14] & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[14] >> 8) & GENMASK(7, 0); - } + /* If golden temp is not programmed, use the default of 50 */ + golden_temp = val ? val : 50; - svsb->vmax += svsb->dvt_fixed; - } + /* Parse fused SVS calibration */ + ft_pgm = svs_get_fuse_val(svsp->efuse, &gfmap[GLB_FT_PGM], 8); + vmin = svs_get_fuse_val(svsp->efuse, &gfmap[GLB_VMIN], 2); - ret = svs_get_efuse_data(svsp, "t-calibration-data", - &svsp->tefuse, &svsp->tefuse_max); - if (ret) - return false; + for (i = 0; i < svsp->bank_max; i++) { + struct svs_bank *svsb = &svsp->banks[i]; + const struct svs_bank_pdata *bdata = &svsb->pdata; + const struct svs_fusemap *dfmap = bdata->dev_fuse_map; - for (i = 0; i < svsp->tefuse_max; i++) - if (svsp->tefuse[i] != 0) - break; + if (vmin == 1) + svsb->vmin = 0x1e; - if (i == svsp->tefuse_max) - golden_temp = 50; /* All thermal efuse data are 0 */ - else - golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0); + if (ft_pgm == 0) + svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE; - for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; - svsb->mts = 500; - svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4; + svsb->mtdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MTDES], 8); + svsb->bdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_BDES], 8); + svsb->mdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MDES], 8); + svsb->dcbdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCBDET], 8); + svsb->dcmdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCMDET], 8); + svsb->vmax += svsb->dvt_fixed; + + svsb->mts = (svsp->ts_coeff * 2) / 1000; + svsb->bts = (((500 * golden_temp + svsp->ts_coeff) / 1000) - 25) * 4; } return true; } -static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) +static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp, + const struct svs_platform_data *pdata) { struct svs_bank *svsb; + const struct svs_bank_pdata *bdata; int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0; int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t; int o_slope, o_slope_sign, ts_id; u32 idx, i, ft_pgm, mts, temp0, temp1, temp2; - int ret; for (i = 0; i < svsp->efuse_max; i++) if (svsp->efuse[i]) @@ -1937,74 +1976,48 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) } /* Svs efuse parsing */ - ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0); + ft_pgm = svs_get_fuse_val(svsp->efuse, &pdata->glb_fuse_map[GLB_FT_PGM], 4); for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; + const struct svs_fusemap *dfmap = bdata->dev_fuse_map; if (ft_pgm <= 1) svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE; - switch (svsb->sw_id) { - 
case SVSB_CPU_LITTLE: - svsb->bdes = svsp->efuse[16] & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0); - svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + svsb->mtdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MTDES], 8); + svsb->bdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_BDES], 8); + svsb->mdes = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_MDES], 8); + svsb->dcbdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCBDET], 8); + svsb->dcmdet = svs_get_fuse_val(svsp->efuse, &dfmap[BDEV_DCMDET], 8); + switch (bdata->sw_id) { + case SVSB_SWID_CPU_LITTLE: + case SVSB_SWID_CCI: if (ft_pgm <= 3) svsb->volt_od += 10; else svsb->volt_od += 2; break; - case SVSB_CPU_BIG: - svsb->bdes = svsp->efuse[18] & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0); - svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0); - + case SVSB_SWID_CPU_BIG: if (ft_pgm <= 3) svsb->volt_od += 15; else svsb->volt_od += 12; break; - case SVSB_CCI: - svsb->bdes = svsp->efuse[4] & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0); - svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0); - - if (ft_pgm <= 3) - svsb->volt_od += 10; - else - svsb->volt_od += 2; - break; - case SVSB_GPU: - svsb->bdes = svsp->efuse[6] & GENMASK(7, 0); - svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0); - svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0); - svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0); - svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0); - - if (ft_pgm >= 2) { + case SVSB_SWID_GPU: + if (ft_pgm != FUSE_DATA_NOT_VALID && ft_pgm >= 2) { svsb->freq_base = 800000000; /* 800MHz */ svsb->dvt_fixed = 2; } break; default: - dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id); return false; } } - ret = svs_get_efuse_data(svsp, "t-calibration-data", - &svsp->tefuse, &svsp->tefuse_max); - if (ret) - return false; - /* Thermal efuse parsing */ adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0); adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0); @@ -2064,23 +2077,24 @@ static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) for (idx = 0; idx < svsp->bank_max; idx++) { svsb = &svsp->banks[idx]; + bdata = &svsb->pdata; svsb->mts = mts; - switch (svsb->sw_id) { - case SVSB_CPU_LITTLE: + switch (bdata->sw_id) { + case SVSB_SWID_CPU_LITTLE: tb_roomt = x_roomt[3]; break; - case SVSB_CPU_BIG: + case SVSB_SWID_CPU_BIG: tb_roomt = x_roomt[4]; break; - case SVSB_CCI: + case SVSB_SWID_CCI: tb_roomt = x_roomt[3]; break; - case SVSB_GPU: + case SVSB_SWID_GPU: tb_roomt = x_roomt[1]; break; default: - dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id); goto remove_mt8183_svsb_mon_mode; } @@ -2153,7 +2167,6 @@ static struct device *svs_add_device_link(struct svs_platform *svsp, static int svs_mt8192_platform_probe(struct svs_platform *svsp) { struct device *dev; - struct svs_bank *svsb; u32 idx; svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); @@ -2161,18 +2174,33 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst), "cannot get svs reset 
control\n"); - dev = svs_add_device_link(svsp, "lvts"); + dev = svs_add_device_link(svsp, "thermal-sensor"); if (IS_ERR(dev)) return dev_err_probe(svsp->dev, PTR_ERR(dev), "failed to get lvts device\n"); for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; + struct svs_bank *svsb = &svsp->banks[idx]; + const struct svs_bank_pdata *bdata = &svsb->pdata; - if (svsb->type == SVSB_HIGH) - svsb->opp_dev = svs_add_device_link(svsp, "gpu"); - else if (svsb->type == SVSB_LOW) - svsb->opp_dev = svs_get_subsys_device(svsp, "gpu"); + switch (bdata->sw_id) { + case SVSB_SWID_CPU_LITTLE: + case SVSB_SWID_CPU_BIG: + svsb->opp_dev = get_cpu_device(bdata->cpu_id); + break; + case SVSB_SWID_CCI: + svsb->opp_dev = svs_add_device_link(svsp, "cci"); + break; + case SVSB_SWID_GPU: + if (bdata->type == SVSB_TYPE_LOW) + svsb->opp_dev = svs_get_subsys_device(svsp, "gpu"); + else + svsb->opp_dev = svs_add_device_link(svsp, "gpu"); + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id); + return -EINVAL; + } if (IS_ERR(svsb->opp_dev)) return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), @@ -2186,30 +2214,30 @@ static int svs_mt8192_platform_probe(struct svs_platform *svsp) static int svs_mt8183_platform_probe(struct svs_platform *svsp) { struct device *dev; - struct svs_bank *svsb; u32 idx; - dev = svs_add_device_link(svsp, "thermal"); + dev = svs_add_device_link(svsp, "thermal-sensor"); if (IS_ERR(dev)) return dev_err_probe(svsp->dev, PTR_ERR(dev), "failed to get thermal device\n"); for (idx = 0; idx < svsp->bank_max; idx++) { - svsb = &svsp->banks[idx]; + struct svs_bank *svsb = &svsp->banks[idx]; + const struct svs_bank_pdata *bdata = &svsb->pdata; - switch (svsb->sw_id) { - case SVSB_CPU_LITTLE: - case SVSB_CPU_BIG: - svsb->opp_dev = get_cpu_device(svsb->cpu_id); + switch (bdata->sw_id) { + case SVSB_SWID_CPU_LITTLE: + case SVSB_SWID_CPU_BIG: + svsb->opp_dev = get_cpu_device(bdata->cpu_id); break; - case SVSB_CCI: + case SVSB_SWID_CCI: svsb->opp_dev = svs_add_device_link(svsp, "cci"); break; - case SVSB_GPU: + case SVSB_SWID_GPU: svsb->opp_dev = svs_add_device_link(svsp, "gpu"); break; default: - dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + dev_err(svsb->dev, "unknown sw_id: %u\n", bdata->sw_id); return -EINVAL; } @@ -2222,241 +2250,544 @@ static int svs_mt8183_platform_probe(struct svs_platform *svsp) return 0; } +static struct svs_bank svs_mt8195_banks[] = { + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .type = SVSB_TYPE_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 640000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .int_st = BIT(0), + .ctl0 = 0x00540003, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 10, 16 }, { 10, 24 }, { 10, 0 }, { 8, 0 }, { 8, 8 } + } + }, + .mode_support = SVSB_MODE_INIT02, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .freq_base = 640000000, + .core_sel = 0x0fff0100, + .dvt_fixed = 0x1, + .vmax = 0x38, + .vmin = 0x14, + }, + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .type = SVSB_TYPE_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu", + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 640000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x1, + .vco = 0x18, + .chk_shift = 0x87, 
+ .int_st = BIT(1), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 9, 16 }, { 9, 24 }, { 9, 0 }, { 8, 0 }, { 8, 8 } + }, + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 880000000, + .core_sel = 0x0fff0101, + .dvt_fixed = 0x6, + .vmax = 0x38, + .vmin = 0x14, + }, +}; + static struct svs_bank svs_mt8192_banks[] = { { - .sw_id = SVSB_GPU, - .type = SVSB_LOW, - .set_freq_pct = svs_set_bank_freq_pct_v3, - .get_volts = svs_get_bank_volts_v3, - .tzone_name = "gpu1", - .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, - .mode_support = SVSB_MODE_INIT02, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 688000000, - .turn_freq_base = 688000000, - .volt_step = 6250, - .volt_base = 400000, - .vmax = 0x60, - .vmin = 0x1a, - .age_config = 0x555555, - .dc_config = 0x1, - .dvt_fixed = 0x1, - .vco = 0x18, - .chk_shift = 0x87, - .core_sel = 0x0fff0100, - .int_st = BIT(0), - .ctl0 = 0x00540003, - .tzone_htemp = 85000, - .tzone_htemp_voffset = 0, - .tzone_ltemp = 25000, - .tzone_ltemp_voffset = 7, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .type = SVSB_TYPE_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu", + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .int_st = BIT(0), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 10, 16 }, { 10, 24 }, { 10, 0 }, { 17, 0 }, { 17, 8 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .mode_support = SVSB_MODE_INIT02, + .freq_base = 688000000, + .core_sel = 0x0fff0100, + .dvt_fixed = 0x1, + .vmax = 0x60, + .vmin = 0x1a, }, { - .sw_id = SVSB_GPU, - .type = SVSB_HIGH, - .set_freq_pct = svs_set_bank_freq_pct_v3, - .get_volts = svs_get_bank_volts_v3, - .tzone_name = "gpu1", - .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | - SVSB_MON_VOLT_IGNORE, - .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 902000000, - .turn_freq_base = 688000000, - .volt_step = 6250, - .volt_base = 400000, - .vmax = 0x60, - .vmin = 0x1a, - .age_config = 0x555555, - .dc_config = 0x1, - .dvt_fixed = 0x6, - .vco = 0x18, - .chk_shift = 0x87, - .core_sel = 0x0fff0101, - .int_st = BIT(1), - .ctl0 = 0x00540003, - .tzone_htemp = 85000, - .tzone_htemp_voffset = 0, - .tzone_ltemp = 25000, - .tzone_ltemp_voffset = 7, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .type = SVSB_TYPE_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu", + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .int_st = BIT(1), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 9, 16 }, { 9, 24 }, { 17, 0 }, { 17, 16 }, { 17, 24 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base 
= 902000000, + .core_sel = 0x0fff0101, + .dvt_fixed = 0x6, + .vmax = 0x60, + .vmin = 0x1a, }, }; static struct svs_bank svs_mt8188_banks[] = { { - .sw_id = SVSB_GPU, - .type = SVSB_LOW, - .set_freq_pct = svs_set_bank_freq_pct_v3, - .get_volts = svs_get_bank_volts_v3, - .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, - .mode_support = SVSB_MODE_INIT02, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 640000000, - .turn_freq_base = 640000000, - .volt_step = 6250, - .volt_base = 400000, - .vmax = 0x38, - .vmin = 0x1c, - .age_config = 0x555555, - .dc_config = 0x555555, - .dvt_fixed = 0x1, - .vco = 0x10, - .chk_shift = 0x87, - .core_sel = 0x0fff0000, - .int_st = BIT(0), - .ctl0 = 0x00100003, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .type = SVSB_TYPE_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 640000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x555555, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(0), + .ctl0 = 0x00100003, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 5, 16 }, { 5, 24 }, { 5, 0 }, { 15, 16 }, { 15, 24 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .mode_support = SVSB_MODE_INIT02, + .freq_base = 640000000, + .core_sel = 0x0fff0000, + .dvt_fixed = 0x1, + .vmax = 0x38, + .vmin = 0x1c, + }, + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .type = SVSB_TYPE_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu", + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 640000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x555555, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(1), + .ctl0 = 0x00100003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 4, 16 }, { 4, 24 }, { 4, 0 }, { 14, 0 }, { 14, 8 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 880000000, + .core_sel = 0x0fff0001, + .dvt_fixed = 0x4, + .vmax = 0x38, + .vmin = 0x1c, + }, +}; + +static struct svs_bank svs_mt8186_banks[] = { + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CPU_BIG, + .type = SVSB_TYPE_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .cpu_id = 6, + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 1670000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x1, + .dc_config = 0x1, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(0), + .ctl0 = 0x00540003, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 3, 16 }, { 3, 24 }, { 3, 0 }, { 14, 16 }, { 14, 24 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .volt_od = 4, + .mode_support = SVSB_MODE_INIT02, + .freq_base = 1670000000, + .core_sel = 0x0fff0100, + .dvt_fixed = 0x3, + .vmax = 0x59, + .vmin = 0x20, + }, + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CPU_BIG, + .type = SVSB_TYPE_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .cpu_id = 6, + .tzone_name = "cpu-big", + .opp_count = MAX_OPP_ENTRIES, + .turn_freq_base = 1670000000, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x1, + .dc_config = 0x1, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(1), + .ctl0 = 
0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 8, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 8, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 2, 16 }, { 2, 24 }, { 2, 0 }, { 13, 0 }, { 13, 8 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE, + .volt_od = 4, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 2050000000, + .core_sel = 0x0fff0101, + .dvt_fixed = 0x6, + .vmax = 0x73, + .vmin = 0x20, + }, + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CPU_LITTLE, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 0, + .tzone_name = "cpu-little", + .opp_count = MAX_OPP_ENTRIES, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x1, + .dc_config = 0x1, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(2), + .ctl0 = 0x3210000f, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 8, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 8, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 4, 16 }, { 4, 24 }, { 4, 0 }, { 14, 0 }, { 14, 8 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE, + .volt_od = 3, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 2000000000, + .core_sel = 0x0fff0102, + .dvt_fixed = 0x6, + .vmax = 0x65, + .vmin = 0x20, }, { - .sw_id = SVSB_GPU, - .type = SVSB_HIGH, - .set_freq_pct = svs_set_bank_freq_pct_v3, - .get_volts = svs_get_bank_volts_v3, - .tzone_name = "gpu1", - .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | - SVSB_MON_VOLT_IGNORE, - .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 880000000, - .turn_freq_base = 640000000, - .volt_step = 6250, - .volt_base = 400000, - .vmax = 0x38, - .vmin = 0x1c, - .age_config = 0x555555, - .dc_config = 0x555555, - .dvt_fixed = 0x4, - .vco = 0x10, - .chk_shift = 0x87, - .core_sel = 0x0fff0001, - .int_st = BIT(1), - .ctl0 = 0x00100003, - .tzone_htemp = 85000, - .tzone_htemp_voffset = 0, - .tzone_ltemp = 25000, - .tzone_ltemp_voffset = 7, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CCI, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .tzone_name = "cci", + .opp_count = MAX_OPP_ENTRIES, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x1, + .dc_config = 0x1, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(3), + .ctl0 = 0x3210000f, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 8, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 8, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 5, 16 }, { 5, 24 }, { 5, 0 }, { 15, 16 }, { 15, 24 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | SVSB_MON_VOLT_IGNORE, + .volt_od = 3, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 1400000000, + .core_sel = 0x0fff0103, + .dvt_fixed = 0x6, + .vmax = 0x65, + .vmin = 0x20, + }, + { + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .tzone_name = "gpu", + .opp_count = MAX_OPP_ENTRIES, + .volt_step = 6250, + .volt_base = 400000, + .age_config = 0x555555, + .dc_config = 0x1, + .vco = 0x10, + .chk_shift = 0x87, + .int_st = BIT(4), + .ctl0 = 0x00100003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 8, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 6, 16 }, { 6, 24 }, { 6, 0 }, { 15, 8 }, { 15, 0 } + } + }, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | 
SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 850000000, + .core_sel = 0x0fff0104, + .dvt_fixed = 0x4, + .vmax = 0x58, + .vmin = 0x20, }, }; static struct svs_bank svs_mt8183_banks[] = { { - .sw_id = SVSB_CPU_LITTLE, - .set_freq_pct = svs_set_bank_freq_pct_v2, - .get_volts = svs_get_bank_volts_v2, - .cpu_id = 0, - .buck_name = "proc", - .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, - .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 1989000000, - .vboot = 0x30, - .volt_step = 6250, - .volt_base = 500000, - .vmax = 0x64, - .vmin = 0x18, - .age_config = 0x555555, - .dc_config = 0x555555, - .dvt_fixed = 0x7, - .vco = 0x10, - .chk_shift = 0x77, - .core_sel = 0x8fff0000, - .int_st = BIT(0), - .ctl0 = 0x00010001, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CPU_LITTLE, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 0, + .buck_name = "proc", + .opp_count = MAX_OPP_ENTRIES, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .age_config = 0x555555, + .dc_config = 0x555555, + .vco = 0x10, + .chk_shift = 0x77, + .int_st = BIT(0), + .ctl0 = 0x00010001, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 16, 0 }, { 16, 8 }, { 17, 16 }, { 16, 16 }, { 16, 24 } + } + }, + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .freq_base = 1989000000, + .core_sel = 0x8fff0000, + .dvt_fixed = 0x7, + .vmax = 0x64, + .vmin = 0x18, + }, { - .sw_id = SVSB_CPU_BIG, - .set_freq_pct = svs_set_bank_freq_pct_v2, - .get_volts = svs_get_bank_volts_v2, - .cpu_id = 4, - .buck_name = "proc", - .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, - .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 1989000000, - .vboot = 0x30, - .volt_step = 6250, - .volt_base = 500000, - .vmax = 0x58, - .vmin = 0x10, - .age_config = 0x555555, - .dc_config = 0x555555, - .dvt_fixed = 0x7, - .vco = 0x10, - .chk_shift = 0x77, - .core_sel = 0x8fff0001, - .int_st = BIT(1), - .ctl0 = 0x00000001, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CPU_BIG, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 4, + .buck_name = "proc", + .opp_count = MAX_OPP_ENTRIES, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .age_config = 0x555555, + .dc_config = 0x555555, + .vco = 0x10, + .chk_shift = 0x77, + .int_st = BIT(1), + .ctl0 = 0x00000001, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 18, 0 }, { 18, 8 }, { 17, 0 }, { 18, 16 }, { 18, 24 } + } + }, + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .freq_base = 1989000000, + .core_sel = 0x8fff0001, + .dvt_fixed = 0x7, + .vmax = 0x58, + .vmin = 0x10, + }, { - .sw_id = SVSB_CCI, - .set_freq_pct = svs_set_bank_freq_pct_v2, - .get_volts = svs_get_bank_volts_v2, - .buck_name = "proc", - .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, - .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 1196000000, - .vboot = 0x30, - .volt_step = 6250, - .volt_base = 500000, - .vmax = 0x64, - .vmin = 0x18, - .age_config = 0x555555, - .dc_config = 0x555555, - .dvt_fixed = 0x7, - .vco = 0x10, - .chk_shift = 0x77, - .core_sel = 0x8fff0002, - .int_st = BIT(2), - .ctl0 = 0x00100003, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_CCI, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts 
= svs_get_bank_volts_v2, + .buck_name = "proc", + .opp_count = MAX_OPP_ENTRIES, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .age_config = 0x555555, + .dc_config = 0x555555, + .vco = 0x10, + .chk_shift = 0x77, + .int_st = BIT(2), + .ctl0 = 0x00100003, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 4, 0 }, { 4, 8 }, { 5, 16 }, { 4, 16 }, { 4, 24 } + } + }, + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .freq_base = 1196000000, + .core_sel = 0x8fff0002, + .dvt_fixed = 0x7, + .vmax = 0x64, + .vmin = 0x18, }, { - .sw_id = SVSB_GPU, - .set_freq_pct = svs_set_bank_freq_pct_v2, - .get_volts = svs_get_bank_volts_v2, - .buck_name = "mali", - .tzone_name = "tzts2", - .volt_flags = SVSB_INIT01_PD_REQ | - SVSB_INIT01_VOLT_INC_ONLY, - .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | - SVSB_MODE_MON, - .opp_count = MAX_OPP_ENTRIES, - .freq_base = 900000000, - .vboot = 0x30, - .volt_step = 6250, - .volt_base = 500000, - .vmax = 0x40, - .vmin = 0x14, - .age_config = 0x555555, - .dc_config = 0x555555, - .dvt_fixed = 0x3, - .vco = 0x10, - .chk_shift = 0x77, - .core_sel = 0x8fff0003, - .int_st = BIT(3), - .ctl0 = 0x00050001, - .tzone_htemp = 85000, - .tzone_htemp_voffset = 0, - .tzone_ltemp = 25000, - .tzone_ltemp_voffset = 3, + .pdata = (const struct svs_bank_pdata) { + .sw_id = SVSB_SWID_GPU, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "mali", + .tzone_name = "gpu", + .opp_count = MAX_OPP_ENTRIES, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .age_config = 0x555555, + .dc_config = 0x555555, + .vco = 0x10, + .chk_shift = 0x77, + .int_st = BIT(3), + .ctl0 = 0x00050001, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 3, + .dev_fuse_map = (const struct svs_fusemap[BDEV_MAX]) { + { 6, 0 }, { 6, 8 }, { 5, 0 }, { 6, 16 }, { 6, 24 } + } + }, + .volt_flags = SVSB_INIT01_PD_REQ | SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | SVSB_MODE_MON, + .freq_base = 900000000, + .core_sel = 0x8fff0003, + .dvt_fixed = 0x3, + .vmax = 0x40, + .vmin = 0x14, }, }; +static const struct svs_platform_data svs_mt8195_platform_data = { + .name = "mt8195-svs", + .banks = svs_mt8195_banks, + .efuse_parsing = svs_common_parse_efuse, + .probe = svs_mt8192_platform_probe, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8195_banks), + .ts_coeff = SVSB_TS_COEFF_MT8195, + .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) { + { 0, 0 }, { 19, 4 } + } +}; + static const struct svs_platform_data svs_mt8192_platform_data = { .name = "mt8192-svs", .banks = svs_mt8192_banks, - .efuse_parsing = svs_mt8192_efuse_parsing, + .efuse_parsing = svs_common_parse_efuse, .probe = svs_mt8192_platform_probe, .regs = svs_regs_v2, .bank_max = ARRAY_SIZE(svs_mt8192_banks), + .ts_coeff = SVSB_TS_COEFF_MT8195, + .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) { + /* FT_PGM not present */ + { -1, 0 }, { 19, 4 } + } }; static const struct svs_platform_data svs_mt8188_platform_data = { .name = "mt8188-svs", .banks = svs_mt8188_banks, - .efuse_parsing = svs_mt8188_efuse_parsing, + .efuse_parsing = svs_common_parse_efuse, .probe = svs_mt8192_platform_probe, .regs = svs_regs_v2, .bank_max = ARRAY_SIZE(svs_mt8188_banks), + .ts_coeff = SVSB_TS_COEFF_MT8195, + .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) { + /* FT_PGM and VMIN not present */ + { -1, 0 }, { -1, 0 } + } +}; + +static const struct svs_platform_data 
svs_mt8186_platform_data = { + .name = "mt8186-svs", + .banks = svs_mt8186_banks, + .efuse_parsing = svs_common_parse_efuse, + .probe = svs_mt8192_platform_probe, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8186_banks), + .ts_coeff = SVSB_TS_COEFF_MT8186, + .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) { + /* FT_PGM and VMIN not present */ + { -1, 0 }, { -1, 0 } + } }; static const struct svs_platform_data svs_mt8183_platform_data = { @@ -2466,21 +2797,19 @@ static const struct svs_platform_data svs_mt8183_platform_data = { .probe = svs_mt8183_platform_probe, .regs = svs_regs_v2, .bank_max = ARRAY_SIZE(svs_mt8183_banks), + .glb_fuse_map = (const struct svs_fusemap[GLB_MAX]) { + /* VMIN not present */ + { 0, 4 }, { -1, 0 } + } }; static const struct of_device_id svs_of_match[] = { - { - .compatible = "mediatek,mt8192-svs", - .data = &svs_mt8192_platform_data, - }, { - .compatible = "mediatek,mt8188-svs", - .data = &svs_mt8188_platform_data, - }, { - .compatible = "mediatek,mt8183-svs", - .data = &svs_mt8183_platform_data, - }, { - /* Sentinel */ - }, + { .compatible = "mediatek,mt8195-svs", .data = &svs_mt8195_platform_data }, + { .compatible = "mediatek,mt8192-svs", .data = &svs_mt8192_platform_data }, + { .compatible = "mediatek,mt8188-svs", .data = &svs_mt8188_platform_data }, + { .compatible = "mediatek,mt8186-svs", .data = &svs_mt8186_platform_data }, + { .compatible = "mediatek,mt8183-svs", .data = &svs_mt8183_platform_data }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, svs_of_match); @@ -2500,6 +2829,7 @@ static int svs_probe(struct platform_device *pdev) svsp->banks = svsp_data->banks; svsp->regs = svsp_data->regs; svsp->bank_max = svsp_data->bank_max; + svsp->ts_coeff = svsp_data->ts_coeff; ret = svsp_data->probe(svsp); if (ret) @@ -2507,20 +2837,24 @@ static int svs_probe(struct platform_device *pdev) ret = svs_get_efuse_data(svsp, "svs-calibration-data", &svsp->efuse, &svsp->efuse_max); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Cannot read SVS calibration\n"); + + ret = svs_get_efuse_data(svsp, "t-calibration-data", + &svsp->tefuse, &svsp->tefuse_max); if (ret) { - ret = -EPERM; + dev_err_probe(&pdev->dev, ret, "Cannot read SVS-Thermal calibration\n"); goto svs_probe_free_efuse; } - if (!svsp_data->efuse_parsing(svsp)) { - dev_err(svsp->dev, "efuse data parsing failed\n"); - ret = -EPERM; + if (!svsp_data->efuse_parsing(svsp, svsp_data)) { + ret = dev_err_probe(svsp->dev, -EINVAL, "efuse data parsing failed\n"); goto svs_probe_free_tefuse; } ret = svs_bank_resource_setup(svsp); if (ret) { - dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret); + dev_err_probe(svsp->dev, ret, "svs bank resource setup fail\n"); goto svs_probe_free_tefuse; } @@ -2532,43 +2866,40 @@ static int svs_probe(struct platform_device *pdev) svsp->main_clk = devm_clk_get(svsp->dev, "main"); if (IS_ERR(svsp->main_clk)) { - dev_err(svsp->dev, "failed to get clock: %ld\n", - PTR_ERR(svsp->main_clk)); - ret = PTR_ERR(svsp->main_clk); + ret = dev_err_probe(svsp->dev, PTR_ERR(svsp->main_clk), + "failed to get clock\n"); goto svs_probe_free_tefuse; } ret = clk_prepare_enable(svsp->main_clk); if (ret) { - dev_err(svsp->dev, "cannot enable main clk: %d\n", ret); + dev_err_probe(svsp->dev, ret, "cannot enable main clk\n"); goto svs_probe_free_tefuse; } svsp->base = of_iomap(svsp->dev->of_node, 0); if (IS_ERR_OR_NULL(svsp->base)) { - dev_err(svsp->dev, "cannot find svs register base\n"); - ret = -EINVAL; + ret = dev_err_probe(svsp->dev, -EINVAL, "cannot find svs register base\n"); 
goto svs_probe_clk_disable; } ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, IRQF_ONESHOT, svsp_data->name, svsp); if (ret) { - dev_err(svsp->dev, "register irq(%d) failed: %d\n", - svsp_irq, ret); + dev_err_probe(svsp->dev, ret, "register irq(%d) failed\n", svsp_irq); goto svs_probe_iounmap; } ret = svs_start(svsp); if (ret) { - dev_err(svsp->dev, "svs start fail: %d\n", ret); + dev_err_probe(svsp->dev, ret, "svs start fail\n"); goto svs_probe_iounmap; } #ifdef CONFIG_DEBUG_FS ret = svs_create_debug_cmds(svsp); if (ret) { - dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret); + dev_err_probe(svsp->dev, ret, "svs create debug cmds fail\n"); goto svs_probe_iounmap; } #endif @@ -2577,18 +2908,12 @@ static int svs_probe(struct platform_device *pdev) svs_probe_iounmap: iounmap(svsp->base); - svs_probe_clk_disable: clk_disable_unprepare(svsp->main_clk); - svs_probe_free_tefuse: - if (!IS_ERR_OR_NULL(svsp->tefuse)) - kfree(svsp->tefuse); - + kfree(svsp->tefuse); svs_probe_free_efuse: - if (!IS_ERR_OR_NULL(svsp->efuse)) - kfree(svsp->efuse); - + kfree(svsp->efuse); return ret; } @@ -2606,5 +2931,6 @@ static struct platform_driver svs_driver = { module_platform_driver(svs_driver); MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>"); +MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>"); MODULE_DESCRIPTION("MediaTek SVS driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig index eb656b33156b..9b0fdd95276e 100644 --- a/drivers/soc/microchip/Kconfig +++ b/drivers/soc/microchip/Kconfig @@ -1,6 +1,7 @@ config POLARFIRE_SOC_SYS_CTRL tristate "POLARFIRE_SOC_SYS_CTRL" depends on POLARFIRE_SOC_MAILBOX + depends on MTD help This driver adds support for the PolarFire SoC (MPFS) system controller. 
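The mtk-svs.c hunks above replace the per-SoC efuse parsers with a table-driven lookup through svs_get_fuse_val(). A minimal illustrative sketch (not part of the patch) of the equivalence, assuming the MT8192 GPU-low dev_fuse_map entry { 10, 16 } corresponds to BDES as in the removed parser, and using the .index/.ofst field names visible in svs_get_fuse_val():

	/*
	 * Illustrative sketch only: the fusemap entry { .index = 10, .ofst = 16 }
	 * makes svs_get_fuse_val() return the same value the removed mt8192
	 * parser computed by hand for the GPU-low bank.
	 */
	static u32 example_mt8192_gpu_low_bdes(u32 *efuse)
	{
		const struct svs_fusemap bdes = { .index = 10, .ofst = 16 };

		/* Equivalent to the old (svsp->efuse[10] >> 16) & GENMASK(7, 0) */
		return svs_get_fuse_val(efuse, &bdes, 8);
	}

A negative .index (as in the MT8192/MT8188/MT8186 glb_fuse_map entries) makes the helper return FUSE_DATA_NOT_VALID, which is how banks without a fused FT_PGM or VMIN field are handled.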
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c index 0935e9e94172..7a4936019329 100644 --- a/drivers/soc/microchip/mpfs-sys-controller.c +++ b/drivers/soc/microchip/mpfs-sys-controller.c @@ -12,6 +12,8 @@ #include <linux/kref.h> #include <linux/module.h> #include <linux/jiffies.h> +#include <linux/mtd/mtd.h> +#include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/mailbox_client.h> @@ -30,6 +32,7 @@ struct mpfs_sys_controller { struct mbox_client client; struct mbox_chan *chan; struct completion c; + struct mtd_info *flash; struct kref consumers; }; @@ -63,7 +66,9 @@ int mpfs_blocking_transaction(struct mpfs_sys_controller *sys_controller, struct */ if (!wait_for_completion_timeout(&sys_controller->c, timeout)) { ret = -EBADMSG; - dev_warn(sys_controller->client.dev, "MPFS sys controller service failed\n"); + dev_warn(sys_controller->client.dev, + "MPFS sys controller service failed with status: %d\n", + msg->response->resp_status); } else { ret = 0; } @@ -99,6 +104,12 @@ static void mpfs_sys_controller_put(void *data) kref_put(&sys_controller->consumers, mpfs_sys_controller_delete); } +struct mtd_info *mpfs_sys_controller_get_flash(struct mpfs_sys_controller *mpfs_client) +{ + return mpfs_client->flash; +} +EXPORT_SYMBOL(mpfs_sys_controller_get_flash); + static struct platform_device subdevs[] = { { .name = "mpfs-rng", @@ -107,19 +118,34 @@ static struct platform_device subdevs[] = { { .name = "mpfs-generic-service", .id = -1, - } + }, + { + .name = "mpfs-auto-update", + .id = -1, + }, }; static int mpfs_sys_controller_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mpfs_sys_controller *sys_controller; + struct device_node *np; int i, ret; sys_controller = kzalloc(sizeof(*sys_controller), GFP_KERNEL); if (!sys_controller) return -ENOMEM; + np = of_parse_phandle(dev->of_node, "microchip,bitstream-flash", 0); + if (!np) + goto no_flash; + + sys_controller->flash = of_get_mtd_device_by_node(np); + of_node_put(np); + if (IS_ERR(sys_controller->flash)) + return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n"); + +no_flash: sys_controller->client.dev = dev; sys_controller->client.rx_callback = mpfs_sys_controller_rx_callback; sys_controller->client.tx_block = 1U; @@ -138,7 +164,6 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sys_controller); - dev_info(&pdev->dev, "Registered MPFS system controller\n"); for (i = 0; i < ARRAY_SIZE(subdevs); i++) { subdevs[i].dev.parent = dev; @@ -146,6 +171,8 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev) dev_warn(dev, "Error registering sub device %s\n", subdevs[i].name); } + dev_info(&pdev->dev, "Registered MPFS system controller\n"); + return 0; } diff --git a/drivers/soc/pxa/ssp.c b/drivers/soc/pxa/ssp.c index a1e8a07f7275..7af04e8b8163 100644 --- a/drivers/soc/pxa/ssp.c +++ b/drivers/soc/pxa/ssp.c @@ -152,11 +152,11 @@ static int pxa_ssp_probe(struct platform_device *pdev) if (dev->of_node) { const struct of_device_id *id = of_match_device(of_match_ptr(pxa_ssp_of_ids), dev); - ssp->type = (int) id->data; + ssp->type = (uintptr_t) id->data; } else { const struct platform_device_id *id = platform_get_device_id(pdev); - ssp->type = (int) id->driver_data; + ssp->type = id->driver_data; /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id * starts from 0, do a translation here diff --git a/drivers/soc/qcom/Kconfig 
b/drivers/soc/qcom/Kconfig index b3634e10f6f5..c6ca4de42586 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -77,6 +77,18 @@ config QCOM_PDR_HELPERS select QCOM_QMI_HELPERS depends on NET +config QCOM_PMIC_PDCHARGER_ULOG + tristate "Qualcomm PMIC PDCharger ULOG driver" + depends on RPMSG + depends on EVENT_TRACING + help + The Qualcomm PMIC PDCharger ULOG driver provides access to logs of + the ADSP firmware PDCharger module in charge of Battery and Power + Delivery on modern systems. + + Say yes here to support PDCharger ULOG event tracing on modern + Qualcomm platforms. + config QCOM_PMIC_GLINK tristate "Qualcomm PMIC GLINK driver" depends on RPMSG @@ -86,6 +98,7 @@ config QCOM_PMIC_GLINK depends on OF select AUXILIARY_BUS select QCOM_PDR_HELPERS + select DRM_AUX_HPD_BRIDGE help The Qualcomm PMIC GLINK driver provides access, over GLINK, to the USB and battery firmware running on one of the coprocessors in @@ -209,6 +222,7 @@ config QCOM_STATS tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver" depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST depends on QCOM_SMEM + depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n help Qualcomm Technologies, Inc. (QTI) Sleep stats driver to read the shared memory exported by the remote processor related to diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index bbca2e1e55bb..05b3d54e8dc9 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -9,6 +9,8 @@ obj-$(CONFIG_QCOM_OCMEM) += ocmem.o obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink.o obj-$(CONFIG_QCOM_PMIC_GLINK) += pmic_glink_altmode.o +obj-$(CONFIG_QCOM_PMIC_PDCHARGER_ULOG) += pmic_pdcharger_ulog.o +CFLAGS_pmic_pdcharger_ulog.o := -I$(src) obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o qmi_helpers-y += qmi_encdec.o qmi_interface.o obj-$(CONFIG_QCOM_RAMP_CTRL) += ramp_controller.o diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index 674abd0d6700..4ca88eaebf06 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -47,7 +47,7 @@ #define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K) #define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n) #define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n) -#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_8 * n) +#define LLCC_TRP_ATTR2_CFGn(n) (0x21100 + SZ_4 * n) #define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00 #define LLCC_TRP_PCB_ACT 0x21f04 @@ -92,6 +92,19 @@ * @write_scid_en: Bit enables write cache support for a given scid. * @write_scid_cacheable_en: Enables write cache cacheable support for a * given scid (not supported on v2 or older hardware). + * @stale_en: Bit enables stale. + * @stale_cap_en: Bit enables stale only if current scid is over-cap. + * @mru_uncap_en: Roll-over on reserved cache ways if current scid is + * under-cap. + * @mru_rollover: Roll-over on reserved cache ways. + * @alloc_oneway_en: Allways allocate one way on over-cap even if there's no + * same-scid lines for replacement. + * @ovcap_en: Once current scid is over-capacity, allocate other over-cap SCID. + * @ovcap_prio: Once current scid is over-capacity, allocate other low priority + * over-cap scid. Depends on corresponding bit being set in + * ovcap_en. + * @vict_prio: When current scid is under-capacity, allocate over other + * lower-than victim priority-line threshold scid. 
*/ struct llcc_slice_config { u32 usecase_id; @@ -362,6 +375,33 @@ static const struct llcc_slice_config sm8550_data[] = { {LLCC_VIDVSP, 28, 256, 4, 1, 0xFFFFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, }; +static const struct llcc_slice_config sm8650_data[] = { + {LLCC_CPUSS, 1, 5120, 1, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_AUDIO, 6, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MDMHPGRW, 25, 1024, 3, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MODHW, 26, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CMPT, 10, 4096, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_GPUHTW, 11, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_GPU, 9, 3096, 1, 0, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MMUHWT, 18, 768, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_DISP, 16, 6144, 1, 1, 0xFFFFFF, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MDMHPFX, 24, 1024, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MDMPNG, 27, 1024, 0, 1, 0x000000, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_AUDHW, 22, 1024, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CVP, 8, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MODPE, 29, 128, 1, 1, 0xF00000, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP0, 4, 256, 3, 1, 0xF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP1, 7, 3200, 3, 1, 0xFFFFF0, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CMPTHCP, 17, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_LCPDARE, 30, 128, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, + {LLCC_AENPU, 3, 3072, 1, 1, 0xFFFFFF, 0x0, 2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_ISLAND1, 12, 5888, 7, 1, 0x0, 0x7FFFFF, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_DISP_WB, 23, 1024, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_VIDVSP, 28, 256, 3, 1, 0xFFFFFF, 0x0, 0, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, +}; + static const struct llcc_slice_config qdu1000_data_2ch[] = { { LLCC_MDMHPGRW, 7, 512, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 }, { LLCC_MODHW, 9, 256, 1, 1, 0xfff, 0x0, 0, 0, 0, 1, 0, 0, 0 }, @@ -392,6 +432,29 @@ static const struct llcc_slice_config qdu1000_data_8ch[] = { { LLCC_WRCACHE, 31, 512, 1, 1, 0x3, 0x0, 0, 0, 0, 0, 1, 0, 0 }, }; +static const struct llcc_slice_config x1e80100_data[] = { + {LLCC_CPUSS, 1, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_VIDSC0, 2, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_AUDIO, 6, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CMPT, 10, 6144, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_GPUHTW, 11, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_GPU, 9, 4096, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_MMUHWT, 18, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + 
{LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CVP, 8, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_WRCACHE, 31, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP1, 7, 3072, 2, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_LCPDARE, 30, 512, 3, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_AENPU, 3, 3072, 1, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_ISLAND1, 12, 512, 7, 1, 0x1, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_ISLAND2, 13, 512, 7, 1, 0x2, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_ISLAND3, 14, 512, 7, 1, 0x3, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_ISLAND4, 15, 512, 7, 1, 0x4, 0x0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP2, 19, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP3, 20, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {LLCC_CAMEXP4, 21, 3072, 3, 1, 0xFFF, 0x0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, +}; + static const struct llcc_edac_reg_offset llcc_v1_edac_reg_offset = { .trp_ecc_error_status0 = 0x20344, .trp_ecc_error_status1 = 0x20348, @@ -610,6 +673,26 @@ static const struct qcom_llcc_config sm8550_cfg[] = { }, }; +static const struct qcom_llcc_config sm8650_cfg[] = { + { + .sct_data = sm8650_data, + .size = ARRAY_SIZE(sm8650_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v2_1_reg_offset, + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, + }, +}; + +static const struct qcom_llcc_config x1e80100_cfg[] = { + { + .sct_data = x1e80100_data, + .size = ARRAY_SIZE(x1e80100_data), + .need_llcc_cfg = true, + .reg_offset = llcc_v2_1_reg_offset, + .edac_reg_offset = &llcc_v2_1_edac_reg_offset, + }, +}; + static const struct qcom_sct_config qdu1000_cfgs = { .llcc_config = qdu1000_cfg, .num_config = ARRAY_SIZE(qdu1000_cfg), @@ -675,6 +758,16 @@ static const struct qcom_sct_config sm8550_cfgs = { .num_config = ARRAY_SIZE(sm8550_cfg), }; +static const struct qcom_sct_config sm8650_cfgs = { + .llcc_config = sm8650_cfg, + .num_config = ARRAY_SIZE(sm8650_cfg), +}; + +static const struct qcom_sct_config x1e80100_cfgs = { + .llcc_config = x1e80100_cfg, + .num_config = ARRAY_SIZE(x1e80100_cfg), +}; + static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; /** @@ -715,7 +808,7 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid) EXPORT_SYMBOL_GPL(llcc_slice_getd); /** - * llcc_slice_putd - llcc slice descritpor + * llcc_slice_putd - llcc slice descriptor * @desc: Pointer to llcc slice descriptor */ void llcc_slice_putd(struct llcc_slice_desc *desc) @@ -941,15 +1034,15 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, u32 disable_cap_alloc, retain_pc; disable_cap_alloc = config->dis_cap_alloc << config->slice_id; - ret = regmap_write(drv_data->bcast_regmap, - LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc); + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC, + BIT(config->slice_id), disable_cap_alloc); if (ret) return ret; if (drv_data->version < LLCC_VERSION_4_1_0_0) { retain_pc = config->retain_on_pc << config->slice_id; - ret = regmap_write(drv_data->bcast_regmap, - LLCC_TRP_PCB_ACT, retain_pc); + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT, + BIT(config->slice_id), retain_pc); if (ret) return ret; } @@ -1249,6 +1342,8 @@ static const struct of_device_id qcom_llcc_of_match[] = { 
{ .compatible = "qcom,sm8350-llcc", .data = &sm8350_cfgs }, { .compatible = "qcom,sm8450-llcc", .data = &sm8450_cfgs }, { .compatible = "qcom,sm8550-llcc", .data = &sm8550_cfgs }, + { .compatible = "qcom,sm8650-llcc", .data = &sm8650_cfgs }, + { .compatible = "qcom,x1e80100-llcc", .data = &x1e80100_cfgs }, { } }; MODULE_DEVICE_TABLE(of, qcom_llcc_of_match); diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 914057331afd..f4bfd24386f1 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -18,9 +18,6 @@ enum { PMIC_GLINK_CLIENT_UCSI, }; -#define PMIC_GLINK_CLIENT_DEFAULT (BIT(PMIC_GLINK_CLIENT_BATT) | \ - BIT(PMIC_GLINK_CLIENT_ALTMODE)) - struct pmic_glink { struct device *dev; struct pdr_handle *pdr; @@ -263,10 +260,10 @@ static int pmic_glink_probe(struct platform_device *pdev) mutex_init(&pg->state_lock); match_data = (unsigned long *)of_device_get_match_data(&pdev->dev); - if (match_data) - pg->client_mask = *match_data; - else - pg->client_mask = PMIC_GLINK_CLIENT_DEFAULT; + if (!match_data) + return -EINVAL; + + pg->client_mask = *match_data; if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) { ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi"); @@ -336,14 +333,17 @@ static void pmic_glink_remove(struct platform_device *pdev) mutex_unlock(&__pmic_glink_lock); } +static const unsigned long pmic_glink_sc8180x_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | + BIT(PMIC_GLINK_CLIENT_ALTMODE); + static const unsigned long pmic_glink_sm8450_client_mask = BIT(PMIC_GLINK_CLIENT_BATT) | BIT(PMIC_GLINK_CLIENT_ALTMODE) | BIT(PMIC_GLINK_CLIENT_UCSI); static const struct of_device_id pmic_glink_of_match[] = { - { .compatible = "qcom,sm8450-pmic-glink", .data = &pmic_glink_sm8450_client_mask }, - { .compatible = "qcom,sm8550-pmic-glink", .data = &pmic_glink_sm8450_client_mask }, - { .compatible = "qcom,pmic-glink" }, + { .compatible = "qcom,sc8180x-pmic-glink", .data = &pmic_glink_sc8180x_client_mask }, + { .compatible = "qcom,sc8280xp-pmic-glink", .data = &pmic_glink_sc8180x_client_mask }, + { .compatible = "qcom,pmic-glink", .data = &pmic_glink_sm8450_client_mask }, {} }; MODULE_DEVICE_TABLE(of, pmic_glink_of_match); @@ -363,14 +363,14 @@ static int pmic_glink_init(void) register_rpmsg_driver(&pmic_glink_rpmsg_driver); return 0; -}; +} module_init(pmic_glink_init); static void pmic_glink_exit(void) { unregister_rpmsg_driver(&pmic_glink_rpmsg_driver); platform_driver_unregister(&pmic_glink_driver); -}; +} module_exit(pmic_glink_exit); MODULE_DESCRIPTION("Qualcomm PMIC GLINK driver"); diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c index b78279e2f54c..5fcd0fdd2faa 100644 --- a/drivers/soc/qcom/pmic_glink_altmode.c +++ b/drivers/soc/qcom/pmic_glink_altmode.c @@ -11,7 +11,7 @@ #include <linux/mutex.h> #include <linux/property.h> #include <linux/soc/qcom/pdr.h> -#include <drm/drm_bridge.h> +#include <drm/bridge/aux-bridge.h> #include <linux/usb/typec_altmode.h> #include <linux/usb/typec_dp.h> @@ -76,7 +76,7 @@ struct pmic_glink_altmode_port { struct work_struct work; - struct drm_bridge bridge; + struct device *bridge; enum typec_orientation orientation; u16 svid; @@ -230,13 +230,13 @@ static void pmic_glink_altmode_worker(struct work_struct *work) else pmic_glink_altmode_enable_usb(altmode, alt_port); - if (alt_port->hpd_state) - drm_bridge_hpd_notify(&alt_port->bridge, connector_status_connected); - else - drm_bridge_hpd_notify(&alt_port->bridge, connector_status_disconnected); + 
drm_aux_hpd_bridge_notify(alt_port->bridge, + alt_port->hpd_state ? + connector_status_connected : + connector_status_disconnected); pmic_glink_altmode_request(altmode, ALTMODE_PAN_ACK, alt_port->index); -}; +} static enum typec_orientation pmic_glink_altmode_orientation(unsigned int orientation) { @@ -285,7 +285,7 @@ static void pmic_glink_altmode_sc8180xp_notify(struct pmic_glink_altmode *altmod svid = mux == 2 ? USB_TYPEC_DP_SID : 0; - if (!altmode->ports[port].altmode) { + if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) { dev_dbg(altmode->dev, "notification on undefined port %d\n", port); return; } @@ -328,7 +328,7 @@ static void pmic_glink_altmode_sc8280xp_notify(struct pmic_glink_altmode *altmod hpd_state = FIELD_GET(SC8280XP_HPD_STATE_MASK, notify->payload[8]); hpd_irq = FIELD_GET(SC8280XP_HPD_IRQ_MASK, notify->payload[8]); - if (!altmode->ports[port].altmode) { + if (port >= ARRAY_SIZE(altmode->ports) || !altmode->ports[port].altmode) { dev_dbg(altmode->dev, "notification on undefined port %d\n", port); return; } @@ -365,16 +365,6 @@ static void pmic_glink_altmode_callback(const void *data, size_t len, void *priv } } -static int pmic_glink_altmode_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; -} - -static const struct drm_bridge_funcs pmic_glink_altmode_bridge_funcs = { - .attach = pmic_glink_altmode_attach, -}; - static void pmic_glink_altmode_put_retimer(void *data) { typec_retimer_put(data); @@ -464,15 +454,10 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev, alt_port->index = port; INIT_WORK(&alt_port->work, pmic_glink_altmode_worker); - alt_port->bridge.funcs = &pmic_glink_altmode_bridge_funcs; - alt_port->bridge.of_node = to_of_node(fwnode); - alt_port->bridge.ops = DRM_BRIDGE_OP_HPD; - alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; - - ret = devm_drm_bridge_add(dev, &alt_port->bridge); - if (ret) { + alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode)); + if (IS_ERR(alt_port->bridge)) { fwnode_handle_put(fwnode); - return ret; + return PTR_ERR(alt_port->bridge); } alt_port->dp_alt.svid = USB_TYPEC_DP_SID; diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.c b/drivers/soc/qcom/pmic_pdcharger_ulog.c new file mode 100644 index 000000000000..238cd38589dc --- /dev/null +++ b/drivers/soc/qcom/pmic_pdcharger_ulog.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2022, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2023, Linaro Ltd + */ +#include <linux/of_device.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/rpmsg.h> +#include <linux/slab.h> +#include <linux/soc/qcom/pdr.h> +#include <linux/debugfs.h> + +#define CREATE_TRACE_POINTS +#include "pmic_pdcharger_ulog.h" + +#define MSG_OWNER_CHG_ULOG 32778 +#define MSG_TYPE_REQ_RESP 1 + +#define GET_CHG_ULOG_REQ 0x18 +#define SET_CHG_ULOG_PROP_REQ 0x19 + +#define LOG_DEFAULT_TIME_MS 1000 + +#define MAX_ULOG_SIZE 8192 + +struct pmic_pdcharger_ulog_hdr { + __le32 owner; + __le32 type; + __le32 opcode; +}; + +struct pmic_pdcharger_ulog { + struct rpmsg_device *rpdev; + struct delayed_work ulog_work; +}; + +struct get_ulog_req_msg { + struct pmic_pdcharger_ulog_hdr hdr; + u32 log_size; +}; + +struct get_ulog_resp_msg { + struct pmic_pdcharger_ulog_hdr hdr; + u8 buf[MAX_ULOG_SIZE]; +}; + +static int pmic_pdcharger_ulog_write_async(struct pmic_pdcharger_ulog *pg, void *data, size_t len) +{ + return rpmsg_send(pg->rpdev->ept, data, len); +} + +static int pmic_pdcharger_ulog_request(struct pmic_pdcharger_ulog *pg) +{ + struct get_ulog_req_msg req_msg = { + .hdr = { + .owner = cpu_to_le32(MSG_OWNER_CHG_ULOG), + .type = cpu_to_le32(MSG_TYPE_REQ_RESP), + .opcode = cpu_to_le32(GET_CHG_ULOG_REQ) + }, + .log_size = MAX_ULOG_SIZE + }; + + return pmic_pdcharger_ulog_write_async(pg, &req_msg, sizeof(req_msg)); +} + +static void pmic_pdcharger_ulog_work(struct work_struct *work) +{ + struct pmic_pdcharger_ulog *pg = container_of(work, struct pmic_pdcharger_ulog, + ulog_work.work); + int rc; + + rc = pmic_pdcharger_ulog_request(pg); + if (rc) { + dev_err(&pg->rpdev->dev, "Error requesting ulog, rc=%d\n", rc); + return; + } +} + +static void pmic_pdcharger_ulog_handle_message(struct pmic_pdcharger_ulog *pg, + struct get_ulog_resp_msg *resp_msg, + size_t len) +{ + char *token, *buf = resp_msg->buf; + + if (len != sizeof(*resp_msg)) { + dev_err(&pg->rpdev->dev, "Expected data length: %zu, received: %zu\n", + sizeof(*resp_msg), len); + return; + } + + buf[MAX_ULOG_SIZE - 1] = '\0'; + + do { + token = strsep((char **)&buf, "\n"); + if (token && strlen(token)) + trace_pmic_pdcharger_ulog_msg(token); + } while (token); +} + +static int pmic_pdcharger_ulog_rpmsg_callback(struct rpmsg_device *rpdev, void *data, + int len, void *priv, u32 addr) +{ + struct pmic_pdcharger_ulog *pg = dev_get_drvdata(&rpdev->dev); + struct pmic_pdcharger_ulog_hdr *hdr = data; + u32 opcode; + + opcode = le32_to_cpu(hdr->opcode); + + switch (opcode) { + case GET_CHG_ULOG_REQ: + schedule_delayed_work(&pg->ulog_work, msecs_to_jiffies(LOG_DEFAULT_TIME_MS)); + pmic_pdcharger_ulog_handle_message(pg, data, len); + break; + default: + dev_err(&pg->rpdev->dev, "Unknown opcode %u\n", opcode); + break; + } + + return 0; +} + +static int pmic_pdcharger_ulog_rpmsg_probe(struct rpmsg_device *rpdev) +{ + struct pmic_pdcharger_ulog *pg; + struct device *dev = &rpdev->dev; + + pg = devm_kzalloc(dev, sizeof(*pg), GFP_KERNEL); + if (!pg) + return -ENOMEM; + + pg->rpdev = rpdev; + INIT_DELAYED_WORK(&pg->ulog_work, pmic_pdcharger_ulog_work); + + dev_set_drvdata(dev, pg); + + pmic_pdcharger_ulog_request(pg); + + return 0; +} + +static void pmic_pdcharger_ulog_rpmsg_remove(struct rpmsg_device *rpdev) +{ + struct pmic_pdcharger_ulog *pg = dev_get_drvdata(&rpdev->dev); + + cancel_delayed_work_sync(&pg->ulog_work); +} + +static const struct rpmsg_device_id pmic_pdcharger_ulog_rpmsg_id_match[] = { + { "PMIC_LOGS_ADSP_APPS" }, + {} +}; + +static struct rpmsg_driver 
pmic_pdcharger_ulog_rpmsg_driver = { + .probe = pmic_pdcharger_ulog_rpmsg_probe, + .remove = pmic_pdcharger_ulog_rpmsg_remove, + .callback = pmic_pdcharger_ulog_rpmsg_callback, + .id_table = pmic_pdcharger_ulog_rpmsg_id_match, + .drv = { + .name = "qcom_pmic_pdcharger_ulog_rpmsg", + }, +}; + +module_rpmsg_driver(pmic_pdcharger_ulog_rpmsg_driver); +MODULE_DESCRIPTION("Qualcomm PMIC ChargerPD ULOG driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/pmic_pdcharger_ulog.h b/drivers/soc/qcom/pmic_pdcharger_ulog.h new file mode 100644 index 000000000000..152e3a6b5480 --- /dev/null +++ b/drivers/soc/qcom/pmic_pdcharger_ulog.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Linaro Ltd + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pmic_pdcharger_ulog + +#if !defined(_TRACE_PMIC_PDCHARGER_ULOG_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PMIC_PDCHARGER_ULOG_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(pmic_pdcharger_ulog_msg, + TP_PROTO(char *msg), + TP_ARGS(msg), + TP_STRUCT__entry( + __string(msg, msg) + ), + TP_fast_assign( + __assign_str(msg, msg); + ), + TP_printk("%s", __get_str(msg)) +); + +#endif /* _TRACE_PMIC_PDCHARGER_ULOG_H */ + +/* This part must be outside protection */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE pmic_pdcharger_ulog + +#include <trace/define_trace.h> diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index 51e05bec5bfc..6349a0debeb5 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -51,6 +51,11 @@ #define SMEM_IMAGE_TABLE_ADSP_INDEX 12 #define SMEM_IMAGE_TABLE_CNSS_INDEX 13 #define SMEM_IMAGE_TABLE_VIDEO_INDEX 14 +#define SMEM_IMAGE_TABLE_DSPS_INDEX 15 +#define SMEM_IMAGE_TABLE_CDSP_INDEX 16 +#define SMEM_IMAGE_TABLE_CDSP1_INDEX 19 +#define SMEM_IMAGE_TABLE_GPDSP_INDEX 20 +#define SMEM_IMAGE_TABLE_GPDSP1_INDEX 21 #define SMEM_IMAGE_VERSION_TABLE 469 /* @@ -65,6 +70,11 @@ static const char *const socinfo_image_names[] = { [SMEM_IMAGE_TABLE_RPM_INDEX] = "rpm", [SMEM_IMAGE_TABLE_TZ_INDEX] = "tz", [SMEM_IMAGE_TABLE_VIDEO_INDEX] = "video", + [SMEM_IMAGE_TABLE_DSPS_INDEX] = "dsps", + [SMEM_IMAGE_TABLE_CDSP_INDEX] = "cdsp", + [SMEM_IMAGE_TABLE_CDSP1_INDEX] = "cdsp1", + [SMEM_IMAGE_TABLE_GPDSP_INDEX] = "gpdsp", + [SMEM_IMAGE_TABLE_GPDSP1_INDEX] = "gpdsp1", }; static const char *const pmic_models[] = { @@ -93,7 +103,7 @@ static const char *const pmic_models[] = { [22] = "PM8821", [23] = "PM8038", [24] = "PM8005/PM8922", - [25] = "PM8917", + [25] = "PM8917/PM8937", [26] = "PM660L", [27] = "PM660", [30] = "PM8150", @@ -417,6 +427,7 @@ static const struct soc_id soc_id[] = { { qcom_board_id(SA8775P) }, { qcom_board_id(QRU1000) }, { qcom_board_id(QDU1000) }, + { qcom_board_id(SM8650) }, { qcom_board_id(SM4450) }, { qcom_board_id(QDU1010) }, { qcom_board_id(QRU1032) }, diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig index 0071864c2111..0986672f6375 100644 --- a/drivers/soc/renesas/Kconfig +++ b/drivers/soc/renesas/Kconfig @@ -340,6 +340,7 @@ if RISCV config ARCH_R9A07G043 bool "RISC-V Platform support for RZ/Five" depends on NONPORTABLE + depends on !DMA_DIRECT_REMAP depends on RISCV_ALTERNATIVE depends on !RISCV_ISA_ZICBOM depends on RISCV_SBI diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c index c732d4a5b26a..27eae1a354ab 100644 --- a/drivers/soc/renesas/renesas-soc.c +++ b/drivers/soc/renesas/renesas-soc.c @@ -486,10 +486,6 @@ static int 
__init renesas_soc_init(void) return -ENOMEM; } - np = of_find_node_by_path("/"); - of_property_read_string(np, "model", &soc_dev_attr->machine); - of_node_put(np); - soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL); soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL); diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index 3fd0f2b84dd3..b1118d37779e 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -59,6 +59,7 @@ static const struct exynos_soc_id { { "EXYNOS7885", 0xE7885000 }, { "EXYNOS850", 0xE3830000 }, { "EXYNOSAUTOV9", 0xAAA80000 }, + { "EXYNOSAUTOV920", 0x0A920000 }, }; static const char *product_id_to_soc_id(unsigned int product_id) diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig deleted file mode 100644 index 139884addc41..000000000000 --- a/drivers/soc/sifive/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -if ARCH_SIFIVE || ARCH_STARFIVE - -config SIFIVE_CCACHE - bool "Sifive Composable Cache controller" - help - Support for the composable cache controller on SiFive platforms. - -endif diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile deleted file mode 100644 index 1f5dc339bf82..000000000000 --- a/drivers/soc/sifive/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o diff --git a/drivers/soc/sifive/sifive_ccache.c b/drivers/soc/sifive/sifive_ccache.c deleted file mode 100644 index 3684f5b40a80..000000000000 --- a/drivers/soc/sifive/sifive_ccache.c +++ /dev/null @@ -1,272 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * SiFive composable cache controller Driver - * - * Copyright (C) 2018-2022 SiFive, Inc. 
- * - */ - -#define pr_fmt(fmt) "CCACHE: " fmt - -#include <linux/debugfs.h> -#include <linux/interrupt.h> -#include <linux/of_irq.h> -#include <linux/of_address.h> -#include <linux/device.h> -#include <linux/bitfield.h> -#include <asm/cacheinfo.h> -#include <soc/sifive/sifive_ccache.h> - -#define SIFIVE_CCACHE_DIRECCFIX_LOW 0x100 -#define SIFIVE_CCACHE_DIRECCFIX_HIGH 0x104 -#define SIFIVE_CCACHE_DIRECCFIX_COUNT 0x108 - -#define SIFIVE_CCACHE_DIRECCFAIL_LOW 0x120 -#define SIFIVE_CCACHE_DIRECCFAIL_HIGH 0x124 -#define SIFIVE_CCACHE_DIRECCFAIL_COUNT 0x128 - -#define SIFIVE_CCACHE_DATECCFIX_LOW 0x140 -#define SIFIVE_CCACHE_DATECCFIX_HIGH 0x144 -#define SIFIVE_CCACHE_DATECCFIX_COUNT 0x148 - -#define SIFIVE_CCACHE_DATECCFAIL_LOW 0x160 -#define SIFIVE_CCACHE_DATECCFAIL_HIGH 0x164 -#define SIFIVE_CCACHE_DATECCFAIL_COUNT 0x168 - -#define SIFIVE_CCACHE_CONFIG 0x00 -#define SIFIVE_CCACHE_CONFIG_BANK_MASK GENMASK_ULL(7, 0) -#define SIFIVE_CCACHE_CONFIG_WAYS_MASK GENMASK_ULL(15, 8) -#define SIFIVE_CCACHE_CONFIG_SETS_MASK GENMASK_ULL(23, 16) -#define SIFIVE_CCACHE_CONFIG_BLKS_MASK GENMASK_ULL(31, 24) - -#define SIFIVE_CCACHE_WAYENABLE 0x08 -#define SIFIVE_CCACHE_ECCINJECTERR 0x40 - -#define SIFIVE_CCACHE_MAX_ECCINTR 4 - -static void __iomem *ccache_base; -static int g_irq[SIFIVE_CCACHE_MAX_ECCINTR]; -static struct riscv_cacheinfo_ops ccache_cache_ops; -static int level; - -enum { - DIR_CORR = 0, - DATA_CORR, - DATA_UNCORR, - DIR_UNCORR, -}; - -#ifdef CONFIG_DEBUG_FS -static struct dentry *sifive_test; - -static ssize_t ccache_write(struct file *file, const char __user *data, - size_t count, loff_t *ppos) -{ - unsigned int val; - - if (kstrtouint_from_user(data, count, 0, &val)) - return -EINVAL; - if ((val < 0xFF) || (val >= 0x10000 && val < 0x100FF)) - writel(val, ccache_base + SIFIVE_CCACHE_ECCINJECTERR); - else - return -EINVAL; - return count; -} - -static const struct file_operations ccache_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .write = ccache_write -}; - -static void setup_sifive_debug(void) -{ - sifive_test = debugfs_create_dir("sifive_ccache_cache", NULL); - - debugfs_create_file("sifive_debug_inject_error", 0200, - sifive_test, NULL, &ccache_fops); -} -#endif - -static void ccache_config_read(void) -{ - u32 cfg; - - cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG); - pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n", - FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg), - FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg), - BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)), - BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg))); - - cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE); - pr_info("Index of the largest way enabled: %u\n", cfg); -} - -static const struct of_device_id sifive_ccache_ids[] = { - { .compatible = "sifive,fu540-c000-ccache" }, - { .compatible = "sifive,fu740-c000-ccache" }, - { .compatible = "sifive,ccache0" }, - { /* end of table */ } -}; - -static ATOMIC_NOTIFIER_HEAD(ccache_err_chain); - -int register_sifive_ccache_error_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_register(&ccache_err_chain, nb); -} -EXPORT_SYMBOL_GPL(register_sifive_ccache_error_notifier); - -int unregister_sifive_ccache_error_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&ccache_err_chain, nb); -} -EXPORT_SYMBOL_GPL(unregister_sifive_ccache_error_notifier); - -static int ccache_largest_wayenabled(void) -{ - return readl(ccache_base + SIFIVE_CCACHE_WAYENABLE) & 0xFF; -} - -static ssize_t 
number_of_ways_enabled_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", ccache_largest_wayenabled()); -} - -static DEVICE_ATTR_RO(number_of_ways_enabled); - -static struct attribute *priv_attrs[] = { - &dev_attr_number_of_ways_enabled.attr, - NULL, -}; - -static const struct attribute_group priv_attr_group = { - .attrs = priv_attrs, -}; - -static const struct attribute_group *ccache_get_priv_group(struct cacheinfo - *this_leaf) -{ - /* We want to use private group for composable cache only */ - if (this_leaf->level == level) - return &priv_attr_group; - else - return NULL; -} - -static irqreturn_t ccache_int_handler(int irq, void *device) -{ - unsigned int add_h, add_l; - - if (irq == g_irq[DIR_CORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_LOW); - pr_err("DirError @ 0x%08X.%08X\n", add_h, add_l); - /* Reading this register clears the DirError interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DIRECCFIX_COUNT); - atomic_notifier_call_chain(&ccache_err_chain, - SIFIVE_CCACHE_ERR_TYPE_CE, - "DirECCFix"); - } - if (irq == g_irq[DIR_UNCORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_LOW); - /* Reading this register clears the DirFail interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DIRECCFAIL_COUNT); - atomic_notifier_call_chain(&ccache_err_chain, - SIFIVE_CCACHE_ERR_TYPE_UE, - "DirECCFail"); - panic("CCACHE: DirFail @ 0x%08X.%08X\n", add_h, add_l); - } - if (irq == g_irq[DATA_CORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_LOW); - pr_err("DataError @ 0x%08X.%08X\n", add_h, add_l); - /* Reading this register clears the DataError interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DATECCFIX_COUNT); - atomic_notifier_call_chain(&ccache_err_chain, - SIFIVE_CCACHE_ERR_TYPE_CE, - "DatECCFix"); - } - if (irq == g_irq[DATA_UNCORR]) { - add_h = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_HIGH); - add_l = readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_LOW); - pr_err("DataFail @ 0x%08X.%08X\n", add_h, add_l); - /* Reading this register clears the DataFail interrupt sig */ - readl(ccache_base + SIFIVE_CCACHE_DATECCFAIL_COUNT); - atomic_notifier_call_chain(&ccache_err_chain, - SIFIVE_CCACHE_ERR_TYPE_UE, - "DatECCFail"); - } - - return IRQ_HANDLED; -} - -static int __init sifive_ccache_init(void) -{ - struct device_node *np; - struct resource res; - int i, rc, intr_num; - - np = of_find_matching_node(NULL, sifive_ccache_ids); - if (!np) - return -ENODEV; - - if (of_address_to_resource(np, 0, &res)) { - rc = -ENODEV; - goto err_node_put; - } - - ccache_base = ioremap(res.start, resource_size(&res)); - if (!ccache_base) { - rc = -ENOMEM; - goto err_node_put; - } - - if (of_property_read_u32(np, "cache-level", &level)) { - rc = -ENOENT; - goto err_unmap; - } - - intr_num = of_property_count_u32_elems(np, "interrupts"); - if (!intr_num) { - pr_err("No interrupts property\n"); - rc = -ENODEV; - goto err_unmap; - } - - for (i = 0; i < intr_num; i++) { - g_irq[i] = irq_of_parse_and_map(np, i); - rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc", - NULL); - if (rc) { - pr_err("Could not request IRQ %d\n", g_irq[i]); - goto err_free_irq; - } - } - of_node_put(np); - - ccache_config_read(); - - ccache_cache_ops.get_priv_group = ccache_get_priv_group; - riscv_set_cacheinfo_ops(&ccache_cache_ops); - -#ifdef 
CONFIG_DEBUG_FS - setup_sifive_debug(); -#endif - return 0; - -err_free_irq: - while (--i >= 0) - free_irq(g_irq[i], NULL); -err_unmap: - iounmap(ccache_base); -err_node_put: - of_node_put(np); - return rc; -} - -device_initcall(sifive_ccache_init); diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index 7fc3548e084c..59101bf7cf23 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -33,19 +33,35 @@ #define CTRLMMR_WKUP_JTAGID_MFG_TI 0x17 +#define JTAG_ID_PARTNO_AM65X 0xBB5A +#define JTAG_ID_PARTNO_J721E 0xBB64 +#define JTAG_ID_PARTNO_J7200 0xBB6D +#define JTAG_ID_PARTNO_AM64X 0xBB38 +#define JTAG_ID_PARTNO_J721S2 0xBB75 +#define JTAG_ID_PARTNO_AM62X 0xBB7E +#define JTAG_ID_PARTNO_J784S4 0xBB80 +#define JTAG_ID_PARTNO_AM62AX 0xBB8D +#define JTAG_ID_PARTNO_AM62PX 0xBB9D +#define JTAG_ID_PARTNO_J722S 0xBBA0 + static const struct k3_soc_id { unsigned int id; const char *family_name; } k3_soc_ids[] = { - { 0xBB5A, "AM65X" }, - { 0xBB64, "J721E" }, - { 0xBB6D, "J7200" }, - { 0xBB38, "AM64X" }, - { 0xBB75, "J721S2"}, - { 0xBB7E, "AM62X" }, - { 0xBB80, "J784S4" }, - { 0xBB8D, "AM62AX" }, - { 0xBB9D, "AM62PX" }, + { JTAG_ID_PARTNO_AM65X, "AM65X" }, + { JTAG_ID_PARTNO_J721E, "J721E" }, + { JTAG_ID_PARTNO_J7200, "J7200" }, + { JTAG_ID_PARTNO_AM64X, "AM64X" }, + { JTAG_ID_PARTNO_J721S2, "J721S2"}, + { JTAG_ID_PARTNO_AM62X, "AM62X" }, + { JTAG_ID_PARTNO_J784S4, "J784S4" }, + { JTAG_ID_PARTNO_AM62AX, "AM62AX" }, + { JTAG_ID_PARTNO_AM62PX, "AM62PX" }, + { JTAG_ID_PARTNO_J722S, "J722S" }, +}; + +static const char * const j721e_rev_string_map[] = { + "1.0", "1.1", }; static int @@ -63,6 +79,32 @@ k3_chipinfo_partno_to_names(unsigned int partno, return -ENODEV; } +static int +k3_chipinfo_variant_to_sr(unsigned int partno, unsigned int variant, + struct soc_device_attribute *soc_dev_attr) +{ + switch (partno) { + case JTAG_ID_PARTNO_J721E: + if (variant >= ARRAY_SIZE(j721e_rev_string_map)) + goto err_unknown_variant; + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%s", + j721e_rev_string_map[variant]); + break; + default: + variant++; + soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", + variant); + } + + if (!soc_dev_attr->revision) + return -ENOMEM; + + return 0; + +err_unknown_variant: + return -ENODEV; +} + static int k3_chipinfo_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; @@ -94,7 +136,6 @@ static int k3_chipinfo_probe(struct platform_device *pdev) variant = (jtag_id & CTRLMMR_WKUP_JTAGID_VARIANT_MASK) >> CTRLMMR_WKUP_JTAGID_VARIANT_SHIFT; - variant++; partno_id = (jtag_id & CTRLMMR_WKUP_JTAGID_PARTNO_MASK) >> CTRLMMR_WKUP_JTAGID_PARTNO_SHIFT; @@ -103,16 +144,16 @@ static int k3_chipinfo_probe(struct platform_device *pdev) if (!soc_dev_attr) return -ENOMEM; - soc_dev_attr->revision = kasprintf(GFP_KERNEL, "SR%x.0", variant); - if (!soc_dev_attr->revision) { - ret = -ENOMEM; + ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr); + if (ret) { + dev_err(dev, "Unknown SoC JTAGID[0x%08X]: %d\n", jtag_id, ret); goto err; } - ret = k3_chipinfo_partno_to_names(partno_id, soc_dev_attr); + ret = k3_chipinfo_variant_to_sr(partno_id, variant, soc_dev_attr); if (ret) { - dev_err(dev, "Unknown SoC JTAGID[0x%08X]: %d\n", jtag_id, ret); - goto err_free_rev; + dev_err(dev, "Unknown SoC SR[0x%08X]: %d\n", jtag_id, ret); + goto err; } node = of_find_node_by_path("/"); diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 86a048a10a13..253299e4214d 100644 --- 
a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -77,11 +77,26 @@ struct registered_event_data { static bool xlnx_is_error_event(const u32 node_id) { - if (node_id == EVENT_ERROR_PMC_ERR1 || - node_id == EVENT_ERROR_PMC_ERR2 || - node_id == EVENT_ERROR_PSM_ERR1 || - node_id == EVENT_ERROR_PSM_ERR2) - return true; + u32 pm_family_code, pm_sub_family_code; + + zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); + + if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) { + if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 || + node_id == VERSAL_EVENT_ERROR_PMC_ERR2 || + node_id == VERSAL_EVENT_ERROR_PSM_ERR1 || + node_id == VERSAL_EVENT_ERROR_PSM_ERR2) + return true; + } else { + if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 || + node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 || + node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4) + return true; + } return false; } @@ -477,13 +492,13 @@ static void xlnx_call_notify_cb_handler(const u32 *payload) } } if (!is_callback_found) - pr_warn("Didn't find any registered callback for 0x%x 0x%x\n", + pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n", payload[1], payload[2]); } static void xlnx_get_event_callback_data(u32 *buf) { - zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0); } static irqreturn_t xlnx_event_handler(int irq, void *dev_id) @@ -555,7 +570,7 @@ static void xlnx_disable_percpu_irq(void *data) static int xlnx_event_init_sgi(struct platform_device *pdev) { int ret = 0; - int cpu = smp_processor_id(); + int cpu; /* * IRQ related structures are used for the following: * for each SGI interrupt ensure its mapped by GIC IRQ domain @@ -592,9 +607,12 @@ static int xlnx_event_init_sgi(struct platform_device *pdev) sgi_fwspec.param[0] = sgi_num; virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec); + cpu = get_cpu(); per_cpu(cpu_number1, cpu) = cpu; ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt", &cpu_number1); + put_cpu(); + WARN_ON(ret); if (ret) { irq_dispose_mapping(virq_sgi); @@ -653,7 +671,11 @@ static int xlnx_event_manager_probe(struct platform_device *pdev) ret = zynqmp_pm_register_sgi(sgi_num, 0); if (ret) { - dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret); + if (ret == -EOPNOTSUPP) + dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n"); + else + dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret); + xlnx_event_cleanup_sgi(pdev); return ret; } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index c2c819701eec..965b1143936a 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -51,7 +51,7 @@ static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; static void zynqmp_pm_get_callback_data(u32 *buf) { - zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0); } static void suspend_event_callback(const u32 *payload, void *data) @@ -83,9 +83,11 @@ static irqreturn_t zynqmp_pm_isr(int irq, void *data) pm_suspend(PM_SUSPEND_MEM); break; default: - pr_err("%s Unsupported InitSuspendCb reason " - "code %d\n", __func__, payload[1]); + pr_err("%s Unsupported InitSuspendCb reason code %d\n", + __func__, payload[1]); } + } 
else { + pr_err("%s() Unsupported Callback %d\n", __func__, payload[0]); } return IRQ_HANDLED; @@ -252,8 +254,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev) dev_name(&pdev->dev), &pdev->dev); if (ret) { - dev_err(&pdev->dev, "devm_request_threaded_irq '%d' " - "failed with %d\n", irq, ret); + dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed with %d\n", + irq, ret); return ret; } } else { @@ -275,7 +277,7 @@ static int zynqmp_pm_probe(struct platform_device *pdev) return 0; } -static int zynqmp_pm_remove(struct platform_device *pdev) +static void zynqmp_pm_remove(struct platform_device *pdev) { sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); if (event_registered) @@ -283,8 +285,6 @@ static int zynqmp_pm_remove(struct platform_device *pdev) if (!rx_chan) mbox_free_channel(rx_chan); - - return 0; } static const struct of_device_id pm_of_match[] = { @@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(of, pm_of_match); static struct platform_driver zynqmp_pm_platform_driver = { .probe = zynqmp_pm_probe, - .remove = zynqmp_pm_remove, + .remove_new = zynqmp_pm_remove, .driver = { .name = "zynqmp_power", .of_match_table = pm_of_match, |
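
The zynqmp_power.c hunks above convert the driver from the int-returning .remove callback to the void-returning callback wired up through .remove_new: the return type changes to void, the trailing "return 0" is dropped, and the platform_driver field switches from .remove to .remove_new. A minimal sketch of the same pattern, assuming a hypothetical foo driver (the foo_* names are illustrative and not part of this patch):

// SPDX-License-Identifier: GPL-2.0
/* Sketch of a platform driver using the void-returning remove callback. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Acquire resources, preferably via devm_* so remove stays trivial. */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/*
	 * Returns void: the driver core cannot do anything useful with an
	 * error at remove time, which is what motivates the
	 * zynqmp_pm_remove() conversion in the hunks above.
	 */
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,	/* void-returning variant used by this series */
	.driver = {
		.name = "foo-example",	/* hypothetical name */
	},
};
module_platform_driver(foo_driver);

MODULE_DESCRIPTION("Illustrative void-remove example");
MODULE_LICENSE("GPL");

The conversion is mechanical and behaviour-preserving; cleanup that previously ran before "return 0" simply runs to the end of the void function.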