Diffstat (limited to 'drivers/mailbox')
-rw-r--r--  drivers/mailbox/Kconfig                 |  43
-rw-r--r--  drivers/mailbox/Makefile                |   8
-rw-r--r--  drivers/mailbox/ast2700-mailbox.c       | 235
-rw-r--r--  drivers/mailbox/bcm74110-mailbox.c      | 656
-rw-r--r--  drivers/mailbox/cix-mailbox.c           | 645
-rw-r--r--  drivers/mailbox/cv1800-mailbox.c        | 220
-rw-r--r--  drivers/mailbox/imx-mailbox.c           |  21
-rw-r--r--  drivers/mailbox/mailbox-altera.c        |   2
-rw-r--r--  drivers/mailbox/mailbox.c               | 199
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c      |  61
-rw-r--r--  drivers/mailbox/pcc.c                   | 102
-rw-r--r--  drivers/mailbox/qcom-apcs-ipc-mailbox.c |  16
-rw-r--r--  drivers/mailbox/qcom-ipcc.c             |   3
13 files changed, 2033 insertions, 178 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ed52db272f4d..02432d4a5ccd 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -36,6 +36,25 @@ config ARM_MHU_V3
that provides different means of transports: supported extensions
will be discovered and possibly managed at probe-time.
+config AST2700_MBOX
+ tristate "ASPEED AST2700 IPC driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ help
+ Mailbox driver implementation for ASPEED AST27XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. The driver provides mailbox support for sending interrupts to
+ the clients. Say Y here if you want to build this driver.
+
+config CV1800_MBOX
+ tristate "cv1800 mailbox"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ help
+ Mailbox driver implementation for Sophgo CV18XX SoCs. This driver
+ can be used to send messages between the different processors in the
+ SoC. Any processor can write data into a channel and set the
+ corresponding register to raise an interrupt to notify another
+ processor; a processor may also send data to itself.
+
config EXYNOS_MBOX
tristate "Exynos Mailbox"
depends on ARCH_EXYNOS || COMPILE_TEST
@@ -191,8 +210,8 @@ config POLARFIRE_SOC_MAILBOX
config MCHP_SBI_IPC_MBOX
tristate "Microchip Inter-processor Communication (IPC) SBI driver"
- depends on RISCV_SBI || COMPILE_TEST
- depends on ARCH_MICROCHIP
+ depends on RISCV_SBI
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
Mailbox implementation for Microchip devices with an
Inter-process communication (IPC) controller.
@@ -330,4 +349,24 @@ config THEAD_TH1520_MBOX
kernel is running, and E902 core used for power management among other
things.
+config CIX_MBOX
+ tristate "CIX Mailbox"
+ depends on ARCH_CIX || COMPILE_TEST
+ depends on OF
+ help
+ Mailbox implementation for the CIX IPC system. The controller
+ supports 11 mailbox channels with different operating modes, and
+ every channel is unidirectional. Say Y here if you want to use the
+ CIX Mailbox support.
+
+config BCM74110_MAILBOX
+ tristate "Brcmstb BCM74110 Mailbox"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ Broadcom STB mailbox driver, present starting with the brcmstb
+ BCM74110 SoCs. The mailbox is a communication channel between the
+ host processor and a coprocessor that handles various power
+ management tasks and more.
+
endif
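As a point of reference for the help texts above, a minimal, hypothetical kernel client of one of these controllers could look like the sketch below. It only uses the generic mailbox client API; the function names, the 8-word payload, and the timeout are illustrative assumptions, not part of this series.

/* Illustrative client sketch -- not part of this series. */
#include <linux/mailbox_client.h>

static void demo_rx_callback(struct mbox_client *cl, void *msg)
{
        /* msg points at the controller driver's receive buffer */
        dev_info(cl->dev, "rx word0 0x%08x\n", ((u32 *)msg)[0]);
}

static int demo_send(struct device *dev)
{
        static u32 payload[8];                  /* e.g. 0x20 bytes on AST2700 */
        struct mbox_client cl = {
                .dev = dev,
                .rx_callback = demo_rx_callback,
                .tx_block = true,
                .tx_tout = 100,                 /* ms */
        };
        struct mbox_chan *chan;
        int ret;

        /* cl must stay valid for as long as the channel is held */
        chan = mbox_request_channel(&cl, 0);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = mbox_send_message(chan, payload);
        mbox_free_channel(chan);

        return ret < 0 ? ret : 0;
}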
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 9a1542b55539..98a68f838486 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -11,6 +11,10 @@ obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
obj-$(CONFIG_ARM_MHU_V3) += arm_mhuv3.o
+obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o
+
+obj-$(CONFIG_CV1800_MBOX) += cv1800-mailbox.o
+
obj-$(CONFIG_EXYNOS_MBOX) += exynos-mailbox.o
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
@@ -70,3 +74,7 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
+
+obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
+
+obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c
new file mode 100644
index 000000000000..83c6afe5411f
--- /dev/null
+++ b/drivers/mailbox/ast2700-mailbox.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Each bit in the register represents an IPC ID */
+#define IPCR_TX_TRIG 0x00
+#define IPCR_ENABLE 0x04
+#define IPCR_STATUS 0x08
+#define RX_IRQ(n) BIT(n)
+#define RX_IRQ_MASK 0xf
+#define IPCR_DATA 0x10
+
+struct ast2700_mbox_data {
+ u8 num_chans;
+ u8 msg_size;
+};
+
+struct ast2700_mbox {
+ struct mbox_controller mbox;
+ u8 msg_size;
+ void __iomem *tx_regs;
+ void __iomem *rx_regs;
+ spinlock_t lock;
+};
+
+static inline int ch_num(struct mbox_chan *chan)
+{
+ return chan - chan->mbox->chans;
+}
+
+static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx)
+{
+ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx));
+}
+
+static irqreturn_t ast2700_mbox_irq(int irq, void *p)
+{
+ struct ast2700_mbox *mb = p;
+ void __iomem *data_reg;
+ int num_words = mb->msg_size / sizeof(u32);
+ u32 *word_data;
+ u32 status;
+ int n, i;
+
+ /* Only examine channels that are currently enabled. */
+ status = readl(mb->rx_regs + IPCR_ENABLE) &
+ readl(mb->rx_regs + IPCR_STATUS);
+
+ if (!(status & RX_IRQ_MASK))
+ return IRQ_NONE;
+
+ for (n = 0; n < mb->mbox.num_chans; ++n) {
+ struct mbox_chan *chan = &mb->mbox.chans[n];
+
+ if (!(status & RX_IRQ(n)))
+ continue;
+
+ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;
+ word_data = chan->con_priv;
+ /* Read the message data */
+ for (i = 0; i < num_words; i++)
+ word_data[i] = readl(data_reg + i * sizeof(u32));
+
+ mbox_chan_received_data(chan, chan->con_priv);
+
+ /* The IRQ can be cleared only once the FIFO is empty. */
+ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;
+ u32 *word_data = data;
+ int num_words = mb->msg_size / sizeof(u32);
+ int i;
+
+ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx);
+ return -ENODEV;
+ }
+
+ if (!(ast2700_mbox_tx_done(mb, idx))) {
+ dev_warn(mb->mbox.dev, "%s: Ch-%d last data has not finished\n", __func__, idx);
+ return -EBUSY;
+ }
+
+ /* Write the message data */
+ for (i = 0; i < num_words; i++)
+ writel(word_data[i], data_reg + i * sizeof(u32));
+
+ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG);
+ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx);
+
+ return 0;
+}
+
+static int ast2700_mbox_startup(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) | BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+
+ return 0;
+}
+
+static void ast2700_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+ void __iomem *reg = mb->rx_regs + IPCR_ENABLE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mb->lock, flags);
+ writel(readl(reg) & ~BIT(idx), reg);
+ spin_unlock_irqrestore(&mb->lock, flags);
+}
+
+static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev);
+ int idx = ch_num(chan);
+
+ return ast2700_mbox_tx_done(mb, idx);
+}
+
+static const struct mbox_chan_ops ast2700_mbox_chan_ops = {
+ .send_data = ast2700_mbox_send_data,
+ .startup = ast2700_mbox_startup,
+ .shutdown = ast2700_mbox_shutdown,
+ .last_tx_done = ast2700_mbox_last_tx_done,
+};
+
+static int ast2700_mbox_probe(struct platform_device *pdev)
+{
+ struct ast2700_mbox *mb;
+ const struct ast2700_mbox_data *dev_data;
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ dev_data = device_get_match_data(&pdev->dev);
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans,
+ sizeof(*mb->mbox.chans), GFP_KERNEL);
+ if (!mb->mbox.chans)
+ return -ENOMEM;
+
+ /* con_priv of each channel is used to store the message received */
+ for (int i = 0; i < dev_data->num_chans; i++) {
+ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,
+ sizeof(u8), GFP_KERNEL);
+ if (!mb->mbox.chans[i].con_priv)
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx");
+ if (IS_ERR(mb->tx_regs))
+ return PTR_ERR(mb->tx_regs);
+
+ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx");
+ if (IS_ERR(mb->rx_regs))
+ return PTR_ERR(mb->rx_regs);
+
+ mb->msg_size = dev_data->msg_size;
+ mb->mbox.dev = dev;
+ mb->mbox.num_chans = dev_data->num_chans;
+ mb->mbox.ops = &ast2700_mbox_chan_ops;
+ mb->mbox.txdone_irq = false;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.txpoll_period = 5;
+ spin_lock_init(&mb->lock);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mb->mbox);
+}
+
+static const struct ast2700_mbox_data ast2700_dev_data = {
+ .num_chans = 4,
+ .msg_size = 0x20,
+};
+
+static const struct of_device_id ast2700_mbox_of_match[] = {
+ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match);
+
+static struct platform_driver ast2700_mbox_driver = {
+ .driver = {
+ .name = "ast2700-mailbox",
+ .of_match_table = ast2700_mbox_of_match,
+ },
+ .probe = ast2700_mbox_probe,
+};
+module_platform_driver(ast2700_mbox_driver);
+
+MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED AST2700 IPC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/bcm74110-mailbox.c b/drivers/mailbox/bcm74110-mailbox.c
new file mode 100644
index 000000000000..2e7e86f3e6a4
--- /dev/null
+++ b/drivers/mailbox/bcm74110-mailbox.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom BCM74110 Mailbox Driver
+ *
+ * Copyright (c) 2025 Broadcom
+ */
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/mailbox_controller.h>
+#include <linux/bitfield.h>
+#include <linux/slab.h>
+
+#define BCM_MBOX_BASE(sel) ((sel) * 0x40)
+#define BCM_MBOX_IRQ_BASE(sel) (((sel) * 0x20) + 0x800)
+
+#define BCM_MBOX_CFGA 0x0
+#define BCM_MBOX_CFGB 0x4
+#define BCM_MBOX_CFGC 0x8
+#define BCM_MBOX_CFGD 0xc
+#define BCM_MBOX_CTRL 0x10
+#define BCM_MBOX_CTRL_EN BIT(0)
+#define BCM_MBOX_CTRL_CLR BIT(1)
+#define BCM_MBOX_STATUS0 0x14
+#define BCM_MBOX_STATUS0_NOT_EMPTY BIT(28)
+#define BCM_MBOX_STATUS0_FULL BIT(29)
+#define BCM_MBOX_STATUS1 0x18
+#define BCM_MBOX_STATUS2 0x1c
+#define BCM_MBOX_WDATA 0x20
+#define BCM_MBOX_RDATA 0x28
+
+#define BCM_MBOX_IRQ_STATUS 0x0
+#define BCM_MBOX_IRQ_SET 0x4
+#define BCM_MBOX_IRQ_CLEAR 0x8
+#define BCM_MBOX_IRQ_MASK_STATUS 0xc
+#define BCM_MBOX_IRQ_MASK_SET 0x10
+#define BCM_MBOX_IRQ_MASK_CLEAR 0x14
+#define BCM_MBOX_IRQ_TIMEOUT BIT(0)
+#define BCM_MBOX_IRQ_NOT_EMPTY BIT(1)
+#define BCM_MBOX_IRQ_FULL BIT(2)
+#define BCM_MBOX_IRQ_LOW_WM BIT(3)
+#define BCM_MBOX_IRQ_HIGH_WM BIT(4)
+
+#define BCM_LINK_CODE0 0xbe0
+#define BCM_LINK_CODE1 0xbe1
+#define BCM_LINK_CODE2 0xbe2
+
+enum {
+ BCM_MSG_FUNC_LINK_START = 0,
+ BCM_MSG_FUNC_LINK_STOP,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ BCM_MSG_FUNC_MAX,
+};
+
+enum {
+ BCM_MSG_SVC_INIT = 0,
+ BCM_MSG_SVC_PMC,
+ BCM_MSG_SVC_SCMI,
+ BCM_MSG_SVC_DPFE,
+ BCM_MSG_SVC_MAX,
+};
+
+struct bcm74110_mbox_msg {
+ struct list_head list_entry;
+#define BCM_MSG_VERSION_MASK GENMASK(31, 29)
+#define BCM_MSG_VERSION 0x1
+#define BCM_MSG_REQ_MASK BIT(28)
+#define BCM_MSG_RPLY_MASK BIT(27)
+#define BCM_MSG_SVC_MASK GENMASK(26, 24)
+#define BCM_MSG_FUNC_MASK GENMASK(23, 16)
+#define BCM_MSG_LENGTH_MASK GENMASK(15, 4)
+#define BCM_MSG_SLOT_MASK GENMASK(3, 0)
+
+#define BCM_MSG_SET_FIELD(hdr, field, val) \
+ do { \
+ hdr &= ~BCM_MSG_##field##_MASK; \
+ hdr |= FIELD_PREP(BCM_MSG_##field##_MASK, val); \
+ } while (0)
+
+#define BCM_MSG_GET_FIELD(hdr, field) \
+ FIELD_GET(BCM_MSG_##field##_MASK, hdr)
+ u32 msg;
+};
+
+struct bcm74110_mbox_chan {
+ struct bcm74110_mbox *mbox;
+ bool en;
+ int slot;
+ int type;
+};
+
+struct bcm74110_mbox {
+ struct platform_device *pdev;
+ void __iomem *base;
+
+ int tx_chan;
+ int rx_chan;
+ int rx_irq;
+ struct list_head rx_svc_init_list;
+ spinlock_t rx_svc_list_lock;
+
+ struct mbox_controller controller;
+ struct bcm74110_mbox_chan *mbox_chan;
+};
+
+#define BCM74110_OFFSET_IO_WRITEL_MACRO(name, offset_base) \
+static void bcm74110_##name##_writel(struct bcm74110_mbox *mbox,\
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_WRITEL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_WRITEL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+#define BCM74110_OFFSET_IO_READL_MACRO(name, offset_base) \
+static u32 bcm74110_##name##_readl(struct bcm74110_mbox *mbox, \
+ u32 off) \
+{ \
+ return readl_relaxed(mbox->base + offset_base + off); \
+}
+BCM74110_OFFSET_IO_READL_MACRO(tx, BCM_MBOX_BASE(mbox->tx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(rx, BCM_MBOX_BASE(mbox->rx_chan));
+BCM74110_OFFSET_IO_READL_MACRO(irq, BCM_MBOX_IRQ_BASE(mbox->rx_chan));
+
+static inline struct bcm74110_mbox *bcm74110_mbox_from_cntrl(
+ struct mbox_controller *cntrl)
+{
+ return container_of(cntrl, struct bcm74110_mbox, controller);
+}
+
+static void bcm74110_rx_push_init_msg(struct bcm74110_mbox *mbox, u32 val)
+{
+ struct bcm74110_mbox_msg *msg;
+
+ msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ INIT_LIST_HEAD(&msg->list_entry);
+ msg->msg = val;
+
+ spin_lock(&mbox->rx_svc_list_lock);
+ list_add_tail(&msg->list_entry, &mbox->rx_svc_init_list);
+ spin_unlock(&mbox->rx_svc_list_lock);
+}
+
+static void bcm74110_rx_process_msg(struct bcm74110_mbox *mbox)
+{
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ struct mbox_chan *chan;
+ u32 msg, status;
+ int type;
+
+ do {
+ msg = bcm74110_rx_readl(mbox, BCM_MBOX_RDATA);
+ status = bcm74110_rx_readl(mbox, BCM_MBOX_STATUS0);
+
+ dev_dbg(dev, "rx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ type = BCM_MSG_GET_FIELD(msg, SVC);
+ switch (type) {
+ case BCM_MSG_SVC_INIT:
+ bcm74110_rx_push_init_msg(mbox, msg);
+ break;
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ chan = &mbox->controller.chans[type];
+ chan_priv = chan->con_priv;
+ if (chan_priv->en)
+ mbox_chan_received_data(chan, NULL);
+ else
+ dev_warn(dev, "Channel not enabled\n");
+ break;
+ default:
+ dev_warn(dev, "Unsupported msg received\n");
+ }
+ } while (status & BCM_MBOX_STATUS0_NOT_EMPTY);
+}
+
+static irqreturn_t bcm74110_mbox_isr(int irq, void *data)
+{
+ struct bcm74110_mbox *mbox = data;
+ u32 status;
+
+ status = bcm74110_irq_readl(mbox, BCM_MBOX_IRQ_STATUS);
+
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+
+ if (status & BCM_MBOX_IRQ_NOT_EMPTY)
+ bcm74110_rx_process_msg(mbox);
+ else
+ dev_warn(&mbox->pdev->dev, "Spurious interrupt\n");
+
+ return IRQ_HANDLED;
+}
+
+static void bcm74110_mbox_mask_and_clear(struct bcm74110_mbox *mbox)
+{
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_MASK_SET);
+ bcm74110_irq_writel(mbox, 0xffffffff, BCM_MBOX_IRQ_CLEAR);
+}
+
+static int bcm74110_rx_pop_init_msg(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_for_each_entry_safe(msg, msg_tmp, &mbox->rx_svc_init_list,
+ list_entry) {
+ if (BCM_MSG_GET_FIELD(msg->msg, FUNC) == func_type) {
+ list_del(&msg->list_entry);
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ if (!found)
+ return -EINVAL;
+
+ *val = msg->msg;
+ kfree(msg);
+
+ return 0;
+}
+
+static void bcm74110_rx_flush_msg(struct bcm74110_mbox *mbox)
+{
+ struct bcm74110_mbox_msg *msg, *msg_tmp;
+ LIST_HEAD(list_temp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->rx_svc_list_lock, flags);
+ list_splice_init(&mbox->rx_svc_init_list, &list_temp);
+ spin_unlock_irqrestore(&mbox->rx_svc_list_lock, flags);
+
+ list_for_each_entry_safe(msg, msg_tmp, &list_temp, list_entry) {
+ list_del(&msg->list_entry);
+ kfree(msg);
+ }
+}
+
+#define BCM_DEQUEUE_TIMEOUT_MS 30
+static int bcm74110_rx_pop_init_msg_block(struct bcm74110_mbox *mbox, u32 func_type,
+ u32 *val)
+{
+ int ret, timeout = 0;
+
+ do {
+ ret = bcm74110_rx_pop_init_msg(mbox, func_type, val);
+
+ if (!ret)
+ return 0;
+
+ /* TODO: Figure out what is a good sleep here. */
+ usleep_range(1000, 2000);
+ timeout++;
+ } while (timeout < BCM_DEQUEUE_TIMEOUT_MS);
+
+ dev_warn(&mbox->pdev->dev, "Timeout waiting for service init response\n");
+ return -ETIMEDOUT;
+}
+
+static int bcm74110_mbox_create_msg(int req, int rply, int svc, int func,
+ int length, int slot)
+{
+ u32 msg = 0;
+
+ BCM_MSG_SET_FIELD(msg, REQ, req);
+ BCM_MSG_SET_FIELD(msg, RPLY, rply);
+ BCM_MSG_SET_FIELD(msg, SVC, svc);
+ BCM_MSG_SET_FIELD(msg, FUNC, func);
+ BCM_MSG_SET_FIELD(msg, LENGTH, length);
+ BCM_MSG_SET_FIELD(msg, SLOT, slot);
+
+ return msg;
+}
+
+static int bcm74110_mbox_tx_msg(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int val;
+
+ /* We can potentially poll with timeout here instead */
+ val = bcm74110_tx_readl(mbox, BCM_MBOX_STATUS0);
+ if (val & BCM_MBOX_STATUS0_FULL) {
+ dev_err(&mbox->pdev->dev, "Mailbox full\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&mbox->pdev->dev, "tx: [{req=%lu|rply=%lu|srv=%lu|fn=%lu|length=%lu|slot=%lu]\n",
+ BCM_MSG_GET_FIELD(msg, REQ), BCM_MSG_GET_FIELD(msg, RPLY),
+ BCM_MSG_GET_FIELD(msg, SVC), BCM_MSG_GET_FIELD(msg, FUNC),
+ BCM_MSG_GET_FIELD(msg, LENGTH), BCM_MSG_GET_FIELD(msg, SLOT));
+
+ bcm74110_tx_writel(mbox, msg, BCM_MBOX_WDATA);
+
+ return 0;
+}
+
+#define BCM_MBOX_LINK_TRAINING_RETRIES 5
+static int bcm74110_mbox_link_training(struct bcm74110_mbox *mbox)
+{
+ int ret, retries = 0;
+ u32 msg = 0, orig_len = 0, len = BCM_LINK_CODE0;
+
+ do {
+ switch (len) {
+ case 0:
+ retries++;
+ dev_warn(&mbox->pdev->dev,
+ "Link train failed, trying again... %d\n",
+ retries);
+ if (retries > BCM_MBOX_LINK_TRAINING_RETRIES)
+ return -EINVAL;
+ len = BCM_LINK_CODE0;
+ fallthrough;
+ case BCM_LINK_CODE0:
+ case BCM_LINK_CODE1:
+ case BCM_LINK_CODE2:
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_START,
+ len, BCM_MSG_SVC_INIT);
+ break;
+ default:
+ break;
+ }
+
+ bcm74110_mbox_tx_msg(mbox, msg);
+
+ /* No response expected for LINK_CODE2 */
+ if (len == BCM_LINK_CODE2)
+ return 0;
+
+ orig_len = len;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox,
+ BCM_MSG_GET_FIELD(msg, FUNC),
+ &msg);
+ if (ret) {
+ len = 0;
+ continue;
+ }
+
+ if ((BCM_MSG_GET_FIELD(msg, SVC) != BCM_MSG_SVC_INIT) ||
+ (BCM_MSG_GET_FIELD(msg, FUNC) != BCM_MSG_FUNC_LINK_START) ||
+ (BCM_MSG_GET_FIELD(msg, SLOT) != 0) ||
+ (BCM_MSG_GET_FIELD(msg, RPLY) != 1) ||
+ (BCM_MSG_GET_FIELD(msg, REQ) != 0)) {
+ len = 0;
+ continue;
+ }
+
+ len = BCM_MSG_GET_FIELD(msg, LENGTH);
+
+ /* Make sure sequence is good */
+ if (len != (orig_len + 1)) {
+ len = 0;
+ continue;
+ }
+ } while (1);
+
+ return -EINVAL;
+}
+
+static int bcm74110_mbox_tx_msg_and_wait_ack(struct bcm74110_mbox *mbox, u32 msg)
+{
+ int ret;
+ u32 recv_msg;
+
+ ret = bcm74110_mbox_tx_msg(mbox, msg);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_rx_pop_init_msg_block(mbox, BCM_MSG_GET_FIELD(msg, FUNC),
+ &recv_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * Modify tx message to verify rx ack.
+ * Flip RPLY/REQ for synchronous messages
+ */
+ if (BCM_MSG_GET_FIELD(msg, REQ) == 1) {
+ BCM_MSG_SET_FIELD(msg, RPLY, 1);
+ BCM_MSG_SET_FIELD(msg, REQ, 0);
+ }
+
+ if (msg != recv_msg) {
+ dev_err(&mbox->pdev->dev, "Found ack, but ack is invalid\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Each index points to 0x100 of HAB MEM. IDX size counts from 0 */
+#define BCM_MBOX_HAB_MEM_IDX_START 0x30
+#define BCM_MBOX_HAB_MEM_IDX_SIZE 0x0
+static int bcm74110_mbox_shmem_init(struct bcm74110_mbox *mbox)
+{
+ u32 msg = 0;
+ int ret;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_STOP,
+ 0, BCM_MSG_SVC_INIT);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_TX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_SHMEM_RX,
+ BCM_MBOX_HAB_MEM_IDX_START,
+ BCM_MBOX_HAB_MEM_IDX_SIZE);
+ ret = bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bcm74110_mbox_init(struct bcm74110_mbox *mbox)
+{
+ int ret = 0;
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Clear status & restart tx/rx*/
+ bcm74110_tx_writel(mbox, BCM_MBOX_CTRL_EN | BCM_MBOX_CTRL_CLR,
+ BCM_MBOX_CTRL);
+
+ /* Unmask irq */
+ bcm74110_irq_writel(mbox, BCM_MBOX_IRQ_NOT_EMPTY, BCM_MBOX_IRQ_MASK_CLEAR);
+
+ ret = bcm74110_mbox_link_training(mbox);
+ if (ret) {
+ dev_err(&mbox->pdev->dev, "Training failed\n");
+ return ret;
+ }
+
+ return bcm74110_mbox_shmem_init(mbox);
+}
+
+static int bcm74110_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+ u32 msg;
+
+ switch (chan_priv->type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ msg = bcm74110_mbox_create_msg(1, 0, chan_priv->type, 0,
+ 128 + 28, chan_priv->slot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bcm74110_mbox_tx_msg(chan_priv->mbox, msg);
+}
+
+static int bcm74110_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = true;
+
+ return 0;
+}
+
+static void bcm74110_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct bcm74110_mbox_chan *chan_priv = chan->con_priv;
+
+ chan_priv->en = false;
+}
+
+static const struct mbox_chan_ops bcm74110_mbox_chan_ops = {
+ .send_data = bcm74110_mbox_send_data,
+ .startup = bcm74110_mbox_chan_startup,
+ .shutdown = bcm74110_mbox_chan_shutdown,
+};
+
+static void bcm74110_mbox_shutdown(struct platform_device *pdev)
+{
+ struct bcm74110_mbox *mbox = dev_get_drvdata(&pdev->dev);
+ u32 msg;
+
+ msg = bcm74110_mbox_create_msg(1, 0, BCM_MSG_SVC_INIT,
+ BCM_MSG_FUNC_LINK_STOP,
+ 0, 0);
+
+ bcm74110_mbox_tx_msg_and_wait_ack(mbox, msg);
+
+ /* Even if we don't receive ACK, lets shut it down */
+
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ /* Disable queues tx/rx */
+ bcm74110_tx_writel(mbox, 0x0, BCM_MBOX_CTRL);
+
+ /* Flush queues */
+ bcm74110_rx_flush_msg(mbox);
+}
+
+static struct mbox_chan *bcm74110_mbox_of_xlate(struct mbox_controller *cntrl,
+ const struct of_phandle_args *p)
+{
+ struct bcm74110_mbox *mbox = bcm74110_mbox_from_cntrl(cntrl);
+ struct device *dev = &mbox->pdev->dev;
+ struct bcm74110_mbox_chan *chan_priv;
+ int slot, type;
+
+ if (p->args_count != 2) {
+ dev_err(dev, "Invalid arguments\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = p->args[0];
+ slot = p->args[1];
+
+ switch (type) {
+ case BCM_MSG_SVC_PMC:
+ case BCM_MSG_SVC_SCMI:
+ case BCM_MSG_SVC_DPFE:
+ if (slot > BCM_MBOX_HAB_MEM_IDX_SIZE) {
+ dev_err(dev, "Not enough shared memory\n");
+ return ERR_PTR(-EINVAL);
+ }
+ chan_priv = cntrl->chans[type].con_priv;
+ chan_priv->slot = slot;
+ chan_priv->type = type;
+ break;
+ default:
+ dev_err(dev, "Invalid channel type: %d\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cntrl->chans[type];
+}
+
+static int bcm74110_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm74110_mbox *mbox;
+ int i, ret;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ mbox->pdev = pdev;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->base))
+ return dev_err_probe(dev, PTR_ERR(mbox->base), "Failed to iomap\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,tx", &mbox->tx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find tx channel\n");
+
+ ret = of_property_read_u32(dev->of_node, "brcm,rx", &mbox->rx_chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find rx channel\n");
+
+ mbox->rx_irq = platform_get_irq(pdev, 0);
+ if (mbox->rx_irq < 0)
+ return mbox->rx_irq;
+
+ INIT_LIST_HEAD(&mbox->rx_svc_init_list);
+ spin_lock_init(&mbox->rx_svc_list_lock);
+ bcm74110_mbox_mask_and_clear(mbox);
+
+ ret = devm_request_irq(dev, mbox->rx_irq, bcm74110_mbox_isr,
+ IRQF_NO_SUSPEND, pdev->name, mbox);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ mbox->controller.ops = &bcm74110_mbox_chan_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = BCM_MSG_SVC_MAX;
+ mbox->controller.of_xlate = &bcm74110_mbox_of_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+
+ mbox->mbox_chan = devm_kcalloc(dev, BCM_MSG_SVC_MAX,
+ sizeof(*mbox->mbox_chan),
+ GFP_KERNEL);
+ if (!mbox->mbox_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < BCM_MSG_SVC_MAX; i++) {
+ mbox->mbox_chan[i].mbox = mbox;
+ mbox->controller.chans[i].con_priv = &mbox->mbox_chan[i];
+ }
+
+ ret = devm_mbox_controller_register(dev, &mbox->controller);
+ if (ret)
+ return ret;
+
+ ret = bcm74110_mbox_init(mbox);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id bcm74110_mbox_of_match[] = {
+ { .compatible = "brcm,bcm74110-mbox", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm74110_mbox_of_match);
+
+static struct platform_driver bcm74110_mbox_driver = {
+ .driver = {
+ .name = "bcm74110-mbox",
+ .of_match_table = bcm74110_mbox_of_match,
+ },
+ .probe = bcm74110_mbox_probe,
+ .shutdown = bcm74110_mbox_shutdown,
+};
+module_platform_driver(bcm74110_mbox_driver);
+
+MODULE_AUTHOR("Justin Chen <justin.chen@broadcom.com>");
+MODULE_DESCRIPTION("BCM74110 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/cix-mailbox.c b/drivers/mailbox/cix-mailbox.c
new file mode 100644
index 000000000000..5bb1416c26a5
--- /dev/null
+++ b/drivers/mailbox/cix-mailbox.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Cix Technology Group Co., Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mailbox.h"
+
+/*
+ * The maximum transmission size is 32 words or 128 bytes.
+ */
+#define CIX_MBOX_MSG_WORDS 32 /* Max length = 32 words */
+#define CIX_MBOX_MSG_LEN_MASK 0x7fL /* Max length = 128 bytes */
+
+/* [0~7]  fast channels
+ * [8]    doorbell-based channel
+ * [9]    FIFO-based channel
+ * [10]   register-based channel
+ */
+#define CIX_MBOX_FAST_IDX 7
+#define CIX_MBOX_DB_IDX 8
+#define CIX_MBOX_FIFO_IDX 9
+#define CIX_MBOX_REG_IDX 10
+#define CIX_MBOX_CHANS 11
+
+/* Register define */
+#define CIX_REG_MSG(n) (0x0 + 0x4*(n)) /* 0x0~0x7c */
+#define CIX_REG_DB_ACK CIX_REG_MSG(CIX_MBOX_MSG_WORDS) /* 0x80 */
+#define CIX_ERR_COMP (CIX_REG_DB_ACK + 0x4) /* 0x84 */
+#define CIX_ERR_COMP_CLR (CIX_REG_DB_ACK + 0x8) /* 0x88 */
+#define CIX_REG_F_INT(IDX) (CIX_ERR_COMP_CLR + 0x4 * ((IDX) + 1)) /* 0x8c~0xa8 */
+#define CIX_FIFO_WR (CIX_REG_F_INT(CIX_MBOX_FAST_IDX + 1)) /* 0xac */
+#define CIX_FIFO_RD (CIX_FIFO_WR + 0x4) /* 0xb0 */
+#define CIX_FIFO_STAS (CIX_FIFO_WR + 0x8) /* 0xb4 */
+#define CIX_FIFO_WM (CIX_FIFO_WR + 0xc) /* 0xb8 */
+#define CIX_INT_ENABLE (CIX_FIFO_WR + 0x10) /* 0xbc */
+#define CIX_INT_ENABLE_SIDE_B (CIX_FIFO_WR + 0x14) /* 0xc0 */
+#define CIX_INT_CLEAR (CIX_FIFO_WR + 0x18) /* 0xc4 */
+#define CIX_INT_STATUS (CIX_FIFO_WR + 0x1c) /* 0xc8 */
+#define CIX_FIFO_RST (CIX_FIFO_WR + 0x20) /* 0xcc */
+
+#define CIX_MBOX_TX 0
+#define CIX_MBOX_RX 1
+
+#define CIX_DB_INT_BIT BIT(0)
+#define CIX_DB_ACK_INT_BIT BIT(1)
+
+#define CIX_FIFO_WM_DEFAULT CIX_MBOX_MSG_WORDS
+#define CIX_FIFO_STAS_WMK BIT(0)
+#define CIX_FIFO_STAS_FULL BIT(1)
+#define CIX_FIFO_STAS_EMPTY BIT(2)
+#define CIX_FIFO_STAS_UFLOW BIT(3)
+#define CIX_FIFO_STAS_OFLOW BIT(4)
+
+#define CIX_FIFO_RST_BIT BIT(0)
+
+#define CIX_DB_INT BIT(0)
+#define CIX_ACK_INT BIT(1)
+#define CIX_FIFO_FULL_INT BIT(2)
+#define CIX_FIFO_EMPTY_INT BIT(3)
+#define CIX_FIFO_WM01_INT BIT(4)
+#define CIX_FIFO_WM10_INT BIT(5)
+#define CIX_FIFO_OFLOW_INT BIT(6)
+#define CIX_FIFO_UFLOW_INT BIT(7)
+#define CIX_FIFO_N_EMPTY_INT BIT(8)
+#define CIX_FAST_CH_INT(IDX) BIT((IDX)+9)
+
+#define CIX_SHMEM_OFFSET 0x80
+
+enum cix_mbox_chan_type {
+ CIX_MBOX_TYPE_DB,
+ CIX_MBOX_TYPE_REG,
+ CIX_MBOX_TYPE_FIFO,
+ CIX_MBOX_TYPE_FAST,
+};
+
+struct cix_mbox_con_priv {
+ enum cix_mbox_chan_type type;
+ struct mbox_chan *chan;
+ int index;
+};
+
+struct cix_mbox_priv {
+ struct device *dev;
+ int irq;
+ int dir;
+ void __iomem *base; /* region for mailbox */
+ struct cix_mbox_con_priv con_priv[CIX_MBOX_CHANS];
+ struct mbox_chan mbox_chans[CIX_MBOX_CHANS];
+ struct mbox_controller mbox;
+ bool use_shmem;
+};
+
+/*
+ * The CIX mailbox supports four types of transfers:
+ * CIX_MBOX_TYPE_DB, CIX_MBOX_TYPE_FAST, CIX_MBOX_TYPE_REG, and CIX_MBOX_TYPE_FIFO.
+ * For the REG and FIFO types of transfers, the message format is as follows:
+ */
+union cix_mbox_msg_reg_fifo {
+ u32 length; /* unit is byte */
+ u32 buf[CIX_MBOX_MSG_WORDS]; /* buf[0] must be the byte length of this array */
+};
+
+static struct cix_mbox_priv *to_cix_mbox_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct cix_mbox_priv, mbox);
+}
+
+static void cix_mbox_write(struct cix_mbox_priv *priv, u32 val, u32 offset)
+{
+ if (priv->use_shmem)
+ iowrite32(val, priv->base + offset - CIX_SHMEM_OFFSET);
+ else
+ iowrite32(val, priv->base + offset);
+}
+
+static u32 cix_mbox_read(struct cix_mbox_priv *priv, u32 offset)
+{
+ if (priv->use_shmem)
+ return ioread32(priv->base + offset - CIX_SHMEM_OFFSET);
+ else
+ return ioread32(priv->base + offset);
+}
+
+static bool mbox_fifo_empty(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+ return cix_mbox_read(priv, CIX_FIFO_STAS) & CIX_FIFO_STAS_EMPTY;
+}
+
+/*
+ * The transmission unit of the CIX mailbox is a word (4 bytes), so the
+ * byte length must be converted into a word count.
+ */
+static inline u32 mbox_get_msg_size(void *msg)
+{
+ u32 len;
+
+ len = ((u32 *)msg)[0] & CIX_MBOX_MSG_LEN_MASK;
+ return DIV_ROUND_UP(len, 4);
+}
+
+static int cix_mbox_send_data_db(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+
+ /* trigger doorbell irq */
+ cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_reg(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ union cix_mbox_msg_reg_fifo *msg = data;
+ u32 len, i;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mbox_get_msg_size(data);
+ for (i = 0; i < len; i++)
+ cix_mbox_write(priv, msg->buf[i], CIX_REG_MSG(i));
+
+ /* trigger doorbell irq */
+ cix_mbox_write(priv, CIX_DB_INT_BIT, CIX_REG_DB_ACK);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_fifo(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ union cix_mbox_msg_reg_fifo *msg = data;
+ u32 len, val, i;
+
+ if (!data)
+ return -EINVAL;
+
+ len = mbox_get_msg_size(data);
+ cix_mbox_write(priv, len, CIX_FIFO_WM);
+ for (i = 0; i < len; i++)
+ cix_mbox_write(priv, msg->buf[i], CIX_FIFO_WR);
+
+ /* Enable fifo empty interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_FIFO_EMPTY_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+
+ return 0;
+}
+
+static int cix_mbox_send_data_fast(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ u32 *arg = (u32 *)data;
+ int index = cp->index;
+
+ if (!data)
+ return -EINVAL;
+
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid Mbox index %d\n", index);
+ return -EINVAL;
+ }
+
+ cix_mbox_write(priv, arg[0], CIX_REG_F_INT(index));
+
+ return 0;
+}
+
+static int cix_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+
+ if (priv->dir != CIX_MBOX_TX) {
+ dev_err(priv->dev, "Invalid Mbox dir %d\n", priv->dir);
+ return -EINVAL;
+ }
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ cix_mbox_send_data_db(chan, data);
+ break;
+ case CIX_MBOX_TYPE_REG:
+ cix_mbox_send_data_reg(chan, data);
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ cix_mbox_send_data_fifo(chan, data);
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ cix_mbox_send_data_fast(chan, data);
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cix_mbox_isr_db(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* rx interrupt is triggered */
+ if (int_status & CIX_DB_INT) {
+ cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+ mbox_chan_received_data(chan, NULL);
+ /* trigger ack interrupt */
+ cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+ }
+ } else {
+ /* tx ack interrupt is triggered */
+ if (int_status & CIX_ACK_INT) {
+ cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+ mbox_chan_received_data(chan, NULL);
+ }
+ }
+}
+
+static void cix_mbox_isr_reg(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* rx interrupt is triggered */
+ if (int_status & CIX_DB_INT) {
+ u32 data[CIX_MBOX_MSG_WORDS], len, i;
+
+ cix_mbox_write(priv, CIX_DB_INT, CIX_INT_CLEAR);
+ data[0] = cix_mbox_read(priv, CIX_REG_MSG(0));
+ len = mbox_get_msg_size(data);
+ for (i = 1; i < len; i++)
+ data[i] = cix_mbox_read(priv, CIX_REG_MSG(i));
+
+ /* trigger ack interrupt */
+ cix_mbox_write(priv, CIX_DB_ACK_INT_BIT, CIX_REG_DB_ACK);
+ mbox_chan_received_data(chan, data);
+ }
+ } else {
+ /* tx ack interrupt is triggered */
+ if (int_status & CIX_ACK_INT) {
+ cix_mbox_write(priv, CIX_ACK_INT, CIX_INT_CLEAR);
+ mbox_chan_txdone(chan, 0);
+ }
+ }
+}
+
+static void cix_mbox_isr_fifo(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ u32 int_status, status;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (priv->dir == CIX_MBOX_RX) {
+ /* FIFO full or watermark interrupt was raised */
+ if (int_status & (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT)) {
+ u32 data[CIX_MBOX_MSG_WORDS] = { 0 }, i = 0;
+
+ cix_mbox_write(priv, (CIX_FIFO_FULL_INT | CIX_FIFO_WM01_INT),
+ CIX_INT_CLEAR);
+ do {
+ data[i++] = cix_mbox_read(priv, CIX_FIFO_RD);
+ } while (!mbox_fifo_empty(chan) && i < CIX_MBOX_MSG_WORDS);
+
+ mbox_chan_received_data(chan, data);
+ }
+ /* FIFO underflow is generated */
+ if (int_status & CIX_FIFO_UFLOW_INT) {
+ status = cix_mbox_read(priv, CIX_FIFO_STAS);
+ dev_err(priv->dev, "fifo underflow: int_stats %d\n", status);
+ cix_mbox_write(priv, CIX_FIFO_UFLOW_INT, CIX_INT_CLEAR);
+ }
+ } else {
+ /* FIFO empty interrupt is generated */
+ if (int_status & CIX_FIFO_EMPTY_INT) {
+ u32 val;
+
+ cix_mbox_write(priv, CIX_FIFO_EMPTY_INT, CIX_INT_CLEAR);
+ /* Disable empty irq */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~CIX_FIFO_EMPTY_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ mbox_chan_txdone(chan, 0);
+ }
+ /* FIFO overflow is generated */
+ if (int_status & CIX_FIFO_OFLOW_INT) {
+ status = cix_mbox_read(priv, CIX_FIFO_STAS);
+ dev_err(priv->dev, "fifo overlow: int_stats %d\n", status);
+ cix_mbox_write(priv, CIX_FIFO_OFLOW_INT, CIX_INT_CLEAR);
+ }
+ }
+}
+
+static void cix_mbox_isr_fast(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ u32 int_status, data;
+
+ /* no irq will be trigger for TX dir mbox */
+ if (priv->dir != CIX_MBOX_RX)
+ return;
+
+ int_status = cix_mbox_read(priv, CIX_INT_STATUS);
+
+ if (int_status & CIX_FAST_CH_INT(cp->index)) {
+ cix_mbox_write(priv, CIX_FAST_CH_INT(cp->index), CIX_INT_CLEAR);
+ data = cix_mbox_read(priv, CIX_REG_F_INT(cp->index));
+ mbox_chan_received_data(chan, &data);
+ }
+}
+
+static irqreturn_t cix_mbox_isr(int irq, void *arg)
+{
+ struct mbox_chan *chan = arg;
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ cix_mbox_isr_db(chan);
+ break;
+ case CIX_MBOX_TYPE_REG:
+ cix_mbox_isr_reg(chan);
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ cix_mbox_isr_fifo(chan);
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ cix_mbox_isr_fast(chan);
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int cix_mbox_startup(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ int index = cp->index, ret;
+ u32 val;
+
+ ret = request_irq(priv->irq, cix_mbox_isr, 0,
+ dev_name(priv->dev), chan);
+ if (ret) {
+ dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq);
+ return ret;
+ }
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ /* Overwrite txdone_method for DB channel */
+ chan->txdone_method = TXDONE_BY_ACK;
+ fallthrough;
+ case CIX_MBOX_TYPE_REG:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Enable ACK interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_ACK_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else {
+ /* Enable Doorbell interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_DB_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ /* reset fifo */
+ cix_mbox_write(priv, CIX_FIFO_RST_BIT, CIX_FIFO_RST);
+ /* set default watermark */
+ cix_mbox_write(priv, CIX_FIFO_WM_DEFAULT, CIX_FIFO_WM);
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Enable fifo overflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val |= CIX_FIFO_OFLOW_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else {
+ /* Enable fifo watermark/underflow interrupts */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ /* Only the RX channel has an interrupt */
+ if (priv->dir == CIX_MBOX_RX) {
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid index %d\n", index);
+ ret = -EINVAL;
+ goto failed;
+ }
+ /* enable fast channel interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val |= CIX_FAST_CH_INT(index);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ ret = -EINVAL;
+ goto failed;
+ }
+ return 0;
+
+failed:
+ free_irq(priv->irq, chan);
+ return ret;
+}
+
+static void cix_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct cix_mbox_priv *priv = to_cix_mbox_priv(chan->mbox);
+ struct cix_mbox_con_priv *cp = chan->con_priv;
+ int index = cp->index;
+ u32 val;
+
+ switch (cp->type) {
+ case CIX_MBOX_TYPE_DB:
+ case CIX_MBOX_TYPE_REG:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Disable ACK interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~CIX_ACK_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else if (priv->dir == CIX_MBOX_RX) {
+ /* Disable Doorbell interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~CIX_DB_INT;
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FIFO:
+ if (priv->dir == CIX_MBOX_TX) {
+ /* Disable fifo empty/overflow irqs */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE);
+ val &= ~(CIX_FIFO_EMPTY_INT | CIX_FIFO_OFLOW_INT);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE);
+ } else if (priv->dir == CIX_MBOX_RX) {
+ /* Disable fifo WM01/underflow interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~(CIX_FIFO_UFLOW_INT | CIX_FIFO_WM01_INT);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+ case CIX_MBOX_TYPE_FAST:
+ if (priv->dir == CIX_MBOX_RX) {
+ if (index < 0 || index > CIX_MBOX_FAST_IDX) {
+ dev_err(priv->dev, "Invalid index %d\n", index);
+ break;
+ }
+ /* Disable fast channel interrupt */
+ val = cix_mbox_read(priv, CIX_INT_ENABLE_SIDE_B);
+ val &= ~CIX_FAST_CH_INT(index);
+ cix_mbox_write(priv, val, CIX_INT_ENABLE_SIDE_B);
+ }
+ break;
+
+ default:
+ dev_err(priv->dev, "Invalid channel type: %d\n", cp->type);
+ break;
+ }
+
+ free_irq(priv->irq, chan);
+}
+
+static const struct mbox_chan_ops cix_mbox_chan_ops = {
+ .send_data = cix_mbox_send_data,
+ .startup = cix_mbox_startup,
+ .shutdown = cix_mbox_shutdown,
+};
+
+static void cix_mbox_init(struct cix_mbox_priv *priv)
+{
+ struct cix_mbox_con_priv *cp;
+ int i;
+
+ for (i = 0; i < CIX_MBOX_CHANS; i++) {
+ cp = &priv->con_priv[i];
+ cp->index = i;
+ cp->chan = &priv->mbox_chans[i];
+ priv->mbox_chans[i].con_priv = cp;
+ if (cp->index <= CIX_MBOX_FAST_IDX)
+ cp->type = CIX_MBOX_TYPE_FAST;
+ if (cp->index == CIX_MBOX_DB_IDX)
+ cp->type = CIX_MBOX_TYPE_DB;
+ if (cp->index == CIX_MBOX_FIFO_IDX)
+ cp->type = CIX_MBOX_TYPE_FIFO;
+ if (cp->index == CIX_MBOX_REG_IDX)
+ cp->type = CIX_MBOX_TYPE_REG;
+ }
+}
+
+static int cix_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cix_mbox_priv *priv;
+ struct resource *res;
+ const char *dir_str;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /*
+ * The first 0x80 bytes of the register space of the cix mailbox controller
+ * can be used as shared memory for clients. When this shared memory is in
+ * use, the base address of the mailbox is offset by 0x80. Therefore, when
+ * performing subsequent read/write operations, it is necessary to subtract
+ * the offset CIX_SHMEM_OFFSET.
+ *
+ * When the base address of the mailbox is offset by 0x80, it indicates
+ * that shmem is in use.
+ */
+ priv->use_shmem = !!(res->start & CIX_SHMEM_OFFSET);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ if (device_property_read_string(dev, "cix,mbox-dir", &dir_str)) {
+ dev_err(priv->dev, "cix,mbox_dir property not found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(dir_str, "tx"))
+ priv->dir = 0;
+ else if (!strcmp(dir_str, "rx"))
+ priv->dir = 1;
+ else {
+ dev_err(priv->dev, "cix,mbox_dir=%s is not expected\n", dir_str);
+ return -EINVAL;
+ }
+
+ cix_mbox_init(priv);
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &cix_mbox_chan_ops;
+ priv->mbox.chans = priv->mbox_chans;
+ priv->mbox.txdone_irq = true;
+ priv->mbox.num_chans = CIX_MBOX_CHANS;
+ priv->mbox.of_xlate = NULL;
+
+ platform_set_drvdata(pdev, priv);
+ ret = devm_mbox_controller_register(dev, &priv->mbox);
+ if (ret)
+ dev_err(dev, "Failed to register mailbox %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id cix_mbox_dt_ids[] = {
+ { .compatible = "cix,sky1-mbox" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cix_mbox_dt_ids);
+
+static struct platform_driver cix_mbox_driver = {
+ .probe = cix_mbox_probe,
+ .driver = {
+ .name = "cix_mbox",
+ .of_match_table = cix_mbox_dt_ids,
+ },
+};
+
+static int __init cix_mailbox_init(void)
+{
+ return platform_driver_register(&cix_mbox_driver);
+}
+arch_initcall(cix_mailbox_init);
+
+MODULE_AUTHOR("Cix Technology Group Co., Ltd.");
+MODULE_DESCRIPTION("CIX mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/cv1800-mailbox.c b/drivers/mailbox/cv1800-mailbox.c
new file mode 100644
index 000000000000..4761191acf78
--- /dev/null
+++ b/drivers/mailbox/cv1800-mailbox.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Sophgo Technology Inc.
+ * Copyright (C) 2024 Yuntao Dai <d1581209858@live.com>
+ * Copyright (C) 2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kfifo.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define RECV_CPU 1
+
+#define MAILBOX_MAX_CHAN 8
+#define MAILBOX_MSG_LEN 8
+
+#define MBOX_EN_REG(cpu) ((cpu) << 2)
+#define MBOX_DONE_REG(cpu) (((cpu) << 2) + 2)
+#define MBOX_SET_CLR_REG(cpu) (0x10 + ((cpu) << 4))
+#define MBOX_SET_INT_REG(cpu) (0x18 + ((cpu) << 4))
+#define MBOX_SET_REG 0x60
+
+#define MAILBOX_CONTEXT_OFFSET 0x0400
+#define MAILBOX_CONTEXT_SIZE 0x0040
+
+#define MBOX_CONTEXT_BASE_INDEX(base, index) \
+ ((u64 __iomem *)(base + MAILBOX_CONTEXT_OFFSET) + index)
+
+/**
+ * struct cv1800_mbox_chan_priv - cv1800 mailbox channel private data
+ * @idx: index of channel
+ * @cpu: send to which processor
+ */
+struct cv1800_mbox_chan_priv {
+ int idx;
+ int cpu;
+};
+
+struct cv1800_mbox {
+ struct mbox_controller mbox;
+ struct cv1800_mbox_chan_priv priv[MAILBOX_MAX_CHAN];
+ struct mbox_chan chans[MAILBOX_MAX_CHAN];
+ u64 __iomem *content[MAILBOX_MAX_CHAN];
+ void __iomem *mbox_base;
+ int recvid;
+};
+
+static irqreturn_t cv1800_mbox_isr(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ size_t i;
+ u64 msg;
+ int ret = IRQ_NONE;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ if (mbox->content[i] && mbox->chans[i].cl) {
+ memcpy_fromio(&msg, mbox->content[i], MAILBOX_MSG_LEN);
+ mbox->content[i] = NULL;
+ mbox_chan_received_data(&mbox->chans[i], (void *)&msg);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+static irqreturn_t cv1800_mbox_irq(int irq, void *dev_id)
+{
+ struct cv1800_mbox *mbox = (struct cv1800_mbox *)dev_id;
+ u8 set, valid;
+ size_t i;
+ int ret = IRQ_NONE;
+
+ set = readb(mbox->mbox_base + MBOX_SET_INT_REG(RECV_CPU));
+
+ if (!set)
+ return ret;
+
+ for (i = 0; i < MAILBOX_MAX_CHAN; i++) {
+ valid = set & BIT(i);
+ if (valid) {
+ mbox->content[i] =
+ MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, i);
+ writeb(valid, mbox->mbox_base +
+ MBOX_SET_CLR_REG(RECV_CPU));
+ writeb(~valid, mbox->mbox_base + MBOX_EN_REG(RECV_CPU));
+ ret = IRQ_WAKE_THREAD;
+ }
+ }
+
+ return ret;
+}
+
+static int cv1800_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ int idx = priv->idx;
+ int cpu = priv->cpu;
+ u8 en, valid;
+
+ memcpy_toio(MBOX_CONTEXT_BASE_INDEX(mbox->mbox_base, idx),
+ data, MAILBOX_MSG_LEN);
+
+ valid = BIT(idx);
+ writeb(valid, mbox->mbox_base + MBOX_SET_CLR_REG(cpu));
+ en = readb(mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(en | valid, mbox->mbox_base + MBOX_EN_REG(cpu));
+ writeb(valid, mbox->mbox_base + MBOX_SET_REG);
+
+ return 0;
+}
+
+static bool cv1800_last_tx_done(struct mbox_chan *chan)
+{
+ struct cv1800_mbox_chan_priv *priv =
+ (struct cv1800_mbox_chan_priv *)chan->con_priv;
+ struct cv1800_mbox *mbox = dev_get_drvdata(chan->mbox->dev);
+ u8 en;
+
+ en = readb(mbox->mbox_base + MBOX_EN_REG(priv->cpu));
+
+ return !(en & BIT(priv->idx));
+}
+
+static const struct mbox_chan_ops cv1800_mbox_chan_ops = {
+ .send_data = cv1800_mbox_send_data,
+ .last_tx_done = cv1800_last_tx_done,
+};
+
+static struct mbox_chan *cv1800_mbox_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *spec)
+{
+ struct cv1800_mbox_chan_priv *priv;
+
+ int idx = spec->args[0];
+ int cpu = spec->args[1];
+
+ if (idx >= mbox->num_chans)
+ return ERR_PTR(-EINVAL);
+
+ priv = mbox->chans[idx].con_priv;
+ priv->cpu = cpu;
+
+ return &mbox->chans[idx];
+}
+
+static const struct of_device_id cv1800_mbox_of_match[] = {
+ { .compatible = "sophgo,cv1800b-mailbox", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cv1800_mbox_of_match);
+
+static int cv1800_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cv1800_mbox *mb;
+ int irq, idx, err;
+
+ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->mbox_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mb->mbox_base))
+ return dev_err_probe(dev, PTR_ERR(mb->mbox_base),
+ "Failed to map resource\n");
+
+ mb->mbox.dev = dev;
+ mb->mbox.chans = mb->chans;
+ mb->mbox.txdone_poll = true;
+ mb->mbox.ops = &cv1800_mbox_chan_ops;
+ mb->mbox.num_chans = MAILBOX_MAX_CHAN;
+ mb->mbox.of_xlate = cv1800_mbox_xlate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_threaded_irq(dev, irq, cv1800_mbox_irq,
+ cv1800_mbox_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), mb);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to register irq\n");
+
+ for (idx = 0; idx < MAILBOX_MAX_CHAN; idx++) {
+ mb->priv[idx].idx = idx;
+ mb->mbox.chans[idx].con_priv = &mb->priv[idx];
+ }
+
+ platform_set_drvdata(pdev, mb);
+
+ err = devm_mbox_controller_register(dev, &mb->mbox);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register mailbox\n");
+
+ return 0;
+}
+
+static struct platform_driver cv1800_mbox_driver = {
+ .driver = {
+ .name = "cv1800-mbox",
+ .of_match_table = cv1800_mbox_of_match,
+ },
+ .probe = cv1800_mbox_probe,
+};
+
+module_platform_driver(cv1800_mbox_driver);
+
+MODULE_DESCRIPTION("cv1800 mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 6ef8338add0d..6778afc64a04 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -226,7 +226,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
{
u32 *arg = data;
u32 val;
- int ret;
+ int ret, count;
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -240,11 +240,20 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
case IMX_MU_TYPE_TXDB_V2:
imx_mu_write(priv, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx),
priv->dcfg->xCR[IMX_MU_GCR]);
- ret = readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
- !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
- 0, 1000);
- if (ret)
- dev_warn_ratelimited(priv->dev, "channel type: %d failure\n", cp->type);
+ ret = -ETIMEDOUT;
+ count = 0;
+ while (ret && (count < 10)) {
+ ret =
+ readl_poll_timeout(priv->base + priv->dcfg->xCR[IMX_MU_GCR], val,
+ !(val & IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx)),
+ 0, 10000);
+
+ if (ret) {
+ dev_warn_ratelimited(priv->dev,
+ "channel type: %d timeout, %d times, retry\n",
+ cp->type, ++count);
+ }
+ }
break;
default:
dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index 748128661892..17278c2571d3 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -130,7 +130,7 @@ static void altera_mbox_rx_data(struct mbox_chan *chan)
static void altera_mbox_poll_rx(struct timer_list *t)
{
- struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer);
+ struct altera_mbox *mbox = timer_container_of(mbox, t, rxpoll_timer);
altera_mbox_rx_data(mbox->chan);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 0593b4d03685..5cd8ae222073 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -6,6 +6,7 @@
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -24,15 +25,12 @@ static DEFINE_MUTEX(con_mutex);
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
int idx;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ guard(spinlock_irqsave)(&chan->lock);
/* See if there is any space left */
- if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN)
return -ENOBUFS;
- }
idx = chan->msg_free;
chan->msg_data[idx] = mssg;
@@ -43,60 +41,53 @@ static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
else
chan->msg_free++;
- spin_unlock_irqrestore(&chan->lock, flags);
-
return idx;
}
static void msg_submit(struct mbox_chan *chan)
{
unsigned count, idx;
- unsigned long flags;
void *data;
int err = -EBUSY;
- spin_lock_irqsave(&chan->lock, flags);
-
- if (!chan->msg_count || chan->active_req)
- goto exit;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ if (!chan->msg_count || chan->active_req)
+ break;
- count = chan->msg_count;
- idx = chan->msg_free;
- if (idx >= count)
- idx -= count;
- else
- idx += MBOX_TX_QUEUE_LEN - count;
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
- data = chan->msg_data[idx];
+ data = chan->msg_data[idx];
- if (chan->cl->tx_prepare)
- chan->cl->tx_prepare(chan->cl, data);
- /* Try to submit a message to the MBOX controller */
- err = chan->mbox->ops->send_data(chan, data);
- if (!err) {
- chan->active_req = data;
- chan->msg_count--;
+ if (chan->cl->tx_prepare)
+ chan->cl->tx_prepare(chan->cl, data);
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
}
-exit:
- spin_unlock_irqrestore(&chan->lock, flags);
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
/* kick start the timer immediately to avoid delays */
- spin_lock_irqsave(&chan->mbox->poll_hrt_lock, flags);
- hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
- spin_unlock_irqrestore(&chan->mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
+ hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
static void tx_tick(struct mbox_chan *chan, int r)
{
- unsigned long flags;
void *mssg;
- spin_lock_irqsave(&chan->lock, flags);
- mssg = chan->active_req;
- chan->active_req = NULL;
- spin_unlock_irqrestore(&chan->lock, flags);
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ }
/* Submit next message */
msg_submit(chan);
@@ -118,7 +109,6 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
container_of(hrtimer, struct mbox_controller, poll_hrt);
bool txdone, resched = false;
int i;
- unsigned long flags;
for (i = 0; i < mbox->num_chans; i++) {
struct mbox_chan *chan = &mbox->chans[i];
@@ -133,10 +123,10 @@ static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
}
if (resched) {
- spin_lock_irqsave(&mbox->poll_hrt_lock, flags);
- if (!hrtimer_is_queued(hrtimer))
- hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
- spin_unlock_irqrestore(&mbox->poll_hrt_lock, flags);
+ scoped_guard(spinlock_irqsave, &mbox->poll_hrt_lock) {
+ if (!hrtimer_is_queued(hrtimer))
+ hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
+ }
return HRTIMER_RESTART;
}
@@ -318,25 +308,23 @@ EXPORT_SYMBOL_GPL(mbox_flush);
static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
struct device *dev = cl->dev;
- unsigned long flags;
int ret;
if (chan->cl || !try_module_get(chan->mbox->dev->driver->owner)) {
- dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ dev_err(dev, "%s: mailbox not free\n", __func__);
return -EBUSY;
}
- spin_lock_irqsave(&chan->lock, flags);
- chan->msg_free = 0;
- chan->msg_count = 0;
- chan->active_req = NULL;
- chan->cl = cl;
- init_completion(&chan->tx_complete);
-
- if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
- chan->txdone_method = TXDONE_BY_ACK;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
- spin_unlock_irqrestore(&chan->lock, flags);
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method = TXDONE_BY_ACK;
+ }
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
@@ -370,13 +358,9 @@ static int __mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
*/
int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
- int ret;
-
- mutex_lock(&con_mutex);
- ret = __mbox_bind_client(chan, cl);
- mutex_unlock(&con_mutex);
+ guard(mutex)(&con_mutex);
- return ret;
+ return __mbox_bind_client(chan, cl);
}
EXPORT_SYMBOL_GPL(mbox_bind_client);
@@ -413,32 +397,29 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec);
if (ret) {
- dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
return ERR_PTR(ret);
}
- mutex_lock(&con_mutex);
+ scoped_guard(mutex, &con_mutex) {
+ chan = ERR_PTR(-EPROBE_DEFER);
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
- chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
- }
+ of_node_put(spec.np);
- of_node_put(spec.np);
+ if (IS_ERR(chan))
+ return chan;
- if (IS_ERR(chan)) {
- mutex_unlock(&con_mutex);
- return chan;
+ ret = __mbox_bind_client(chan, cl);
+ if (ret)
+ chan = ERR_PTR(ret);
}
- ret = __mbox_bind_client(chan, cl);
- if (ret)
- chan = ERR_PTR(ret);
-
- mutex_unlock(&con_mutex);
return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
@@ -458,7 +439,7 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(index);
}
return mbox_request_channel(cl, index);
}
@@ -471,8 +452,6 @@ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
*/
void mbox_free_channel(struct mbox_chan *chan)
{
- unsigned long flags;
-
if (!chan || !chan->cl)
return;
@@ -480,14 +459,14 @@ void mbox_free_channel(struct mbox_chan *chan)
chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
- spin_lock_irqsave(&chan->lock, flags);
- chan->cl = NULL;
- chan->active_req = NULL;
- if (chan->txdone_method == TXDONE_BY_ACK)
- chan->txdone_method = TXDONE_BY_POLL;
+ scoped_guard(spinlock_irqsave, &chan->lock) {
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == TXDONE_BY_ACK)
+ chan->txdone_method = TXDONE_BY_POLL;
+ }
module_put(chan->mbox->dev->driver->owner);
- spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
@@ -547,9 +526,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
if (!mbox->of_xlate)
mbox->of_xlate = of_mbox_index_xlate;
- mutex_lock(&con_mutex);
- list_add_tail(&mbox->node, &mbox_cons);
- mutex_unlock(&con_mutex);
+ scoped_guard(mutex, &con_mutex)
+ list_add_tail(&mbox->node, &mbox_cons);
return 0;
}
@@ -566,17 +544,15 @@ void mbox_controller_unregister(struct mbox_controller *mbox)
if (!mbox)
return;
- mutex_lock(&con_mutex);
-
- list_del(&mbox->node);
-
- for (i = 0; i < mbox->num_chans; i++)
- mbox_free_channel(&mbox->chans[i]);
+ scoped_guard(mutex, &con_mutex) {
+ list_del(&mbox->node);
- if (mbox->txdone_poll)
- hrtimer_cancel(&mbox->poll_hrt);
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
- mutex_unlock(&con_mutex);
+ if (mbox->txdone_poll)
+ hrtimer_cancel(&mbox->poll_hrt);
+ }
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);
@@ -587,16 +563,6 @@ static void __devm_mbox_controller_unregister(struct device *dev, void *res)
mbox_controller_unregister(*mbox);
}
-static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
-{
- struct mbox_controller **mbox = res;
-
- if (WARN_ON(!mbox || !*mbox))
- return 0;
-
- return *mbox == data;
-}
-
/**
* devm_mbox_controller_register() - managed mbox_controller_register()
* @dev: device owning the mailbox controller being registered
@@ -632,20 +598,3 @@ int devm_mbox_controller_register(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
-
-/**
- * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
- * @dev: device owning the mailbox controller being unregistered
- * @mbox: mailbox controller being unregistered
- *
- * This function unregisters the mailbox controller and removes the device-
- * managed resource that was set up to automatically unregister the mailbox
- * controller on driver probe failure or driver removal. It's typically not
- * necessary to call this function.
- */
-void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
-{
- WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
- devm_mbox_controller_match, mbox));
-}
-EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
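
The mailbox.c conversion above replaces open-coded spin_lock_irqsave()/spin_unlock_irqrestore() and mutex_lock()/mutex_unlock() pairs with the guard()/scoped_guard() helpers from <linux/cleanup.h>, so the lock is released automatically whenever the scope is left, including via return or break (which is how the old "goto exit" path in msg_submit() is expressed now). A minimal sketch of that pattern follows; demo_chan, demo_push() and demo_register() are illustrative names, not mailbox.c code.

	/* Sketch only: the guard()/scoped_guard() pattern used above. */
	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	struct demo_chan {
		spinlock_t lock;
		int count;
	};

	static DEFINE_MUTEX(demo_mutex);

	static int demo_push(struct demo_chan *ch)
	{
		/*
		 * The spinlock (with saved irq flags) is dropped at every
		 * exit from this scope, including the early return.
		 */
		scoped_guard(spinlock_irqsave, &ch->lock) {
			if (ch->count >= 16)
				return -ENOBUFS;
			ch->count++;
		}

		return 0;
	}

	static int demo_register(void)
	{
		/* Equivalent to mutex_lock()/mutex_unlock() around the rest
		 * of the function body.
		 */
		guard(mutex)(&demo_mutex);

		return 0;
	}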
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index d186865b8dce..532929916e99 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -92,18 +92,6 @@ struct gce_plat {
u32 gce_num;
};
-static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
-{
- WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
- if (enable)
- writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
- else
- writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
-
- clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
-}
-
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -112,6 +100,19 @@ u8 cmdq_get_shift_pa(struct mbox_chan *chan)
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
+static void cmdq_gctl_value_toggle(struct cmdq *cmdq, bool ddr_enable)
+{
+ u32 val = cmdq->pdata->control_by_sw ? GCE_CTRL_BY_SW : 0;
+
+ if (!cmdq->pdata->control_by_sw && !cmdq->pdata->sw_ddr_en)
+ return;
+
+ if (cmdq->pdata->sw_ddr_en && ddr_enable)
+ val |= GCE_DDR_EN;
+
+ writel(val, cmdq->base + GCE_GCTL_VALUE);
+}
+
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
u32 status;
@@ -140,16 +141,10 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
- u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
- if (cmdq->pdata->control_by_sw)
- gctl_regval = GCE_CTRL_BY_SW;
- if (cmdq->pdata->sw_ddr_en)
- gctl_regval |= GCE_DDR_EN;
- if (gctl_regval)
- writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
+ cmdq_gctl_value_toggle(cmdq, true);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
@@ -315,14 +310,21 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
static int cmdq_runtime_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ int ret;
- return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ ret = clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+ if (ret)
+ return ret;
+
+ cmdq_gctl_value_toggle(cmdq, true);
+ return 0;
}
static int cmdq_runtime_suspend(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
+ cmdq_gctl_value_toggle(cmdq, false);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
@@ -347,9 +349,6 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
return pm_runtime_force_suspend(dev);
}
@@ -360,9 +359,6 @@ static int cmdq_resume(struct device *dev)
WARN_ON(pm_runtime_force_resume(dev));
cmdq->suspended = false;
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, true);
-
return 0;
}
@@ -370,9 +366,6 @@ static void cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
- if (cmdq->pdata->sw_ddr_en)
- cmdq_sw_ddr_enable(cmdq, false);
-
if (!IS_ENABLED(CONFIG_PM))
cmdq_runtime_suspend(&pdev->dev);
@@ -397,7 +390,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task = kzalloc(sizeof(*task), GFP_ATOMIC);
if (!task) {
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return -ENOMEM;
}
@@ -447,7 +440,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
list_move_tail(&task->list_entry, &thread->task_busy_list);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
@@ -495,7 +488,7 @@ done:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
}
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -535,7 +528,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
@@ -550,7 +543,7 @@ wait:
return -EFAULT;
}
pm_runtime_mark_last_busy(cmdq->mbox.dev);
- __pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ pm_runtime_put_autosuspend(cmdq->mbox.dev);
return 0;
}
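
The mtk-cmdq hunks consolidate the GCE_GCTL_VALUE programming into cmdq_gctl_value_toggle(), drive it from the runtime-PM resume/suspend callbacks, and switch the put paths to plain pm_runtime_put_autosuspend(). A minimal sketch of the mark-last-busy/put-autosuspend pattern those call sites follow; demo_send() and the "program the hardware" step are placeholders, not mtk-cmdq code.

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int demo_send(struct device *dev)
	{
		int ret;

		/* Take a usage reference, resuming the device if needed. */
		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;

		/* ... program the hardware here ... */

		/*
		 * Record the activity and drop the reference; the device
		 * autosuspends once the delay elapses and the usage count
		 * reaches zero.
		 */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);

		return 0;
	}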
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index f6714c233f5a..0a00719b2482 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -306,6 +306,22 @@ static void pcc_chan_acknowledge(struct pcc_chan_info *pchan)
pcc_chan_reg_read_modify_write(&pchan->db);
}
+static void *write_response(struct pcc_chan_info *pchan)
+{
+ struct pcc_header pcc_header;
+ void *buffer;
+ int data_len;
+
+ memcpy_fromio(&pcc_header, pchan->chan.shmem,
+ sizeof(pcc_header));
+ data_len = pcc_header.length - sizeof(u32) + sizeof(struct pcc_header);
+
+ buffer = pchan->chan.rx_alloc(pchan->chan.mchan->cl, data_len);
+ if (buffer != NULL)
+ memcpy_fromio(buffer, pchan->chan.shmem, data_len);
+ return buffer;
+}
+
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
* @irq: interrupt number
@@ -317,6 +333,8 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
+ struct pcc_header *pcc_header = chan->active_req;
+ void *handle = NULL;
pchan = chan->con_priv;
@@ -340,7 +358,17 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* required to avoid any possible race in the update of this flag.
*/
pchan->chan_in_use = false;
- mbox_chan_received_data(chan, NULL);
+
+ if (pchan->chan.rx_alloc)
+ handle = write_response(pchan);
+
+ if (chan->active_req) {
+ pcc_header = chan->active_req;
+ if (pcc_header->flags & PCC_CMD_COMPLETION_NOTIFY)
+ mbox_chan_txdone(chan, 0);
+ }
+
+ mbox_chan_received_data(chan, handle);
pcc_chan_acknowledge(pchan);
@@ -384,9 +412,24 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pcc_mchan = &pchan->chan;
pcc_mchan->shmem = acpi_os_ioremap(pcc_mchan->shmem_base_addr,
pcc_mchan->shmem_size);
- if (pcc_mchan->shmem)
- return pcc_mchan;
+ if (!pcc_mchan->shmem)
+ goto err;
+
+ pcc_mchan->manage_writes = false;
+
+ /*
+ * Writing cmd_update signals that the channel is ready to accept
+ * messages. Ideally this would happen only after the client has
+ * registered its callbacks, but the mailbox API provides no hook
+ * for that, so the client must install its allocate (rx_alloc)
+ * callback before sending any messages.
+ */
+ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+
+ return pcc_mchan;
+err:
mbox_free_channel(chan);
return ERR_PTR(-ENXIO);
}
@@ -417,8 +460,38 @@ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+static int pcc_write_to_buffer(struct mbox_chan *chan, void *data)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ struct pcc_mbox_chan *pcc_mbox_chan = &pchan->chan;
+ struct pcc_header *pcc_header = data;
+
+ if (!pchan->chan.manage_writes)
+ return 0;
+
+ /*
+ * The PCC header's length field covers the command field (u32)
+ * but not the rest of the header, so the total transfer size is
+ * length - sizeof(u32) + sizeof(struct pcc_header).
+ */
+ int len = pcc_header->length - sizeof(u32) + sizeof(struct pcc_header);
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (!val) {
+ pr_info("%s: pchan->cmd_complete not set\n", __func__);
+ return -EBUSY;
+ }
+ memcpy_toio(pcc_mbox_chan->shmem, data, len);
+ return 0;
+}
+
/**
- * pcc_send_data - Called from Mailbox Controller code. Used
+ * pcc_send_data - Called from Mailbox Controller code. If
+ * pchan->chan.rx_alloc is set, the command-complete flag is
+ * checked and the data is copied into the channel's shared
+ * memory region before the doorbell is rung.
+ *
+ * If pchan->chan.rx_alloc is not set, this function is used
* here only to ring the channel doorbell. The PCC client
* specific read/write is done in the client driver in
* order to maintain atomicity over PCC channel once
@@ -434,17 +507,37 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
int ret;
struct pcc_chan_info *pchan = chan->con_priv;
+ ret = pcc_write_to_buffer(chan, data);
+ if (ret)
+ return ret;
+
ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
if (ret)
return ret;
ret = pcc_chan_reg_read_modify_write(&pchan->db);
+
if (!ret && pchan->plat_irq > 0)
pchan->chan_in_use = true;
return ret;
}
+
+static bool pcc_last_tx_done(struct mbox_chan *chan)
+{
+ struct pcc_chan_info *pchan = chan->con_priv;
+ u64 val;
+
+ pcc_chan_reg_read(&pchan->cmd_complete, &val);
+
+ return !!val;
+}
+
/**
* pcc_startup - Called from Mailbox Controller code. Used here
* to request the interrupt.
@@ -490,6 +583,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
.send_data = pcc_send_data,
.startup = pcc_startup,
.shutdown = pcc_shutdown,
+ .last_tx_done = pcc_last_tx_done,
};
/**
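
The pcc.c additions let the driver manage the shared-memory buffer itself: when a client installs an rx_alloc hook on the pcc_mbox_chan, pcc_mbox_irq() copies the incoming message into a client-allocated buffer via write_response(), while pcc_write_to_buffer() copies outgoing data (gated on the command-complete register) and pcc_last_tx_done() enables TXDONE_BY_POLL. A rough client-side sketch, assuming struct pcc_header and the new rx_alloc member are exposed to clients as used above; the rx_alloc prototype is inferred from the write_response() call site and all demo_* names are hypothetical.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/mailbox_client.h>
	#include <linux/slab.h>
	#include <acpi/pcc.h>

	/* Allocator invoked from pcc_mbox_irq(); IRQ context, so GFP_ATOMIC. */
	static void *demo_rx_alloc(struct mbox_client *cl, int size)
	{
		return kmalloc(size, GFP_ATOMIC);
	}

	/* Receives the buffer that write_response() filled from shmem. */
	static void demo_rx_callback(struct mbox_client *cl, void *msg)
	{
		struct pcc_header *hdr = msg;

		dev_info(cl->dev, "PCC message received, length %u\n", hdr->length);
		kfree(msg);
	}

	static int demo_setup(struct mbox_client *cl)
	{
		struct pcc_mbox_chan *pcc_chan;

		cl->rx_callback = demo_rx_callback;

		pcc_chan = pcc_mbox_request_channel(cl, 0);	/* subspace 0 */
		if (IS_ERR(pcc_chan))
			return PTR_ERR(pcc_chan);

		/*
		 * Opt in to framework-managed transfers: the IRQ handler
		 * calls rx_alloc and hands the copied message to rx_callback.
		 */
		pcc_chan->rx_alloc = demo_rx_alloc;

		return 0;
	}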
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 11c41e935a36..8b24ec0fa191 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -116,10 +116,18 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
}
if (apcs_data->clk_name) {
- apcs->clk = platform_device_register_data(&pdev->dev,
- apcs_data->clk_name,
- PLATFORM_DEVID_AUTO,
- NULL, 0);
+ struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
+ "clock-controller");
+ struct platform_device_info pdevinfo = {
+ .parent = &pdev->dev,
+ .name = apcs_data->clk_name,
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = of_fwnode_handle(np) ?: pdev->dev.fwnode,
+ .of_node_reused = !np,
+ };
+
+ apcs->clk = platform_device_register_full(&pdevinfo);
+ of_node_put(np);
if (IS_ERR(apcs->clk))
dev_err(&pdev->dev, "failed to register APCS clk\n");
}
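
The qcom-apcs change registers the APCS clock child through platform_device_register_full() so it can carry the "clock-controller" child node as its fwnode, falling back to the parent's fwnode when the child is absent. A standalone sketch of that registration pattern; "demo-clk" and demo_register_clk() are placeholders, not driver code.

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static struct platform_device *demo_register_clk(struct platform_device *pdev)
	{
		struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
							      "clock-controller");
		struct platform_device_info pdevinfo = {
			.parent		= &pdev->dev,
			.name		= "demo-clk",	/* placeholder name */
			.id		= PLATFORM_DEVID_AUTO,
			/* Prefer the child node's fwnode, else reuse the parent's. */
			.fwnode		= of_fwnode_handle(np) ?: pdev->dev.fwnode,
			.of_node_reused	= !np,
		};
		struct platform_device *clk_pdev;

		clk_pdev = platform_device_register_full(&pdevinfo);
		of_node_put(np);

		return clk_pdev;	/* ERR_PTR() on failure, as checked above */
	}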
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 0b17a38ea6bf..d957d989c0ce 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -312,8 +312,7 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
- &qcom_ipcc_irq_ops, ipcc);
+ ipcc->irq_domain = irq_domain_create_tree(dev_fwnode(&pdev->dev), &qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
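
The qcom-ipcc hunk replaces the of_node-based irq_domain_add_tree() with the fwnode-based irq_domain_create_tree(), passing the device's fwnode instead of its device_node. A minimal sketch of the fwnode variant; demo_irq_ops, demo_probe() and the twocell xlate are illustrative choices, not taken from qcom-ipcc.

	#include <linux/errno.h>
	#include <linux/irqdomain.h>
	#include <linux/platform_device.h>
	#include <linux/property.h>

	static const struct irq_domain_ops demo_irq_ops = {
		.xlate	= irq_domain_xlate_twocell,
	};

	static int demo_probe(struct platform_device *pdev)
	{
		struct irq_domain *d;

		/*
		 * dev_fwnode() returns the device's firmware node (DT node
		 * or ACPI companion) as a struct fwnode_handle, which is
		 * what irq_domain_create_tree() expects.
		 */
		d = irq_domain_create_tree(dev_fwnode(&pdev->dev),
					   &demo_irq_ops, NULL);
		if (!d)
			return -ENOMEM;

		return 0;
	}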