summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndy Hu <andy.hu@starfivetech.com>2024-05-31 12:40:44 +0300
committerAndy Hu <andy.hu@starfivetech.com>2024-05-31 12:40:44 +0300
commita7cf7e446ecd207d0517727d7e08f93a1ac79b14 (patch)
tree7407172b9daf4355a72e780fcb5ec0105ac33baf
parent62b446203e9350e995c6fe8e72a4bf305004eedb (diff)
parent46db20b9cd19b6d5b2f5fbe72f846fee0ce12953 (diff)
downloadlinux-a7cf7e446ecd207d0517727d7e08f93a1ac79b14.tar.xz
Merge remote-tracking branch 'sdk/jh7110-6.6.y-devel' into vf2-6.6.y-devel
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/configs/starfive_jh7110_defconfig3
-rw-r--r--arch/riscv/include/asm/irq.h9
-rw-r--r--arch/riscv/include/asm/sbi.h13
-rw-r--r--arch/riscv/kernel/irq.c13
-rw-r--r--arch/riscv/kernel/sbi-ipi.c48
-rw-r--r--arch/riscv/kernel/sbi.c30
-rw-r--r--arch/riscv/kernel/smp.c46
-rw-r--r--drivers/mailbox/Kconfig6
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/starfive_ipi_mailbox.c305
-rw-r--r--drivers/phy/starfive/phy-jh7110-pcie.c10
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c2
-rw-r--r--drivers/rpmsg/Kconfig9
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/starfive_rpmsg.c496
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c25
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c43
-rw-r--r--drivers/virtio/virtio_ring.c30
-rw-r--r--include/linux/virtio_ring.h18
-rw-r--r--kernel/irq/ipi-mux.c18
21 files changed, 1123 insertions, 8 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c50df02b78fd..1da66ef1eb58 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -395,6 +395,10 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
+config RISCV_AMP
+	bool "Support for RISC-V AMP"
+ depends on SMP && RISCV_SBI
+
choice
prompt "CPU Tuning"
default TUNE_GENERIC
diff --git a/arch/riscv/configs/starfive_jh7110_defconfig b/arch/riscv/configs/starfive_jh7110_defconfig
index 3bc5da1dfb12..0772712e8d7d 100644
--- a/arch/riscv/configs/starfive_jh7110_defconfig
+++ b/arch/riscv/configs/starfive_jh7110_defconfig
@@ -35,6 +35,7 @@ CONFIG_SOC_VIRT=y
CONFIG_ERRATA_SIFIVE=y
CONFIG_NONPORTABLE=y
CONFIG_SMP=y
+CONFIG_RISCV_AMP=y
CONFIG_RISCV_SBI_V01=y
# CONFIG_RISCV_BOOT_SPINWAIT is not set
CONFIG_HIBERNATION=y
@@ -287,10 +288,12 @@ CONFIG_CLK_STARFIVE_JH7110_STG=y
CONFIG_CLK_STARFIVE_JH7110_ISP=y
CONFIG_CLK_STARFIVE_JH7110_VOUT=y
CONFIG_MAILBOX=y
+CONFIG_STARFIVE_IPI_MBOX=y
CONFIG_STARFIVE_MBOX=m
CONFIG_STARFIVE_MBOX_TEST=m
CONFIG_RPMSG_CHAR=y
CONFIG_RPMSG_CTRL=y
+CONFIG_RPMSG_STARFIVE=m
CONFIG_RPMSG_VIRTIO=y
CONFIG_SIFIVE_CCACHE=y
CONFIG_PWM=y
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 8e10a94430a2..57352e80ce61 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -16,4 +16,13 @@ void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
struct fwnode_handle *riscv_get_intc_hwnode(void);
+#ifdef CONFIG_RISCV_AMP
+#define IPI_AMP 15
+void riscv_set_ipi_amp_enable(void);
+int riscv_get_ipi_amp_enable(void);
+void ipi_set_extra_bits(unsigned long (*func)(void));
+unsigned long riscv_clear_amp_bits(void);
+void register_ipi_mailbox_handler(void (*handler)(unsigned long));
+#endif
+
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index b79d0228144f..2be05510f25b 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -56,6 +56,9 @@ enum sbi_ext_time_fid {
enum sbi_ext_ipi_fid {
SBI_EXT_IPI_SEND_IPI = 0,
+ SBI_EXT_IPI_SEND_EXT_DOMAIN = 0x100,
+ SBI_EXT_IPI_SET_AMP_DATA_ADDR = 0x101,
+ SBI_EXT_IPI_CLEAR_IPI = 0x102,
};
enum sbi_ext_rfence_fid {
@@ -253,6 +256,12 @@ enum sbi_pmu_ctr_type {
#define SBI_ERR_ALREADY_STOPPED -8
extern unsigned long sbi_spec_version;
+#ifdef CONFIG_RISCV_AMP
+struct amp_data {
+ unsigned long amp_bits;
+};
+#endif
+
struct sbiret {
long error;
long value;
@@ -337,4 +346,8 @@ void sbi_ipi_init(void);
static inline void sbi_ipi_init(void) { }
#endif
+#ifdef CONFIG_RISCV_AMP
+int sbi_send_ipi_amp(unsigned int hartid, unsigned int msg_type);
+int sbi_amp_data_init(void *riscv_amp_data);
+#endif
#endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 9cc0a7669271..4552b24f6d0c 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -17,6 +17,19 @@
static struct fwnode_handle *(*__get_intc_node)(void);
+#ifdef CONFIG_RISCV_AMP
+static int ipi_amp_enable;
+void riscv_set_ipi_amp_enable(void)
+{
+ ipi_amp_enable = 1;
+}
+
+int riscv_get_ipi_amp_enable(void)
+{
+ return ipi_amp_enable;
+}
+#endif
+
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void))
{
__get_intc_node = fn;
diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
index a4559695ce62..144e608f4b6b 100644
--- a/arch/riscv/kernel/sbi-ipi.c
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -15,6 +15,33 @@
static int sbi_ipi_virq;
+#ifdef CONFIG_RISCV_AMP
+static struct amp_data riscv_amp_data[NR_CPUS] __cacheline_aligned;
+
+static unsigned long riscv_get_extra_bits(void)
+{
+ int cpu_id;
+ unsigned long bits = 0;
+
+ cpu_id = smp_processor_id();
+ if (riscv_amp_data[cpuid_to_hartid_map(cpu_id)].amp_bits)
+ bits |= BIT(IPI_AMP);
+
+ return bits;
+}
+
+unsigned long riscv_clear_amp_bits(void)
+{
+ int cpu_id;
+ unsigned long *ops;
+
+ /*atomic ops */
+ cpu_id = smp_processor_id();
+ ops = &riscv_amp_data[cpuid_to_hartid_map(cpu_id)].amp_bits;
+ return xchg(ops, 0);
+}
+#endif
+
static void sbi_ipi_handle(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -35,13 +62,15 @@ static int sbi_ipi_starting_cpu(unsigned int cpu)
void __init sbi_ipi_init(void)
{
- int virq;
+ int virq, irq_num;
struct irq_domain *domain;
+ struct fwnode_handle *node;
if (riscv_ipi_have_virq_range())
return;
- domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
+ node = riscv_get_intc_hwnode();
+ domain = irq_find_matching_fwnode(node,
DOMAIN_BUS_ANY);
if (!domain) {
pr_err("unable to find INTC IRQ domain\n");
@@ -53,8 +82,19 @@ void __init sbi_ipi_init(void)
pr_err("unable to create INTC IRQ mapping\n");
return;
}
+#ifdef CONFIG_RISCV_AMP
+ if (fwnode_property_present(node, "enable-ipi-amp")) {
+ riscv_set_ipi_amp_enable();
+ sbi_amp_data_init(riscv_amp_data);
+ ipi_set_extra_bits(riscv_get_extra_bits);
+ irq_num = BITS_PER_TYPE(short);
+ } else
+ irq_num = BITS_PER_BYTE;
+#else
+ irq_num = BITS_PER_BYTE;
+#endif
- virq = ipi_mux_create(BITS_PER_BYTE, sbi_send_ipi);
+ virq = ipi_mux_create(irq_num, sbi_send_ipi);
if (virq <= 0) {
pr_err("unable to create muxed IPIs\n");
irq_dispose_mapping(sbi_ipi_virq);
@@ -72,6 +112,6 @@ void __init sbi_ipi_init(void)
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);
- riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false);
+ riscv_ipi_set_virq_range(virq, irq_num, false);
pr_info("providing IPIs using SBI IPI extension\n");
}
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 5a62ed1da453..aa54c9549ba3 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/reboot.h>
+#include <asm/io.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
@@ -364,6 +365,35 @@ void sbi_send_ipi(unsigned int cpu)
}
EXPORT_SYMBOL(sbi_send_ipi);
+#ifdef CONFIG_RISCV_AMP
+int sbi_send_ipi_amp(unsigned int hartid, unsigned int msg_type)
+{
+ struct sbiret ret = {0};
+
+ ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_EXT_DOMAIN,
+ 0, hartid, msg_type, 0, 0, 0);
+
+ if (ret.error)
+		pr_err("sbi ipi amp error\n");
+
+ return ret.error;
+}
+EXPORT_SYMBOL_GPL(sbi_send_ipi_amp);
+
+int sbi_amp_data_init(void *riscv_amp_data)
+{
+ struct sbiret ret = {0};
+
+ ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SET_AMP_DATA_ADDR,
+ virt_to_phys((void *)riscv_amp_data), 0,
+ 0, 0, 0, 0);
+ if (ret.error)
+		pr_err("set ipi data error\n");
+
+ return ret.error;
+}
+#endif
+
/**
* sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
* @cpu_mask: A cpu mask containing all the target harts.
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 40420afbb1a0..5bd06a1460e9 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -49,6 +49,21 @@ static DEFINE_PER_CPU_READ_MOSTLY(int, ipi_dummy_dev);
static int ipi_virq_base __ro_after_init;
static int nr_ipi __ro_after_init = IPI_MAX;
static struct irq_desc *ipi_desc[IPI_MAX] __read_mostly;
+#ifdef CONFIG_RISCV_AMP
+static struct irq_desc *amp_desc;
+static void (*ipi_mailbox_handler)(unsigned long msg_type);
+void ipi_amp_handle(unsigned long msg_type)
+{
+ if (ipi_mailbox_handler)
+ ipi_mailbox_handler(msg_type);
+}
+
+void register_ipi_mailbox_handler(void (*handler)(unsigned long))
+{
+ ipi_mailbox_handler = handler;
+}
+EXPORT_SYMBOL_GPL(register_ipi_mailbox_handler);
+#endif
int riscv_hartid_to_cpuid(unsigned long hartid)
{
@@ -136,6 +151,11 @@ static irqreturn_t handle_IPI(int irq, void *data)
tick_receive_broadcast();
break;
#endif
+#ifdef CONFIG_RISCV_AMP
+ case IPI_AMP:
+ ipi_amp_handle(riscv_clear_amp_bits());
+ break;
+#endif
default:
pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi);
break;
@@ -153,6 +173,10 @@ void riscv_ipi_enable(void)
for (i = 0; i < nr_ipi; i++)
enable_percpu_irq(ipi_virq_base + i, 0);
+#ifdef CONFIG_RISCV_AMP
+ if (riscv_get_ipi_amp_enable())
+ enable_percpu_irq(ipi_virq_base + IPI_AMP, 0);
+#endif
}
void riscv_ipi_disable(void)
@@ -164,6 +188,10 @@ void riscv_ipi_disable(void)
for (i = 0; i < nr_ipi; i++)
disable_percpu_irq(ipi_virq_base + i);
+#ifdef CONFIG_RISCV_AMP
+ if (riscv_get_ipi_amp_enable())
+ disable_percpu_irq(ipi_virq_base + IPI_AMP);
+#endif
}
bool riscv_ipi_have_virq_range(void)
@@ -194,7 +222,16 @@ void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence)
ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
irq_set_status_flags(ipi_virq_base + i, IRQ_HIDDEN);
}
+#ifdef CONFIG_RISCV_AMP
+ if (riscv_get_ipi_amp_enable()) {
+ err = request_percpu_irq(ipi_virq_base + IPI_AMP, handle_IPI,
+ "IPI", &ipi_dummy_dev);
+ WARN_ON(err);
+ amp_desc = irq_to_desc(ipi_virq_base + IPI_AMP);
+ irq_set_status_flags(ipi_virq_base + IPI_AMP, IRQ_HIDDEN);
+ }
+#endif
/* Enabled IPIs for boot CPU immediately */
riscv_ipi_enable();
@@ -225,6 +262,15 @@ void show_ipi_stats(struct seq_file *p, int prec)
seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
seq_printf(p, " %s\n", ipi_names[i]);
}
+#ifdef CONFIG_RISCV_AMP
+ if (riscv_get_ipi_amp_enable()) {
+ seq_printf(p, "%*s:%s", prec - 1, "IAMP",
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", irq_desc_kstat_cpu(amp_desc, cpu));
+ seq_printf(p, " %s\n", "AMP rpmsg interrupts");
+ }
+#endif
}
void arch_send_call_function_ipi_mask(struct cpumask *mask)
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 8cb2a7004a57..c13d7a74fc75 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -295,6 +295,12 @@ config QCOM_IPCC
acts as an interrupt controller for receiving interrupts from clients.
Say Y here if you want to build this driver.
+config STARFIVE_IPI_MBOX
+ tristate "Starfive AMP IPI Mailbox"
+ depends on OF
+ help
+	  Say Y here if you want to build an AMP IPI Mailbox controller driver.
+
config STARFIVE_MBOX
tristate "Platform Starfive Mailbox"
depends on OF
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index f5ff98ba44c9..627a275d36bd 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -63,6 +63,8 @@ obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
+obj-$(CONFIG_STARFIVE_IPI_MBOX) += starfive_ipi_mailbox.o
+
obj-$(CONFIG_STARFIVE_MBOX) += starfive_mailbox.o
obj-$(CONFIG_STARFIVE_MBOX_TEST) += starfive_mailbox-test.o
diff --git a/drivers/mailbox/starfive_ipi_mailbox.c b/drivers/mailbox/starfive_ipi_mailbox.c
new file mode 100644
index 000000000000..ee53465e7c0d
--- /dev/null
+++ b/drivers/mailbox/starfive_ipi_mailbox.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 StarFive Technology Co., Ltd.
+ */
+
+#include <asm/irq.h>
+#include <asm/sbi.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+#include "mailbox.h"
+
+#define IPI_MB_CHANS 2
+#define IPI_MB_DEV_PER_CHAN 8
+
+#define TX_MBOX_OFFSET 0x400
+
+#define TX_DONE_OFFSET 0x100
+
+#define QUEUE_ID_OFFSET 16
+#define QUEUE_TO_CHAN 4
+
+/* Please do not change TX & RX */
+enum ipi_mb_chan_type {
+ IPI_MB_TYPE_RX = 0, /* Rx */
+ IPI_MB_TYPE_TX = 1, /* Txdone */
+};
+
+struct ipi_mb_con_priv {
+ unsigned int idx;
+ enum ipi_mb_chan_type type;
+ struct mbox_chan *chan;
+ struct tasklet_struct txdb_tasklet;
+ int rtos_hart_id;
+};
+
+struct ipi_mb_priv {
+ struct device *dev;
+
+ struct mbox_controller mbox;
+ struct mbox_chan chans[IPI_MB_CHANS * 2];
+
+ struct ipi_mb_con_priv con_priv_tx[IPI_MB_CHANS];
+ struct ipi_mb_con_priv con_priv_rx[IPI_MB_CHANS];
+
+ void *tx_mbase;
+ void *rx_mbase;
+ int mem_size;
+ int dev_num_per_chan;
+};
+
+struct ipi_mb_priv *mb_priv;
+
+static struct ipi_mb_priv *to_ipi_mb_priv(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct ipi_mb_priv, mbox);
+}
+
+static int ipi_mb_generic_tx(struct ipi_mb_priv *priv,
+ struct ipi_mb_con_priv *cp,
+ void *data)
+{
+ unsigned long *msg = data, *mb_base;
+ unsigned long queue;
+
+ switch (cp->type) {
+ case IPI_MB_TYPE_TX:
+ queue = *msg >> (QUEUE_ID_OFFSET + 1);
+ if (FIELD_GET(BIT(QUEUE_ID_OFFSET), *msg)) {
+ mb_base = mb_priv->tx_mbase;
+ WARN_ON(mb_base[queue]);
+ xchg(&mb_base[queue], *msg);
+ sbi_send_ipi_amp(cp->rtos_hart_id, IPI_MB_TYPE_TX); /* revert it */
+ } else {
+ mb_base = mb_priv->tx_mbase + TX_DONE_OFFSET;
+ WARN_ON(mb_base[queue]);
+ xchg(&mb_base[queue], *msg);
+ sbi_send_ipi_amp(cp->rtos_hart_id, IPI_MB_TYPE_RX);
+ tasklet_schedule(&cp->txdb_tasklet);
+ }
+ break;
+ default:
+ dev_warn_ratelimited(priv->dev,
+ "Send data on wrong channel type: %d\n", cp->type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct mbox_chan *queue_to_channel(unsigned long msg, bool tx)
+{
+ int index;
+ int offset = QUEUE_ID_OFFSET + QUEUE_TO_CHAN;
+
+ index = (tx) ? (msg >> offset) + IPI_MB_CHANS : (msg >> offset);
+
+ return &mb_priv->chans[index];
+}
+
+static void ipi_mb_isr(unsigned long msg_type)
+{
+ unsigned long *mb_base, msg;
+ struct mbox_chan *chan;
+ void *rx_done_base;
+ u32 i;
+
+ if (!msg_type)
+ return;
+
+ mb_base = mb_priv->rx_mbase;
+ rx_done_base = mb_priv->rx_mbase + TX_DONE_OFFSET;
+ if (msg_type & BIT(IPI_MB_TYPE_RX)) {
+ for (i = 0; i < IPI_MB_CHANS * mb_priv->dev_num_per_chan; i++) {
+ msg = xchg(&mb_base[i], 0);
+ chan = queue_to_channel(msg, 0);
+ if (msg)
+ mbox_chan_received_data(chan, (void *)&msg);
+ }
+ }
+ if (msg_type & BIT(IPI_MB_TYPE_TX)) {
+ mb_base = rx_done_base;
+ for (i = 0; i < IPI_MB_CHANS * mb_priv->dev_num_per_chan; i++) {
+ msg = xchg(&mb_base[i], 0);
+ chan = queue_to_channel(msg, 1);
+ if (msg) {
+ mbox_chan_received_data(chan, (void *)&msg);
+ mbox_chan_txdone(chan, 0);
+ }
+ }
+ }
+}
+
+static int ipi_mb_send_data(struct mbox_chan *chan, void *data)
+{
+ struct ipi_mb_priv *priv = to_ipi_mb_priv(chan->mbox);
+ struct ipi_mb_con_priv *cp = chan->con_priv;
+
+ return ipi_mb_generic_tx(priv, cp, data);
+}
+
+static void ipi_mb_txdb_tasklet(unsigned long data)
+{
+ struct ipi_mb_con_priv *cp = (struct ipi_mb_con_priv *)data;
+
+ mbox_chan_txdone(cp->chan, 0);
+}
+
+static const struct mbox_chan_ops ipi_mb_ops = {
+ .send_data = ipi_mb_send_data,
+};
+
+static struct mbox_chan *ipi_mb_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ struct mbox_chan *p_chan;
+ u32 type, idx;
+
+ if (sp->args_count != 2) {
+ dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
+ return ERR_PTR(-EINVAL);
+ }
+
+ type = sp->args[0]; /* channel type */
+ idx = sp->args[1]; /* index */
+
+ if (idx >= (mbox->num_chans >> 1)) {
+ dev_err(mbox->dev,
+ "Not supported channel number: %d. (type: %d, idx: %d)\n",
+ idx, type, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (type == IPI_MB_TYPE_TX)
+ p_chan = &mbox->chans[idx + IPI_MB_CHANS];
+ else
+ p_chan = &mbox->chans[idx];
+
+ return p_chan;
+}
+
+static void ipi_mb_init_generic(struct ipi_mb_priv *priv, int rtos_hart_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPI_MB_CHANS; i++) {
+ struct ipi_mb_con_priv *cp = &priv->con_priv_tx[i];
+
+ cp->idx = i;
+ cp->type = IPI_MB_TYPE_TX;
+ cp->chan = &priv->chans[i + IPI_MB_CHANS];
+ cp->rtos_hart_id = rtos_hart_id;
+ tasklet_init(&cp->txdb_tasklet, ipi_mb_txdb_tasklet,
+ (unsigned long)cp);
+ cp->chan->con_priv = cp;
+ }
+ for (i = 0; i < IPI_MB_CHANS; i++) {
+ struct ipi_mb_con_priv *cp = &priv->con_priv_rx[i];
+
+ cp->idx = i;
+ cp->type = IPI_MB_TYPE_RX;
+ cp->chan = &priv->chans[i];
+ cp->rtos_hart_id = rtos_hart_id;
+ cp->chan->con_priv = cp;
+ }
+
+ priv->mbox.num_chans = IPI_MB_CHANS * 2;
+ priv->mbox.of_xlate = ipi_mb_xlate;
+ priv->dev_num_per_chan = IPI_MB_DEV_PER_CHAN;
+
+}
+
+static int ipi_mb_init_mem_region(struct ipi_mb_priv *priv, struct platform_device *pdev)
+{
+ phys_addr_t phy_addr;
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ phy_addr = r->start;
+ priv->mem_size = resource_size(r);
+ priv->rx_mbase = devm_memremap(priv->dev, phy_addr,
+ priv->mem_size,
+ MEMREMAP_WB);
+
+ if (IS_ERR(priv->rx_mbase)) {
+ dev_err(priv->dev, "unable to map memory region: %llx %d\n",
+ (u64)r->start, priv->mem_size);
+ return -EBUSY;
+ }
+
+ priv->tx_mbase = priv->rx_mbase + TX_MBOX_OFFSET;
+
+ memset(priv->rx_mbase, 0, priv->mem_size);
+
+ return 0;
+}
+
+static int starfive_ipi_mb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct ipi_mb_priv *priv;
+ u32 rtos_hart_id;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (of_property_read_u32(np, "rtos-hart-id",
+ &rtos_hart_id))
+ return -EINVAL;
+
+ priv->dev = dev;
+
+ priv->mbox.dev = dev;
+ priv->mbox.ops = &ipi_mb_ops;
+ priv->mbox.chans = priv->chans;
+ priv->mbox.txdone_irq = true;
+ ipi_mb_init_generic(priv, rtos_hart_id);
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = ipi_mb_init_mem_region(priv, pdev);
+ if (ret)
+ return ret;
+
+ register_ipi_mailbox_handler(ipi_mb_isr);
+ mb_priv = priv;
+
+ ret = devm_mbox_controller_register(priv->dev, &priv->mbox);
+
+ return ret;
+}
+
+static const struct of_device_id ipi_amp_of_match[] = {
+ { .compatible = "starfive,ipi-amp-mailbox", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ipi_amp_of_match);
+
+static struct platform_driver starfive_ipi_mb_driver = {
+ .probe = starfive_ipi_mb_probe,
+ .driver = {
+ .name = "starfive-ipi-mailbox",
+ .of_match_table = ipi_amp_of_match,
+ },
+};
+module_platform_driver(starfive_ipi_mb_driver);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/phy/starfive/phy-jh7110-pcie.c b/drivers/phy/starfive/phy-jh7110-pcie.c
index 734c8e007727..3aee3189d23c 100644
--- a/drivers/phy/starfive/phy-jh7110-pcie.c
+++ b/drivers/phy/starfive/phy-jh7110-pcie.c
@@ -129,7 +129,17 @@ static int jh7110_pcie_phy_set_mode(struct phy *_phy,
return 0;
}
+static int jh7110_pcie_phy_exit(struct phy *_phy)
+{
+ struct jh7110_pcie_phy *phy = phy_get_drvdata(_phy);
+
+ phy->mode = PHY_MODE_INVALID;
+
+ return 0;
+}
+
static const struct phy_ops jh7110_pcie_phy_ops = {
+ .exit = jh7110_pcie_phy_exit,
.set_mode = jh7110_pcie_phy_set_mode,
.owner = THIS_MODULE,
};
diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
index 633912f8a05d..44a7acffee67 100644
--- a/drivers/phy/starfive/phy-jh7110-usb.c
+++ b/drivers/phy/starfive/phy-jh7110-usb.c
@@ -86,6 +86,8 @@ static int jh7110_usb2_phy_exit(struct phy *_phy)
clk_disable_unprepare(phy->app_125m);
+ phy->mode = PHY_MODE_INVALID;
+
return 0;
}
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index d3795860f5c0..8b9734c8bc2c 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -40,6 +40,15 @@ config RPMSG_MTK_SCP
remote processors in MediaTek platforms.
This use IPI and IPC to communicate with remote processors.
+config RPMSG_STARFIVE
+	tristate "Starfive rpmsg driver"
+ select RPMSG
+ depends on MAILBOX
+ help
+	  Say Y here to enable support providing rpmsg channels to
+	  remote processors in Starfive platforms.
+	  This uses IPI and IPC to communicate with remote processors.
+
config RPMSG_QCOM_GLINK
tristate
select RPMSG
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index 58e3b382e316..3825829c3ec5 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_GLINK_SMEM) += qcom_glink_smem.o
obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o
+obj-$(CONFIG_RPMSG_STARFIVE) += starfive_rpmsg.o
diff --git a/drivers/rpmsg/starfive_rpmsg.c b/drivers/rpmsg/starfive_rpmsg.c
new file mode 100644
index 000000000000..45862b6bc07f
--- /dev/null
+++ b/drivers/rpmsg/starfive_rpmsg.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 StarFive Technology Co., Ltd.
+ */
+
+#include <asm/irq.h>
+#include <linux/circ_buf.h>
+#include <linux/dma-map-ops.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/remoteproc.h>
+#include <linux/platform_device.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_ring.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+
+struct starfive_virdev {
+ struct virtio_device vdev;
+ void *vring[2];
+ struct virtqueue *vq[2];
+ int base_vq_id;
+ int num_of_vqs;
+ int index;
+ int status;
+ struct starfive_rpmsg_dev *rpdev;
+};
+
+struct starfive_rpmsg_mem {
+ void __iomem *vaddr;
+ phys_addr_t phy_addr;
+ unsigned long virtio_ram;
+ int size;
+ int count;
+};
+
+struct starfive_rpmsg_dev;
+
+struct starfive_vq_priv {
+ struct starfive_rpmsg_dev *rpdev;
+ unsigned long mmsg;
+ int vq_id;
+};
+
+struct starfive_rpmsg_dev {
+ struct mbox_client cl;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ int vdev_nums;
+ /* int first_notify; */
+ u32 flags;
+#define MAX_VDEV_NUMS 8
+ struct starfive_virdev ivdev[MAX_VDEV_NUMS];
+ struct starfive_rpmsg_mem shm_mem;
+ struct starfive_vq_priv vq_priv[MAX_VDEV_NUMS * 2];
+ struct delayed_work rpmsg_work;
+ struct circ_buf rx_buffer;
+ spinlock_t mu_lock;
+ struct platform_device *pdev;
+};
+
+#define VRING_BUFFER_SIZE 8192
+#define CIRC_ADD(val, add, size) (((val) + (add)) & ((size) - 1))
+#define RPMSG_NUM_BUFS (512)
+#define MAX_RPMSG_BUF_SIZE (512)
+
+static inline struct starfive_virdev *vdev_to_virdev(struct virtio_device *dev)
+{
+ return container_of(dev, struct starfive_virdev, vdev);
+}
+
+static void rpmsg_work_handler(struct work_struct *work)
+{
+ u32 message;
+ unsigned long flags;
+ struct starfive_virdev *virdev;
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct starfive_rpmsg_dev *rpdev = container_of(dwork,
+ struct starfive_rpmsg_dev, rpmsg_work);
+ struct circ_buf *cb = &rpdev->rx_buffer;
+ struct platform_device *pdev = rpdev->pdev;
+ struct device *dev = &pdev->dev;
+
+ spin_lock_irqsave(&rpdev->mu_lock, flags);
+ /* handle all incoming mu message */
+ while (CIRC_CNT(cb->head, cb->tail, PAGE_SIZE)) {
+ message = cb->buf[cb->tail];
+ message |= (cb->buf[cb->tail + 1] << 8);
+ message |= (cb->buf[cb->tail + 2] << 16);
+ message |= (cb->buf[cb->tail + 3] << 24);
+ spin_unlock_irqrestore(&rpdev->mu_lock, flags);
+ virdev = &rpdev->ivdev[(message >> 16) / 2];
+
+ dev_dbg(dev, "%s msg: 0x%x\n", __func__, message);
+
+ message = message >> 16;
+ message -= virdev->base_vq_id;
+ /*
+ * Currently both PENDING_MSG and explicit-virtqueue-index
+ * messaging are supported.
+ * Whatever approach is taken, at this point message contains
+ * the index of the vring which was just triggered.
+ */
+ if (message < virdev->num_of_vqs)
+ vring_interrupt(message, virdev->vq[message]);
+ spin_lock_irqsave(&rpdev->mu_lock, flags);
+ cb->tail = CIRC_ADD(cb->tail, 4, PAGE_SIZE);
+ }
+ spin_unlock_irqrestore(&rpdev->mu_lock, flags);
+}
+
+static void starfive_rpmsg_rx_callback(struct mbox_client *c, void *msg)
+{
+ int buf_space;
+ u32 *data = msg;
+ struct starfive_rpmsg_dev *rpdev = container_of(c,
+ struct starfive_rpmsg_dev, cl);
+ struct circ_buf *cb = &rpdev->rx_buffer;
+
+ spin_lock(&rpdev->mu_lock);
+ buf_space = CIRC_SPACE(cb->head, cb->tail, PAGE_SIZE);
+ if (unlikely(!buf_space)) {
+ dev_err(c->dev, "RPMSG RX overflow!\n");
+ spin_unlock(&rpdev->mu_lock);
+ return;
+ }
+ cb->buf[cb->head] = (u8) *data;
+ cb->buf[cb->head + 1] = (u8) (*data >> 8);
+ cb->buf[cb->head + 2] = (u8) (*data >> 16);
+ cb->buf[cb->head + 3] = (u8) (*data >> 24);
+ cb->head = CIRC_ADD(cb->head, 4, PAGE_SIZE);
+ spin_unlock(&rpdev->mu_lock);
+
+ schedule_delayed_work(&(rpdev->rpmsg_work), 0);
+}
+
+static unsigned long starfive_vring_init(struct vring *vr,
+ unsigned int num, void *p,
+ unsigned long align)
+{
+ unsigned long addr;
+
+ vr->num = num;
+ vr->desc = (struct vring_desc *)(void *)p;
+
+ addr = (unsigned long) (p + num * sizeof(struct vring_desc) + align - 1UL);
+ addr &= ~(align - 1UL);
+ vr->avail = (void *)addr;
+ addr = (u64)&vr->avail->ring[num];
+ addr += (align - 1UL);
+ addr &= ~(align - 1UL);
+ vr->used = (struct vring_used *)addr;
+ addr = (unsigned long)&vr->used->ring[num];
+ addr += (align - 1UL);
+ addr &= ~(align - 1UL);
+
+ return addr;
+}
+
+static bool starfive_virtio_notify(struct virtqueue *vq)
+{
+ int ret;
+ struct starfive_vq_priv *rpvq = vq->priv;
+ struct starfive_rpmsg_dev *rpdev = rpvq->rpdev;
+
+ rpvq->mmsg = rpvq->vq_id << 16 | 0x1;
+
+ rpdev->cl.tx_tout = 1000;
+ ret = mbox_send_message(rpdev->tx_ch, &rpvq->mmsg);
+ if (ret < 0)
+ return false;
+
+ return true;
+}
+
+static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
+ unsigned int id,
+ void (*callback)(struct virtqueue *vq),
+ const char *name, bool ctx)
+{
+ struct starfive_virdev *virdev = vdev_to_virdev(vdev);
+ struct starfive_rpmsg_dev *rpdev = virdev->rpdev;
+ struct starfive_rpmsg_mem *shm_mem = &rpdev->shm_mem;
+ struct device *dev = rpdev->cl.dev;
+ struct starfive_vq_priv *priv;
+ struct virtqueue *vq;
+
+ if (!name)
+ return NULL;
+ /*
+ * Create the new vq, and tell virtio we're not interested in
+ * the 'weak' smp barriers, since we're talking with a real device.
+ */
+ vq = vring_new_virtqueue_with_init(id, RPMSG_NUM_BUFS / 2, 64, vdev, false, ctx,
+ shm_mem->vaddr + shm_mem->count * VRING_BUFFER_SIZE,
+ starfive_virtio_notify, callback, name,
+ starfive_vring_init);
+ if (!vq) {
+ dev_err(dev, "vring_new_virtqueue %s failed\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+ virdev->vring[id] = shm_mem->vaddr + shm_mem->count * VRING_BUFFER_SIZE;
+ priv = &rpdev->vq_priv[shm_mem->count];
+ priv->rpdev = rpdev;
+ virdev->vq[id] = vq;
+ priv->vq_id = virdev->base_vq_id + id;
+ vq->priv = priv;
+ shm_mem->count++;
+
+ return vq;
+}
+
+static void starfive_virtio_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+ struct rproc_vring *rvring;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+ rvring = vq->priv;
+ rvring->vq = NULL;
+ vring_del_virtqueue(vq);
+ }
+}
+
+static int starfive_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *const names[],
+ const bool *ctx,
+ struct irq_affinity *desc)
+{
+ int i, ret, queue_idx = 0;
+ struct starfive_virdev *virdev = vdev_to_virdev(vdev);
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
+ ctx ? ctx[i] : false);
+ if (IS_ERR(vqs[i])) {
+ ret = PTR_ERR(vqs[i]);
+ goto error;
+ }
+ }
+ virdev->num_of_vqs = nvqs;
+
+ return 0;
+
+error:
+ starfive_virtio_del_vqs(vdev);
+ return ret;
+}
+
+static void starfive_virtio_reset(struct virtio_device *vdev)
+{
+}
+
+static u8 starfive_virtio_get_status(struct virtio_device *vdev)
+{
+ struct starfive_virdev *virdev = vdev_to_virdev(vdev);
+
+ return virdev->status;
+}
+
+static void starfive_virtio_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct starfive_virdev *virdev = vdev_to_virdev(vdev);
+
+ virdev->status = status;
+}
+
+static u64 starfive_virtio_get_features(struct virtio_device *vdev)
+{
+ return (BIT(0) | BIT(VIRTIO_F_VERSION_1));
+}
+
+static int starfive_virtio_finalize_features(struct virtio_device *vdev)
+{
+ vdev->features |= (BIT(0) | BIT(VIRTIO_F_VERSION_1));
+
+ return 0;
+}
+
+static bool starfive_get_shm_region(struct virtio_device *vdev,
+ struct virtio_shm_region *region, u8 id)
+{
+ struct starfive_virdev *virtdev = vdev_to_virdev(vdev);
+ struct starfive_rpmsg_mem *shm_mem = &virtdev->rpdev->shm_mem;
+
+ region->len = RPMSG_NUM_BUFS * MAX_RPMSG_BUF_SIZE;
+ region->addr = (shm_mem->virtio_ram + virtdev->index * region->len);
+
+ return true;
+}
+
+static const struct virtio_config_ops rpmsg_virtio_config_ops = {
+ .get_features = starfive_virtio_get_features,
+ .finalize_features = starfive_virtio_finalize_features,
+ .find_vqs = starfive_virtio_find_vqs,
+ .del_vqs = starfive_virtio_del_vqs,
+ .reset = starfive_virtio_reset,
+ .set_status = starfive_virtio_set_status,
+ .get_status = starfive_virtio_get_status,
+ .get_shm_region = starfive_get_shm_region,
+};
+
+static int starfive_alloc_memory_region(struct starfive_rpmsg_dev *rpmsg,
+ struct platform_device *pdev)
+{
+ struct starfive_rpmsg_mem *shm_mem;
+ struct resource *r;
+ void *addr;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ shm_mem = &rpmsg->shm_mem;
+ if (i == 0) {
+ shm_mem->phy_addr = r->start;
+ shm_mem->size = resource_size(r);
+ shm_mem->vaddr = devm_memremap(&pdev->dev, shm_mem->phy_addr,
+ shm_mem->size,
+ MEMREMAP_WB);
+ if (IS_ERR(shm_mem->vaddr)) {
+ dev_err(&pdev->dev, "unable to map memory region: %llx %d\n",
+ (u64)r->start, shm_mem->size);
+ return -EBUSY;
+ }
+ } else {
+ addr = devm_memremap(&pdev->dev, r->start,
+ resource_size(r),
+ MEMREMAP_WB);
+ if (IS_ERR(addr)) {
+ dev_err(&pdev->dev, "unable to map virtio memory region: %llx %d\n",
+ (u64)r->start, shm_mem->size);
+ return -EBUSY;
+ }
+ shm_mem->virtio_ram = (unsigned long)addr;
+ }
+
+ }
+
+ return 0;
+}
+
+static int starfive_rpmsg_xtr_channel_init(struct starfive_rpmsg_dev *rpdev)
+{
+ struct platform_device *pdev = rpdev->pdev;
+ struct device *dev = &pdev->dev;
+ struct mbox_client *cl;
+ int ret = 0;
+
+ cl = &rpdev->cl;
+ cl->dev = dev;
+ cl->tx_block = false;
+ cl->tx_tout = 20;
+ cl->knows_txdone = false;
+ cl->rx_callback = starfive_rpmsg_rx_callback;
+
+ rpdev->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(rpdev->tx_ch)) {
+ ret = PTR_ERR(rpdev->tx_ch);
+ dev_info(cl->dev, "failed to request mbox tx chan, ret %d\n",
+ ret);
+ goto err_out;
+ }
+ rpdev->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(rpdev->rx_ch)) {
+ ret = PTR_ERR(rpdev->rx_ch);
+ dev_info(cl->dev, "failed to request mbox rx chan, ret %d\n",
+ ret);
+ goto err_out;
+ }
+
+ return ret;
+
+err_out:
+ if (!IS_ERR(rpdev->tx_ch))
+ mbox_free_channel(rpdev->tx_ch);
+ if (!IS_ERR(rpdev->rx_ch))
+ mbox_free_channel(rpdev->rx_ch);
+
+ return ret;
+}
+
+static int starfive_rpmsg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct starfive_rpmsg_dev *rpmsg_dev;
+ int ret, i;
+ char *buf;
+
+ /* Allocate virtio device */
+ rpmsg_dev = devm_kzalloc(dev, sizeof(*rpmsg_dev), GFP_KERNEL);
+ if (!rpmsg_dev)
+ return -ENOMEM;
+
+ buf = devm_kzalloc(dev, PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ rpmsg_dev->rx_buffer.buf = buf;
+ rpmsg_dev->rx_buffer.head = 0;
+ rpmsg_dev->rx_buffer.tail = 0;
+ spin_lock_init(&rpmsg_dev->mu_lock);
+
+ rpmsg_dev->pdev = pdev;
+ ret = starfive_alloc_memory_region(rpmsg_dev, pdev);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(np, "vdev-nums", &rpmsg_dev->vdev_nums);
+ if (ret)
+ rpmsg_dev->vdev_nums = 1;
+
+ ret = starfive_rpmsg_xtr_channel_init(rpmsg_dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&(rpmsg_dev->rpmsg_work), rpmsg_work_handler);
+
+ memset(rpmsg_dev->shm_mem.vaddr, 0, rpmsg_dev->shm_mem.size);
+
+ for (i = 0; i < rpmsg_dev->vdev_nums; i++) {
+ rpmsg_dev->ivdev[i].vdev.id.device = VIRTIO_ID_RPMSG;
+ rpmsg_dev->ivdev[i].vdev.config = &rpmsg_virtio_config_ops;
+ rpmsg_dev->ivdev[i].vdev.dev.parent = &pdev->dev;
+ rpmsg_dev->ivdev[i].base_vq_id = i * 2;
+ rpmsg_dev->ivdev[i].rpdev = rpmsg_dev;
+ rpmsg_dev->ivdev[i].index = i;
+ ret = register_virtio_device(&rpmsg_dev->ivdev[i].vdev);
+ if (ret) {
+ dev_err(dev, "failed to register vdev: %d\n", ret);
+ if (i == 0)
+ goto err_chl;
+ break;
+ }
+ }
+
+ platform_set_drvdata(pdev, rpmsg_dev);
+
+ dev_info(dev, "registered %s\n", dev_name(&pdev->dev));
+
+ return 0;
+err_chl:
+ if (!IS_ERR(rpmsg_dev->tx_ch))
+ mbox_free_channel(rpmsg_dev->tx_ch);
+ if (!IS_ERR(rpmsg_dev->rx_ch))
+ mbox_free_channel(rpmsg_dev->rx_ch);
+ return ret;
+}
+
+static int starfive_rpmsg_remove(struct platform_device *pdev)
+{
+ struct starfive_rpmsg_dev *vdev = platform_get_drvdata(pdev);
+ int i;
+
+ cancel_delayed_work_sync(&vdev->rpmsg_work);
+ for (i = 0; i < vdev->vdev_nums; i++)
+ unregister_virtio_device(&vdev->ivdev[i].vdev);
+
+ if (!IS_ERR(vdev->tx_ch))
+ mbox_free_channel(vdev->tx_ch);
+ if (!IS_ERR(vdev->rx_ch))
+ mbox_free_channel(vdev->rx_ch);
+
+ return 0;
+}
+
+static const struct of_device_id amp_rpmsg_of_match[] = {
+ { .compatible = "starfive,amp-virtio-rpmsg", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, amp_rpmsg_of_match);
+
+static struct platform_driver starfive_rmpsg_driver = {
+ .probe = starfive_rpmsg_probe,
+ .remove = starfive_rpmsg_remove,
+ .driver = {
+ .name = "starfive-rpmsg",
+ .of_match_table = amp_rpmsg_of_match,
+ },
+};
+module_platform_driver(starfive_rmpsg_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index f1af0f674615..03640612f170 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -913,9 +913,24 @@ static int rpmsg_probe(struct virtio_device *vdev)
total_buf_space = vrp->num_bufs * vrp->buf_size;
/* allocate coherent memory for the buffers */
+#ifdef CONFIG_RISCV_AMP
+ if (vdev->config->get_shm_region) {
+ struct virtio_shm_region shm_mem;
+
+ vdev->config->get_shm_region(vdev, &shm_mem, 0);
+ bufs_va = (void *)shm_mem.addr;
+ vrp->bufs_dma = virt_to_phys(bufs_va);
+ } else {
+ bufs_va = dma_alloc_coherent(vdev->dev.parent,
+ total_buf_space, &vrp->bufs_dma,
+ GFP_KERNEL);
+ }
+#else
bufs_va = dma_alloc_coherent(vdev->dev.parent,
total_buf_space, &vrp->bufs_dma,
GFP_KERNEL);
+#endif
+
if (!bufs_va) {
err = -ENOMEM;
goto vqs_del;
@@ -1002,7 +1017,10 @@ static int rpmsg_probe(struct virtio_device *vdev)
free_ctrldev:
rpmsg_virtio_del_ctrl_dev(rpdev_ctrl);
free_coherent:
- dma_free_coherent(vdev->dev.parent, total_buf_space,
+#ifdef CONFIG_RISCV_AMP
+ if (!vdev->config->get_shm_region)
+#endif
+ dma_free_coherent(vdev->dev.parent, total_buf_space,
bufs_va, vrp->bufs_dma);
vqs_del:
vdev->config->del_vqs(vrp->vdev);
@@ -1034,7 +1052,10 @@ static void rpmsg_remove(struct virtio_device *vdev)
vdev->config->del_vqs(vrp->vdev);
- dma_free_coherent(vdev->dev.parent, total_buf_space,
+#ifdef CONFIG_RISCV_AMP
+ if (!vdev->config->get_shm_region)
+#endif
+ dma_free_coherent(vdev->dev.parent, total_buf_space,
vrp->rbufs, vrp->bufs_dma);
kfree(vrp);
diff --git a/drivers/usb/cdns3/cdns3-starfive.c b/drivers/usb/cdns3/cdns3-starfive.c
index a7265b86e427..6452533125bf 100644
--- a/drivers/usb/cdns3/cdns3-starfive.c
+++ b/drivers/usb/cdns3/cdns3-starfive.c
@@ -39,6 +39,7 @@ struct cdns_starfive {
struct clk_bulk_data *clks;
int num_clks;
u32 stg_usb_mode;
+ int mode;
};
static void cdns_mode_init(struct platform_device *pdev,
@@ -74,6 +75,7 @@ static void cdns_mode_init(struct platform_device *pdev,
default:
break;
}
+ data->mode = mode;
}
static int cdns_clk_rst_init(struct cdns_starfive *data)
@@ -104,6 +106,20 @@ static void cdns_clk_rst_deinit(struct cdns_starfive *data)
clk_bulk_disable_unprepare(data->num_clks, data->clks);
}
+/*
+ * Forward declaration: the real body lives in the PM section below
+ * (with a stub in the #else branch), but the pdata table referencing
+ * it is built unconditionally.
+ */
+static int cdns_starfive_platform_suspend(struct device *dev,
+					 bool suspend, bool wakeup);
+/* Passed to the cdns3 core so it calls back into this glue on suspend/resume. */
+static struct cdns3_platform_data cdns_starfive_pdata = {
+	.platform_suspend = cdns_starfive_platform_suspend,
+};
+
+/* Auxdata: attaches the pdata above to the child "cdns,usb3" DT node
+ * when of_platform_populate() creates it in probe. */
+static const struct of_dev_auxdata cdns_starfive_auxdata[] = {
+	{
+		.compatible = "cdns,usb3",
+		.platform_data = &cdns_starfive_pdata,
+	},
+	{},
+};
+
+
static int cdns_starfive_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -142,7 +158,7 @@ static int cdns_starfive_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ ret = of_platform_populate(dev->of_node, NULL, cdns_starfive_auxdata, dev);
if (ret) {
dev_err(dev, "Failed to create children\n");
cdns_clk_rst_deinit(data);
@@ -213,6 +229,31 @@ static int cdns_starfive_suspend(struct device *dev)
return 0;
}
+
+/*
+ * cdns_starfive_platform_suspend - PM callback invoked by the cdns3 core.
+ * @dev:     the cdns3 core device (drvdata is the struct cdns)
+ * @suspend: true on suspend, false on resume
+ * @wakeup:  unused here
+ *
+ * On resume (!suspend) the USB2/USB3 PHY mode is re-applied according to
+ * the dr_mode captured at probe time — presumably because the PHYs lose
+ * their mode setting across suspend; TODO confirm against the JH7110 PHY
+ * driver. Nothing is done on the suspend leg. Always returns 0.
+ */
+static int cdns_starfive_platform_suspend(struct device *dev,
+					 bool suspend, bool wakeup)
+{
+	struct cdns *cdns = dev_get_drvdata(dev);
+	struct cdns_starfive *data = dev_get_drvdata(dev->parent);
+
+	if (!suspend) {
+		if (data->mode == USB_DR_MODE_HOST) {
+			phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_HOST);
+			phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST);
+		} else if (data->mode == USB_DR_MODE_PERIPHERAL) {
+			phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_DEVICE);
+			phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
+		}
+	}
+
+	return 0;
+}
+#else
+/* !PM build: keep the symbol the unconditional pdata table references. */
+static int cdns_starfive_platform_suspend(struct device *dev,
+					 bool suspend, bool wakeup)
+{
+	return 0;
+}
#endif
#endif
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 49299b1f9ec7..305466098e46 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2869,6 +2869,36 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
+#ifdef CONFIG_RISCV_AMP
+/*
+ * vring_new_virtqueue_with_init - create a split virtqueue over
+ * caller-provided memory, letting the caller lay out the vring itself.
+ *
+ * Like vring_new_virtqueue(), but instead of calling vring_init() on
+ * @pages directly, the @init callback fills in the struct vring — this
+ * lets an AMP transport place descriptor/avail/used rings at addresses
+ * agreed with the remote processor.
+ *
+ * Returns the new virtqueue, or NULL if the device negotiated
+ * VIRTIO_F_RING_PACKED (only split rings are supported here).
+ *
+ * NOTE(review): @init's unsigned long return value is ignored — confirm
+ * whether callers rely on it (e.g. ring size in bytes) or the callback
+ * type could be narrowed to void.
+ */
+struct virtqueue *vring_new_virtqueue_with_init(unsigned int index,
+					unsigned int num,
+					unsigned int vring_align,
+					struct virtio_device *vdev,
+					bool weak_barriers,
+					bool context,
+					void *pages,
+					bool (*notify)(struct virtqueue *vq),
+					void (*callback)(struct virtqueue *vq),
+					const char *name,
+					unsigned long (*init)(struct vring *vr,
+							      unsigned int num,
+							      void *p,
+							      unsigned long align)
+					)
+{
+	struct vring_virtqueue_split vring_split = {};
+
+	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
+		return NULL;
+
+	/* Caller-supplied layout of the split ring inside @pages. */
+	init(&vring_split.vring, num, pages, vring_align);
+	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
+				     context, notify, callback, name,
+				     vdev->dev.parent);
+}
+EXPORT_SYMBOL_GPL(vring_new_virtqueue_with_init);
+#endif
+
static void vring_free(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 9b33df741b63..5843b7302fc6 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -107,7 +107,23 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
-
+#ifdef CONFIG_RISCV_AMP
+/*
+ * Split-ring variant of vring_new_virtqueue() where @init lays out the
+ * struct vring inside caller-provided @pages (used by the AMP rpmsg
+ * transport to share rings with a remote core).  Returns NULL when the
+ * device negotiated VIRTIO_F_RING_PACKED.
+ */
+struct virtqueue *vring_new_virtqueue_with_init(unsigned int index,
+					unsigned int num,
+					unsigned int vring_align,
+					struct virtio_device *vdev,
+					bool weak_barriers,
+					bool context,
+					void *pages,
+					bool (*notify)(struct virtqueue *vq),
+					void (*callback)(struct virtqueue *vq),
+					const char *name,
+					unsigned long (*init)(struct vring *vr,
+							      unsigned int num,
+							      void *p,
+							      unsigned long align)
+					);
+#endif
/*
* Destroys a virtqueue. If created with vring_create_virtqueue, this
* also frees the ring.
diff --git a/kernel/irq/ipi-mux.c b/kernel/irq/ipi-mux.c
index fa4fc18c6131..6eb5d65932d3 100644
--- a/kernel/irq/ipi-mux.c
+++ b/kernel/irq/ipi-mux.c
@@ -26,6 +26,14 @@ static struct ipi_mux_cpu __percpu *ipi_mux_pcpu;
static struct irq_domain *ipi_mux_domain;
static void (*ipi_mux_send)(unsigned int cpu);
+#ifdef CONFIG_RISCV_AMP
+/* Arch hook returning extra pending-IPI bits (e.g. raised by a remote
+ * AMP core outside the normal mux bitmap). */
+static unsigned long (*arch_get_extra_bits)(void);
+/*
+ * ipi_set_extra_bits - install the arch callback polled by
+ * ipi_mux_process() for additional pending IPIs.
+ *
+ * NOTE(review): plain pointer store with no synchronization or
+ * READ_ONCE/WRITE_ONCE pairing against the IRQ-context reader —
+ * presumably only called once during early boot; confirm.
+ */
+void ipi_set_extra_bits(unsigned long (*func)(void))
+{
+	arch_get_extra_bits = func;
+}
+#endif
+
static void ipi_mux_mask(struct irq_data *d)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
@@ -139,6 +147,16 @@ void ipi_mux_process(void)
for_each_set_bit(hwirq, &ipis, BITS_PER_TYPE(int))
generic_handle_domain_irq(ipi_mux_domain, hwirq);
+
+#ifdef CONFIG_RISCV_AMP
+ unsigned long extra_ipis;
+
+ if (arch_get_extra_bits) {
+ extra_ipis = arch_get_extra_bits();
+ for_each_set_bit(hwirq, &extra_ipis, BITS_PER_TYPE(int))
+ generic_handle_domain_irq(ipi_mux_domain, hwirq);
+ }
+#endif
}
/**