From 340974c4f709ce8142a672ab11f1889dff94d6dc Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 18 Aug 2025 09:39:00 +0530 Subject: mailbox: Add common header for RPMI messages sent via mailbox The RPMI based mailbox controller drivers and mailbox clients need to share defines related to RPMI messages over mailbox interface so add a common header for this purpose. Acked-by: Jassi Brar Co-developed-by: Rahul Pathak Signed-off-by: Rahul Pathak Signed-off-by: Anup Patel Link: https://lore.kernel.org/r/20250818040920.272664-5-apatel@ventanamicro.com Signed-off-by: Paul Walmsley --- include/linux/mailbox/riscv-rpmi-message.h | 214 +++++++++++++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 include/linux/mailbox/riscv-rpmi-message.h (limited to 'include/linux') diff --git a/include/linux/mailbox/riscv-rpmi-message.h b/include/linux/mailbox/riscv-rpmi-message.h new file mode 100644 index 000000000000..c3a98fc12c0a --- /dev/null +++ b/include/linux/mailbox/riscv-rpmi-message.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2025 Ventana Micro Systems Inc. */ + +#ifndef _LINUX_RISCV_RPMI_MESSAGE_H_ +#define _LINUX_RISCV_RPMI_MESSAGE_H_ + +#include +#include +#include +#include + +/* RPMI version encode/decode macros */ +#define RPMI_VER_MAJOR(__ver) upper_16_bits(__ver) +#define RPMI_VER_MINOR(__ver) lower_16_bits(__ver) +#define RPMI_MKVER(__maj, __min) (((u32)(__maj) << 16) | (u16)(__min)) + +/* RPMI message header */ +struct rpmi_message_header { + __le16 servicegroup_id; + u8 service_id; + u8 flags; + __le16 datalen; + __le16 token; +}; + +/* RPMI message */ +struct rpmi_message { + struct rpmi_message_header header; + u8 data[]; +}; + +/* RPMI notification event */ +struct rpmi_notification_event { + __le16 event_datalen; + u8 event_id; + u8 reserved; + u8 event_data[]; +}; + +/* RPMI error codes */ +enum rpmi_error_codes { + RPMI_SUCCESS = 0, + RPMI_ERR_FAILED = -1, + RPMI_ERR_NOTSUPP = -2, + RPMI_ERR_INVALID_PARAM = -3, + RPMI_ERR_DENIED = -4, + RPMI_ERR_INVALID_ADDR = -5, + RPMI_ERR_ALREADY = -6, + RPMI_ERR_EXTENSION = -7, + RPMI_ERR_HW_FAULT = -8, + RPMI_ERR_BUSY = -9, + RPMI_ERR_INVALID_STATE = -10, + RPMI_ERR_BAD_RANGE = -11, + RPMI_ERR_TIMEOUT = -12, + RPMI_ERR_IO = -13, + RPMI_ERR_NO_DATA = -14, + RPMI_ERR_RESERVED_START = -15, + RPMI_ERR_RESERVED_END = -127, + RPMI_ERR_VENDOR_START = -128, +}; + +static inline int rpmi_to_linux_error(int rpmi_error) +{ + switch (rpmi_error) { + case RPMI_SUCCESS: + return 0; + case RPMI_ERR_INVALID_PARAM: + case RPMI_ERR_BAD_RANGE: + case RPMI_ERR_INVALID_STATE: + return -EINVAL; + case RPMI_ERR_DENIED: + return -EPERM; + case RPMI_ERR_INVALID_ADDR: + case RPMI_ERR_HW_FAULT: + return -EFAULT; + case RPMI_ERR_ALREADY: + return -EALREADY; + case RPMI_ERR_BUSY: + return -EBUSY; + case RPMI_ERR_TIMEOUT: + return -ETIMEDOUT; + case RPMI_ERR_IO: + return -ECOMM; + case RPMI_ERR_FAILED: + case RPMI_ERR_NOTSUPP: + case RPMI_ERR_NO_DATA: + case RPMI_ERR_EXTENSION: + default: + return -EOPNOTSUPP; + } +} + +/* RPMI Linux mailbox attribute IDs */ +enum rpmi_mbox_attribute_id { + RPMI_MBOX_ATTR_SPEC_VERSION, + RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE, + RPMI_MBOX_ATTR_SERVICEGROUP_ID, + RPMI_MBOX_ATTR_SERVICEGROUP_VERSION, + RPMI_MBOX_ATTR_IMPL_ID, + RPMI_MBOX_ATTR_IMPL_VERSION, + RPMI_MBOX_ATTR_MAX_ID +}; + +/* RPMI Linux mailbox message types */ +enum rpmi_mbox_message_type { + RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE, + RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE, + RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE, + 
RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE, + RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT, + RPMI_MBOX_MSG_MAX_TYPE +}; + +/* RPMI Linux mailbox message instance */ +struct rpmi_mbox_message { + enum rpmi_mbox_message_type type; + union { + struct { + enum rpmi_mbox_attribute_id id; + u32 value; + } attr; + + struct { + u32 service_id; + void *request; + unsigned long request_len; + void *response; + unsigned long max_response_len; + unsigned long out_response_len; + } data; + + struct { + u16 event_datalen; + u8 event_id; + u8 *event_data; + } notif; + }; + int error; +}; + +/* RPMI Linux mailbox message helper routines */ +static inline void rpmi_mbox_init_get_attribute(struct rpmi_mbox_message *msg, + enum rpmi_mbox_attribute_id id) +{ + msg->type = RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE; + msg->attr.id = id; + msg->attr.value = 0; + msg->error = 0; +} + +static inline void rpmi_mbox_init_set_attribute(struct rpmi_mbox_message *msg, + enum rpmi_mbox_attribute_id id, + u32 value) +{ + msg->type = RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE; + msg->attr.id = id; + msg->attr.value = value; + msg->error = 0; +} + +static inline void rpmi_mbox_init_send_with_response(struct rpmi_mbox_message *msg, + u32 service_id, + void *request, + unsigned long request_len, + void *response, + unsigned long max_response_len) +{ + msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE; + msg->data.service_id = service_id; + msg->data.request = request; + msg->data.request_len = request_len; + msg->data.response = response; + msg->data.max_response_len = max_response_len; + msg->data.out_response_len = 0; + msg->error = 0; +} + +static inline void rpmi_mbox_init_send_without_response(struct rpmi_mbox_message *msg, + u32 service_id, + void *request, + unsigned long request_len) +{ + msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE; + msg->data.service_id = service_id; + msg->data.request = request; + msg->data.request_len = request_len; + msg->data.response = NULL; + msg->data.max_response_len = 0; + msg->data.out_response_len = 0; + msg->error = 0; +} + +static inline void *rpmi_mbox_get_msg_response(struct rpmi_mbox_message *msg) +{ + return msg ? msg->data.response : NULL; +} + +static inline int rpmi_mbox_send_message(struct mbox_chan *chan, + struct rpmi_mbox_message *msg) +{ + int ret; + + /* Send message for the underlying mailbox channel */ + ret = mbox_send_message(chan, msg); + if (ret < 0) + return ret; + + /* Explicitly signal txdone for mailbox channel */ + ret = msg->error; + mbox_client_txdone(chan, ret); + return ret; +} + +#endif /* _LINUX_RISCV_RPMI_MESSAGE_H_ */ -- cgit v1.2.3 From ba879dfc0574878f3e08f217b2b4fdf845c426c0 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 18 Aug 2025 09:39:01 +0530 Subject: mailbox: Allow controller specific mapping using fwnode Introduce an optional fw_xlate() callback which allows a mailbox controller driver to provide controller specific channel mapping using fwnode. The Linux OF framework already implements fwnode operations for the Linux device driver framework, so the fw_xlate() callback works fine with device tree as well. 
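A hypothetical controller driver whose "mboxes" reference carries a single cell (the channel index) might wire this up roughly as follows; the foo_ names are illustrative only and not part of this patch:

    static struct mbox_chan *foo_fw_xlate(struct mbox_controller *mbox,
                                          const struct fwnode_reference_args *sp)
    {
            unsigned int ch;

            /* Expect exactly one cell carrying the channel index */
            if (sp->nargs < 1)
                    return ERR_PTR(-EINVAL);

            ch = sp->args[0];
            if (ch >= mbox->num_chans)
                    return ERR_PTR(-EINVAL);

            return &mbox->chans[ch];
    }

    /* before calling mbox_controller_register(mbox) */
    mbox->fw_xlate = foo_fw_xlate;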
Acked-by: Jassi Brar Reviewed-by: Andy Shevchenko Signed-off-by: Anup Patel Link: https://lore.kernel.org/r/20250818040920.272664-6-apatel@ventanamicro.com Signed-off-by: Paul Walmsley --- drivers/mailbox/mailbox.c | 65 +++++++++++++++++++++++--------------- include/linux/mailbox_controller.h | 3 ++ 2 files changed, 43 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 5cd8ae222073..2acc6ec229a4 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "mailbox.h" @@ -383,34 +384,56 @@ EXPORT_SYMBOL_GPL(mbox_bind_client); */ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) { - struct device *dev = cl->dev; + struct fwnode_reference_args fwspec; + struct fwnode_handle *fwnode; struct mbox_controller *mbox; struct of_phandle_args spec; struct mbox_chan *chan; + struct device *dev; + unsigned int i; int ret; - if (!dev || !dev->of_node) { - pr_debug("%s: No owner device node\n", __func__); + dev = cl->dev; + if (!dev) { + pr_debug("No owner device\n"); return ERR_PTR(-ENODEV); } - ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", - index, &spec); + fwnode = dev_fwnode(dev); + if (!fwnode) { + dev_dbg(dev, "No owner fwnode\n"); + return ERR_PTR(-ENODEV); + } + + ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells", + 0, index, &fwspec); if (ret) { - dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__); + dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes"); return ERR_PTR(ret); } + spec.np = to_of_node(fwspec.fwnode); + spec.args_count = fwspec.nargs; + for (i = 0; i < spec.args_count; i++) + spec.args[i] = fwspec.args[i]; + scoped_guard(mutex, &con_mutex) { chan = ERR_PTR(-EPROBE_DEFER); - list_for_each_entry(mbox, &mbox_cons, node) - if (mbox->dev->of_node == spec.np) { - chan = mbox->of_xlate(mbox, &spec); - if (!IS_ERR(chan)) - break; + list_for_each_entry(mbox, &mbox_cons, node) { + if (device_match_fwnode(mbox->dev, fwspec.fwnode)) { + if (mbox->fw_xlate) { + chan = mbox->fw_xlate(mbox, &fwspec); + if (!IS_ERR(chan)) + break; + } else if (mbox->of_xlate) { + chan = mbox->of_xlate(mbox, &spec); + if (!IS_ERR(chan)) + break; + } } + } - of_node_put(spec.np); + fwnode_handle_put(fwspec.fwnode); if (IS_ERR(chan)) return chan; @@ -427,15 +450,8 @@ EXPORT_SYMBOL_GPL(mbox_request_channel); struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl, const char *name) { - struct device_node *np = cl->dev->of_node; - int index; - - if (!np) { - dev_err(cl->dev, "%s() currently only supports DT\n", __func__); - return ERR_PTR(-EINVAL); - } + int index = device_property_match_string(cl->dev, "mbox-names", name); - index = of_property_match_string(np, "mbox-names", name); if (index < 0) { dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n", __func__, name); @@ -470,9 +486,8 @@ void mbox_free_channel(struct mbox_chan *chan) } EXPORT_SYMBOL_GPL(mbox_free_channel); -static struct mbox_chan * -of_mbox_index_xlate(struct mbox_controller *mbox, - const struct of_phandle_args *sp) +static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox, + const struct fwnode_reference_args *sp) { int ind = sp->args[0]; @@ -523,8 +538,8 @@ int mbox_controller_register(struct mbox_controller *mbox) spin_lock_init(&chan->lock); } - if (!mbox->of_xlate) - mbox->of_xlate = of_mbox_index_xlate; + if (!mbox->fw_xlate && 
!mbox->of_xlate) + mbox->fw_xlate = fw_mbox_index_xlate; scoped_guard(mutex, &con_mutex) list_add_tail(&mbox->node, &mbox_cons); diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h index ad01c4082358..80a427c7ca29 100644 --- a/include/linux/mailbox_controller.h +++ b/include/linux/mailbox_controller.h @@ -66,6 +66,7 @@ struct mbox_chan_ops { * no interrupt rises. Ignored if 'txdone_irq' is set. * @txpoll_period: If 'txdone_poll' is in effect, the API polls for * last TX's status after these many millisecs + * @fw_xlate: Controller driver specific mapping of channel via fwnode * @of_xlate: Controller driver specific mapping of channel via DT * @poll_hrt: API private. hrtimer used to poll for TXDONE on all * channels. @@ -79,6 +80,8 @@ struct mbox_controller { bool txdone_irq; bool txdone_poll; unsigned txpoll_period; + struct mbox_chan *(*fw_xlate)(struct mbox_controller *mbox, + const struct fwnode_reference_args *sp); struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, const struct of_phandle_args *sp); /* Internal to API */ -- cgit v1.2.3 From 6f01c24f3a7525e953d514367a6d91b39c8f1f6a Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 18 Aug 2025 09:39:02 +0530 Subject: byteorder: Add memcpy_to_le32() and memcpy_from_le32() Add common memcpy APIs for copying u32 array to/from __le32 array. Suggested-by: Andy Shevchenko Reviewed-by: Andy Shevchenko Signed-off-by: Anup Patel Reviewed-by: Linus Walleij Acked-by: Jassi Brar Link: https://lore.kernel.org/r/20250818040920.272664-7-apatel@ventanamicro.com Signed-off-by: Paul Walmsley --- include/linux/byteorder/generic.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'include/linux') diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h index c9a4c96c9943..b3705e8bbe2b 100644 --- a/include/linux/byteorder/generic.h +++ b/include/linux/byteorder/generic.h @@ -173,6 +173,22 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words) } } +static inline void memcpy_from_le32(u32 *dst, const __le32 *src, size_t words) +{ + size_t i; + + for (i = 0; i < words; i++) + dst[i] = le32_to_cpu(src[i]); +} + +static inline void memcpy_to_le32(__le32 *dst, const u32 *src, size_t words) +{ + size_t i; + + for (i = 0; i < words; i++) + dst[i] = cpu_to_le32(src[i]); +} + static inline void be16_add_cpu(__be16 *var, u16 val) { *var = cpu_to_be16(be16_to_cpu(*var) + val); -- cgit v1.2.3 From 5ba9f520f41a33c99fd5d1eb81b5650ed3517b88 Mon Sep 17 00:00:00 2001 From: Rahul Pathak Date: Mon, 18 Aug 2025 09:39:06 +0530 Subject: clk: Add clock driver for the RISC-V RPMI clock service group The RPMI specification defines a clock service group which can be accessed via SBI MPXY extension or dedicated S-mode RPMI transport. Add mailbox client based clock driver for the RISC-V RPMI clock service group. 
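Every clock operation maps to a small RPMI request/response exchange over the mailbox channel using the helpers from riscv-rpmi-message.h. Reading the current rate of a clock, for instance, follows roughly the pattern below (a sketch of the flow used throughout the driver, with chan and clkid assumed to come from the driver context):

    struct rpmi_get_rate_tx tx;
    struct rpmi_get_rate_rx rx, *resp;
    struct rpmi_mbox_message msg;
    u64 rate;
    int ret;

    tx.clkid = cpu_to_le32(clkid);
    rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
                                      &tx, sizeof(tx), &rx, sizeof(rx));
    ret = rpmi_mbox_send_message(chan, &msg);
    if (ret)
            return ret;

    resp = rpmi_mbox_get_msg_response(&msg);
    if (!resp)
            return -EINVAL;
    if (resp->status)
            return rpmi_to_linux_error(le32_to_cpu(resp->status));

    /* Recombine the 32-bit halves into a 64-bit rate in Hz */
    rate = rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));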
Reviewed-by: Stephen Boyd Reviewed-by: Andy Shevchenko Co-developed-by: Anup Patel Signed-off-by: Anup Patel Signed-off-by: Rahul Pathak Link: https://lore.kernel.org/r/20250818040920.272664-11-apatel@ventanamicro.com [pjw@kernel.org: converted rpmi_clkrate_u64 macro to a function; replaced bare constant with a macro] Signed-off-by: Paul Walmsley --- drivers/clk/Kconfig | 8 + drivers/clk/Makefile | 1 + drivers/clk/clk-rpmi.c | 620 +++++++++++++++++++++++++++++ include/linux/mailbox/riscv-rpmi-message.h | 16 + 4 files changed, 645 insertions(+) create mode 100644 drivers/clk/clk-rpmi.c (limited to 'include/linux') diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 4d56475f94fc..4d51fa589c1e 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -501,6 +501,14 @@ config COMMON_CLK_SP7021 Not all features of the PLL are currently supported by the driver. +config COMMON_CLK_RPMI + tristate "Clock driver based on RISC-V RPMI" + depends on MAILBOX + default RISCV + help + Support for clocks based on the clock service group defined by + the RISC-V platform management interface (RPMI) specification. + source "drivers/clk/actions/Kconfig" source "drivers/clk/analogbits/Kconfig" source "drivers/clk/baikal-t1/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 18ed29cfdc11..b74a1767ca27 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -86,6 +86,7 @@ obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o obj-$(CONFIG_COMMON_CLK_RP1) += clk-rp1.o +obj-$(CONFIG_COMMON_CLK_RPMI) += clk-rpmi.o obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o diff --git a/drivers/clk/clk-rpmi.c b/drivers/clk/clk-rpmi.c new file mode 100644 index 000000000000..921296aafa68 --- /dev/null +++ b/drivers/clk/clk-rpmi.c @@ -0,0 +1,620 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V MPXY Based Clock Driver + * + * Copyright (C) 2025 Ventana Micro Systems Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RPMI_CLK_DISCRETE_MAX_NUM_RATES 16 +#define RPMI_CLK_NAME_LEN 16 + +#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw) + +enum rpmi_clk_config { + RPMI_CLK_DISABLE = 0, + RPMI_CLK_ENABLE = 1, + RPMI_CLK_CONFIG_MAX_IDX +}; + +#define RPMI_CLK_TYPE_MASK GENMASK(1, 0) +enum rpmi_clk_type { + RPMI_CLK_DISCRETE = 0, + RPMI_CLK_LINEAR = 1, + RPMI_CLK_TYPE_MAX_IDX +}; + +struct rpmi_clk_context { + struct device *dev; + struct mbox_chan *chan; + struct mbox_client client; + u32 max_msg_data_size; +}; + +/* + * rpmi_clk_rates represents the rates format + * as specified by the RPMI specification. + * No other data format (e.g., struct linear_range) + * is required to avoid to and from conversion. 
+ */ +union rpmi_clk_rates { + u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES]; + struct { + u64 min; + u64 max; + u64 step; + } linear; +}; + +struct rpmi_clk { + struct rpmi_clk_context *context; + u32 id; + u32 num_rates; + u32 transition_latency; + enum rpmi_clk_type type; + union rpmi_clk_rates *rates; + char name[RPMI_CLK_NAME_LEN]; + struct clk_hw hw; +}; + +struct rpmi_clk_rate_discrete { + __le32 lo; + __le32 hi; +}; + +struct rpmi_clk_rate_linear { + __le32 min_lo; + __le32 min_hi; + __le32 max_lo; + __le32 max_hi; + __le32 step_lo; + __le32 step_hi; +}; + +struct rpmi_get_num_clocks_rx { + __le32 status; + __le32 num_clocks; +}; + +struct rpmi_get_attrs_tx { + __le32 clkid; +}; + +struct rpmi_get_attrs_rx { + __le32 status; + __le32 flags; + __le32 num_rates; + __le32 transition_latency; + char name[RPMI_CLK_NAME_LEN]; +}; + +struct rpmi_get_supp_rates_tx { + __le32 clkid; + __le32 clk_rate_idx; +}; + +struct rpmi_get_supp_rates_rx { + __le32 status; + __le32 flags; + __le32 remaining; + __le32 returned; + __le32 rates[]; +}; + +struct rpmi_get_rate_tx { + __le32 clkid; +}; + +struct rpmi_get_rate_rx { + __le32 status; + __le32 lo; + __le32 hi; +}; + +struct rpmi_set_rate_tx { + __le32 clkid; + __le32 flags; + __le32 lo; + __le32 hi; +}; + +struct rpmi_set_rate_rx { + __le32 status; +}; + +struct rpmi_set_config_tx { + __le32 clkid; + __le32 config; +}; + +struct rpmi_set_config_rx { + __le32 status; +}; + +static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo) +{ + return (((u64)(__hi) << 32) | (u32)(__lo)); +} + +static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context) +{ + struct rpmi_get_num_clocks_rx rx, *resp; + struct rpmi_mbox_message msg; + int ret; + + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS, + NULL, 0, &rx, sizeof(rx)); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return 0; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp || resp->status) + return 0; + + return le32_to_cpu(resp->num_clocks); +} + +static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk) +{ + struct rpmi_clk_context *context = rpmi_clk->context; + struct rpmi_mbox_message msg; + struct rpmi_get_attrs_tx tx; + struct rpmi_get_attrs_rx rx, *resp; + u8 format; + int ret; + + tx.clkid = cpu_to_le32(clkid); + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES, + &tx, sizeof(tx), &rx, sizeof(rx)); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return ret; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp) + return -EINVAL; + if (resp->status) + return rpmi_to_linux_error(le32_to_cpu(resp->status)); + + rpmi_clk->id = clkid; + rpmi_clk->num_rates = le32_to_cpu(resp->num_rates); + rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency); + strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN); + + format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK; + if (format >= RPMI_CLK_TYPE_MAX_IDX) + return -EINVAL; + + rpmi_clk->type = format; + + return 0; +} + +static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk) +{ + struct rpmi_clk_context *context = rpmi_clk->context; + struct rpmi_clk_rate_discrete *rate_discrete; + struct rpmi_clk_rate_linear *rate_linear; + struct rpmi_get_supp_rates_tx tx; + struct rpmi_get_supp_rates_rx *resp; + struct rpmi_mbox_message msg; + size_t clk_rate_idx; + int ret, rateidx, j; + + tx.clkid = cpu_to_le32(clkid); + tx.clk_rate_idx = 0; + + /* + * Make sure we allocate rx buffer sufficient to be accommodate all + * the 
rates sent in one RPMI message. + */ + struct rpmi_get_supp_rates_rx *rx __free(kfree) = + kzalloc(context->max_msg_data_size, GFP_KERNEL); + if (!rx) + return -ENOMEM; + + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES, + &tx, sizeof(tx), rx, context->max_msg_data_size); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return ret; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp) + return -EINVAL; + if (resp->status) + return rpmi_to_linux_error(le32_to_cpu(resp->status)); + if (!le32_to_cpu(resp->returned)) + return -EINVAL; + + if (rpmi_clk->type == RPMI_CLK_DISCRETE) { + rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates; + + for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) { + rpmi_clk->rates->discrete[rateidx] = + rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi), + le32_to_cpu(rate_discrete[rateidx].lo)); + } + + /* + * Keep sending the request message until all + * the rates are received. + */ + clk_rate_idx = 0; + while (le32_to_cpu(resp->remaining)) { + clk_rate_idx += le32_to_cpu(resp->returned); + tx.clk_rate_idx = cpu_to_le32(clk_rate_idx); + + rpmi_mbox_init_send_with_response(&msg, + RPMI_CLK_SRV_GET_SUPPORTED_RATES, + &tx, sizeof(tx), + rx, context->max_msg_data_size); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return ret; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp) + return -EINVAL; + if (resp->status) + return rpmi_to_linux_error(le32_to_cpu(resp->status)); + if (!le32_to_cpu(resp->returned)) + return -EINVAL; + + for (j = 0; j < le32_to_cpu(resp->returned); j++) { + if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned)) + break; + rpmi_clk->rates->discrete[rateidx++] = + rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi), + le32_to_cpu(rate_discrete[j].lo)); + } + } + } else if (rpmi_clk->type == RPMI_CLK_LINEAR) { + rate_linear = (struct rpmi_clk_rate_linear *)resp->rates; + + rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi), + le32_to_cpu(rate_linear->min_lo)); + rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi), + le32_to_cpu(rate_linear->max_lo)); + rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi), + le32_to_cpu(rate_linear->step_lo)); + } + + return 0; +} + +static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw); + struct rpmi_clk_context *context = rpmi_clk->context; + struct rpmi_mbox_message msg; + struct rpmi_get_rate_tx tx; + struct rpmi_get_rate_rx rx, *resp; + int ret; + + tx.clkid = cpu_to_le32(rpmi_clk->id); + + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE, + &tx, sizeof(tx), &rx, sizeof(rx)); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return ret; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp) + return -EINVAL; + if (resp->status) + return rpmi_to_linux_error(le32_to_cpu(resp->status)); + + return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo)); +} + +static int rpmi_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw); + u64 fmin, fmax, ftmp; + + /* + * Keep the requested rate if the clock format + * is of discrete type. Let the platform which + * is actually controlling the clock handle that. 
+ */ + if (rpmi_clk->type == RPMI_CLK_DISCRETE) + return 0; + + fmin = rpmi_clk->rates->linear.min; + fmax = rpmi_clk->rates->linear.max; + + if (req->rate <= fmin) { + req->rate = fmin; + return 0; + } else if (req->rate >= fmax) { + req->rate = fmax; + return 0; + } + + ftmp = req->rate - fmin; + ftmp += rpmi_clk->rates->linear.step - 1; + do_div(ftmp, rpmi_clk->rates->linear.step); + + req->rate = ftmp * rpmi_clk->rates->linear.step + fmin; + + return 0; +} + +static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw); + struct rpmi_clk_context *context = rpmi_clk->context; + struct rpmi_mbox_message msg; + struct rpmi_set_rate_tx tx; + struct rpmi_set_rate_rx rx, *resp; + int ret; + + tx.clkid = cpu_to_le32(rpmi_clk->id); + tx.lo = cpu_to_le32(lower_32_bits(rate)); + tx.hi = cpu_to_le32(upper_32_bits(rate)); + + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE, + &tx, sizeof(tx), &rx, sizeof(rx)); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return ret; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp) + return -EINVAL; + if (resp->status) + return rpmi_to_linux_error(le32_to_cpu(resp->status)); + + return 0; +} + +static int rpmi_clk_enable(struct clk_hw *hw) +{ + struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw); + struct rpmi_clk_context *context = rpmi_clk->context; + struct rpmi_mbox_message msg; + struct rpmi_set_config_tx tx; + struct rpmi_set_config_rx rx, *resp; + int ret; + + tx.config = cpu_to_le32(RPMI_CLK_ENABLE); + tx.clkid = cpu_to_le32(rpmi_clk->id); + + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG, + &tx, sizeof(tx), &rx, sizeof(rx)); + + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return ret; + + resp = rpmi_mbox_get_msg_response(&msg); + if (!resp) + return -EINVAL; + if (resp->status) + return rpmi_to_linux_error(le32_to_cpu(resp->status)); + + return 0; +} + +static void rpmi_clk_disable(struct clk_hw *hw) +{ + struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw); + struct rpmi_clk_context *context = rpmi_clk->context; + struct rpmi_mbox_message msg; + struct rpmi_set_config_tx tx; + struct rpmi_set_config_rx rx; + + tx.config = cpu_to_le32(RPMI_CLK_DISABLE); + tx.clkid = cpu_to_le32(rpmi_clk->id); + + rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG, + &tx, sizeof(tx), &rx, sizeof(rx)); + + rpmi_mbox_send_message(context->chan, &msg); +} + +static const struct clk_ops rpmi_clk_ops = { + .recalc_rate = rpmi_clk_recalc_rate, + .determine_rate = rpmi_clk_determine_rate, + .set_rate = rpmi_clk_set_rate, + .prepare = rpmi_clk_enable, + .unprepare = rpmi_clk_disable, +}; + +static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid) +{ + struct device *dev = context->dev; + unsigned long min_rate, max_rate; + union rpmi_clk_rates *rates; + struct rpmi_clk *rpmi_clk; + struct clk_init_data init = {}; + struct clk_hw *clk_hw; + int ret; + + rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL); + if (!rates) + return ERR_PTR(-ENOMEM); + + rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL); + if (!rpmi_clk) + return ERR_PTR(-ENOMEM); + + rpmi_clk->context = context; + rpmi_clk->rates = rates; + + ret = rpmi_clk_get_attrs(clkid, rpmi_clk); + if (ret) + return dev_err_ptr_probe(dev, ret, + "Failed to get clk-%u attributes\n", + clkid); + + ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk); + if (ret) + return dev_err_ptr_probe(dev, ret, + "Get supported rates failed for 
clk-%u\n", + clkid); + + init.flags = CLK_GET_RATE_NOCACHE; + init.num_parents = 0; + init.ops = &rpmi_clk_ops; + init.name = rpmi_clk->name; + clk_hw = &rpmi_clk->hw; + clk_hw->init = &init; + + ret = devm_clk_hw_register(dev, clk_hw); + if (ret) + return dev_err_ptr_probe(dev, ret, + "Unable to register clk-%u\n", + clkid); + + if (rpmi_clk->type == RPMI_CLK_DISCRETE) { + min_rate = rpmi_clk->rates->discrete[0]; + max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1]; + } else { + min_rate = rpmi_clk->rates->linear.min; + max_rate = rpmi_clk->rates->linear.max; + } + + clk_hw_set_rate_range(clk_hw, min_rate, max_rate); + + return clk_hw; +} + +static void rpmi_clk_mbox_chan_release(void *data) +{ + struct mbox_chan *chan = data; + + mbox_free_channel(chan); +} + +static int rpmi_clk_probe(struct platform_device *pdev) +{ + int ret; + unsigned int num_clocks, i; + struct clk_hw_onecell_data *clk_data; + struct rpmi_clk_context *context; + struct rpmi_mbox_message msg; + struct clk_hw *hw_ptr; + struct device *dev = &pdev->dev; + + context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + context->dev = dev; + platform_set_drvdata(pdev, context); + + context->client.dev = context->dev; + context->client.rx_callback = NULL; + context->client.tx_block = false; + context->client.knows_txdone = true; + context->client.tx_tout = 0; + + context->chan = mbox_request_channel(&context->client, 0); + if (IS_ERR(context->chan)) + return PTR_ERR(context->chan); + + ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan); + if (ret) + return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n"); + + rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION); + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return dev_err_probe(dev, ret, "Failed to get spec version\n"); + if (msg.attr.value < RPMI_MKVER(1, 0)) { + return dev_err_probe(dev, -EINVAL, + "msg protocol version mismatch, expected 0x%x, found 0x%x\n", + RPMI_MKVER(1, 0), msg.attr.value); + } + + rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID); + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return dev_err_probe(dev, ret, "Failed to get service group ID\n"); + if (msg.attr.value != RPMI_SRVGRP_CLOCK) { + return dev_err_probe(dev, -EINVAL, + "service group match failed, expected 0x%x, found 0x%x\n", + RPMI_SRVGRP_CLOCK, msg.attr.value); + } + + rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION); + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return dev_err_probe(dev, ret, "Failed to get service group version\n"); + if (msg.attr.value < RPMI_MKVER(1, 0)) { + return dev_err_probe(dev, -EINVAL, + "service group version failed, expected 0x%x, found 0x%x\n", + RPMI_MKVER(1, 0), msg.attr.value); + } + + rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE); + ret = rpmi_mbox_send_message(context->chan, &msg); + if (ret) + return dev_err_probe(dev, ret, "Failed to get max message data size\n"); + + context->max_msg_data_size = msg.attr.value; + num_clocks = rpmi_clk_get_num_clocks(context); + if (!num_clocks) + return dev_err_probe(dev, -ENODEV, "No clocks found\n"); + + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks), + GFP_KERNEL); + if (!clk_data) + return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n"); + clk_data->num = num_clocks; + + for (i = 0; i < clk_data->num; i++) { + hw_ptr = rpmi_clk_enumerate(context, i); + if 
(IS_ERR(hw_ptr)) { + return dev_err_probe(dev, PTR_ERR(hw_ptr), + "Failed to register clk-%d\n", i); + } + clk_data->hws[i] = hw_ptr; + } + + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data); + if (ret) + return dev_err_probe(dev, ret, "Failed to register clock HW provider\n"); + + return 0; +} + +static const struct of_device_id rpmi_clk_of_match[] = { + { .compatible = "riscv,rpmi-clock" }, + { } +}; +MODULE_DEVICE_TABLE(of, rpmi_clk_of_match); + +static struct platform_driver rpmi_clk_driver = { + .driver = { + .name = "riscv-rpmi-clock", + .of_match_table = rpmi_clk_of_match, + }, + .probe = rpmi_clk_probe, +}; +module_platform_driver(rpmi_clk_driver); + +MODULE_AUTHOR("Rahul Pathak "); +MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mailbox/riscv-rpmi-message.h b/include/linux/mailbox/riscv-rpmi-message.h index c3a98fc12c0a..8176d33747fe 100644 --- a/include/linux/mailbox/riscv-rpmi-message.h +++ b/include/linux/mailbox/riscv-rpmi-message.h @@ -90,6 +90,22 @@ static inline int rpmi_to_linux_error(int rpmi_error) } } +/* RPMI service group IDs */ +#define RPMI_SRVGRP_CLOCK 0x00008 + +/* RPMI clock service IDs */ +enum rpmi_clock_service_id { + RPMI_CLK_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_CLK_SRV_GET_NUM_CLOCKS = 0x02, + RPMI_CLK_SRV_GET_ATTRIBUTES = 0x03, + RPMI_CLK_SRV_GET_SUPPORTED_RATES = 0x04, + RPMI_CLK_SRV_SET_CONFIG = 0x05, + RPMI_CLK_SRV_GET_CONFIG = 0x06, + RPMI_CLK_SRV_SET_RATE = 0x07, + RPMI_CLK_SRV_GET_RATE = 0x08, + RPMI_CLK_SRV_ID_MAX_COUNT +}; + /* RPMI Linux mailbox attribute IDs */ enum rpmi_mbox_attribute_id { RPMI_MBOX_ATTR_SPEC_VERSION, -- cgit v1.2.3 From aa43953e862c031ff66e44353c88beb7a449e80d Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 18 Aug 2025 09:39:09 +0530 Subject: irqchip: Add driver for the RPMI system MSI service group The RPMI specification defines a system MSI service group which allows application processors to receive MSIs upon system events such as graceful shutdown/reboot request, CPU hotplug event, memory hotplug event, etc. Add an irqchip driver for the RISC-V RPMI system MSI service group to directly receive system MSIs in Linux kernel. 
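A device sitting below the wired-to-MSI domain created by this driver (for example, a platform device that is signalled on a graceful shutdown request) would consume its system MSI through the usual platform interrupt APIs. A minimal consumer sketch, with shutdown_req_handler and the IRQ name being hypothetical:

    int irq, ret;

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;

    ret = devm_request_irq(&pdev->dev, irq, shutdown_req_handler, 0,
                           "rpmi-sys-event", priv);
    if (ret)
            return dev_err_probe(&pdev->dev, ret, "Failed to request system MSI\n");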
Reviewed-by: Thomas Gleixner Signed-off-by: Anup Patel Link: https://lore.kernel.org/r/20250818040920.272664-14-apatel@ventanamicro.com Signed-off-by: Paul Walmsley --- drivers/irqchip/Kconfig | 7 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-riscv-rpmi-sysmsi.c | 287 +++++++++++++++++++++++++++++ include/linux/mailbox/riscv-rpmi-message.h | 13 ++ 4 files changed, 308 insertions(+) create mode 100644 drivers/irqchip/irq-riscv-rpmi-sysmsi.c (limited to 'include/linux') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 6d12c6ab9ea4..e047ba36df16 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -634,6 +634,13 @@ config RISCV_IMSIC select GENERIC_MSI_IRQ select IRQ_MSI_LIB +config RISCV_RPMI_SYSMSI + bool + depends on MAILBOX + select IRQ_DOMAIN_HIERARCHY + select GENERIC_MSI_IRQ + default RISCV + config SIFIVE_PLIC bool depends on RISCV diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 93e3ced023bb..3de083f5484c 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o +obj-$(CONFIG_RISCV_RPMI_SYSMSI) += irq-riscv-rpmi-sysmsi.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o obj-$(CONFIG_ACLINT_SSWI) += irq-aclint-sswi.o diff --git a/drivers/irqchip/irq-riscv-rpmi-sysmsi.c b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c new file mode 100644 index 000000000000..92e8847dfccc --- /dev/null +++ b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025 Ventana Micro Systems Inc. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct rpmi_sysmsi_get_attrs_rx { + __le32 status; + __le32 sys_num_msi; + __le32 flag0; + __le32 flag1; +}; + +#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV BIT(0) + +struct rpmi_sysmsi_set_msi_state_tx { + __le32 sys_msi_index; + __le32 sys_msi_state; +}; + +struct rpmi_sysmsi_set_msi_state_rx { + __le32 status; +}; + +#define RPMI_SYSMSI_MSI_STATE_ENABLE BIT(0) +#define RPMI_SYSMSI_MSI_STATE_PENDING BIT(1) + +struct rpmi_sysmsi_set_msi_target_tx { + __le32 sys_msi_index; + __le32 sys_msi_address_low; + __le32 sys_msi_address_high; + __le32 sys_msi_data; +}; + +struct rpmi_sysmsi_set_msi_target_rx { + __le32 status; +}; + +struct rpmi_sysmsi_priv { + struct device *dev; + struct mbox_client client; + struct mbox_chan *chan; + u32 nr_irqs; + u32 gsi_base; +}; + +static int rpmi_sysmsi_get_num_msi(struct rpmi_sysmsi_priv *priv) +{ + struct rpmi_sysmsi_get_attrs_rx rx; + struct rpmi_mbox_message msg; + int ret; + + rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_GET_ATTRIBUTES, + NULL, 0, &rx, sizeof(rx)); + ret = rpmi_mbox_send_message(priv->chan, &msg); + if (ret) + return ret; + if (rx.status) + return rpmi_to_linux_error(le32_to_cpu(rx.status)); + + return le32_to_cpu(rx.sys_num_msi); +} + +static int rpmi_sysmsi_set_msi_state(struct rpmi_sysmsi_priv *priv, + u32 sys_msi_index, u32 sys_msi_state) +{ + struct rpmi_sysmsi_set_msi_state_tx tx; + struct rpmi_sysmsi_set_msi_state_rx rx; + struct rpmi_mbox_message msg; + int ret; + + tx.sys_msi_index = cpu_to_le32(sys_msi_index); + tx.sys_msi_state = cpu_to_le32(sys_msi_state); + rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_STATE, + &tx, sizeof(tx), &rx, sizeof(rx)); + ret = rpmi_mbox_send_message(priv->chan, &msg); + if (ret) + return ret; + if (rx.status) + return rpmi_to_linux_error(le32_to_cpu(rx.status)); + + return 0; +} + +static int rpmi_sysmsi_set_msi_target(struct rpmi_sysmsi_priv *priv, + u32 sys_msi_index, struct msi_msg *m) +{ + struct rpmi_sysmsi_set_msi_target_tx tx; + struct rpmi_sysmsi_set_msi_target_rx rx; + struct rpmi_mbox_message msg; + int ret; + + tx.sys_msi_index = cpu_to_le32(sys_msi_index); + tx.sys_msi_address_low = cpu_to_le32(m->address_lo); + tx.sys_msi_address_high = cpu_to_le32(m->address_hi); + tx.sys_msi_data = cpu_to_le32(m->data); + rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_TARGET, + &tx, sizeof(tx), &rx, sizeof(rx)); + ret = rpmi_mbox_send_message(priv->chan, &msg); + if (ret) + return ret; + if (rx.status) + return rpmi_to_linux_error(le32_to_cpu(rx.status)); + + return 0; +} + +static void rpmi_sysmsi_irq_mask(struct irq_data *d) +{ + struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int ret; + + ret = rpmi_sysmsi_set_msi_state(priv, hwirq, 0); + if (ret) + dev_warn(priv->dev, "Failed to mask hwirq %lu (error %d)\n", hwirq, ret); + irq_chip_mask_parent(d); +} + +static void rpmi_sysmsi_irq_unmask(struct irq_data *d) +{ + struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int ret; + + irq_chip_unmask_parent(d); + ret = rpmi_sysmsi_set_msi_state(priv, hwirq, RPMI_SYSMSI_MSI_STATE_ENABLE); + if (ret) + dev_warn(priv->dev, "Failed to unmask hwirq %lu (error %d)\n", hwirq, ret); +} + +static void rpmi_sysmsi_write_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct rpmi_sysmsi_priv 
*priv = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + int ret; + + /* For zeroed MSI, do nothing as of now */ + if (!msg->address_hi && !msg->address_lo && !msg->data) + return; + + ret = rpmi_sysmsi_set_msi_target(priv, hwirq, msg); + if (ret) + dev_warn(priv->dev, "Failed to set target for hwirq %lu (error %d)\n", hwirq, ret); +} + +static void rpmi_sysmsi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = desc->data.icookie.value; +} + +static int rpmi_sysmsi_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + struct msi_domain_info *info = d->host_data; + struct rpmi_sysmsi_priv *priv = info->data; + + if (WARN_ON(fwspec->param_count < 1)) + return -EINVAL; + + /* For DT, gsi_base is always zero. */ + *hwirq = fwspec->param[0] - priv->gsi_base; + *type = IRQ_TYPE_NONE; + return 0; +} + +static const struct msi_domain_template rpmi_sysmsi_template = { + .chip = { + .name = "RPMI-SYSMSI", + .irq_mask = rpmi_sysmsi_irq_mask, + .irq_unmask = rpmi_sysmsi_irq_unmask, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .irq_write_msi_msg = rpmi_sysmsi_write_msg, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, + }, + + .ops = { + .set_desc = rpmi_sysmsi_set_desc, + .msi_translate = rpmi_sysmsi_translate, + }, + + .info = { + .bus_token = DOMAIN_BUS_WIRED_TO_MSI, + .flags = MSI_FLAG_USE_DEV_FWNODE, + .handler = handle_simple_irq, + .handler_name = "simple", + }, +}; + +static int rpmi_sysmsi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rpmi_sysmsi_priv *priv; + int rc; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + priv->dev = dev; + + /* Setup mailbox client */ + priv->client.dev = priv->dev; + priv->client.rx_callback = NULL; + priv->client.tx_block = false; + priv->client.knows_txdone = true; + priv->client.tx_tout = 0; + + /* Request mailbox channel */ + priv->chan = mbox_request_channel(&priv->client, 0); + if (IS_ERR(priv->chan)) + return PTR_ERR(priv->chan); + + /* Get number of system MSIs */ + rc = rpmi_sysmsi_get_num_msi(priv); + if (rc < 1) { + mbox_free_channel(priv->chan); + if (rc) + return dev_err_probe(dev, rc, "Failed to get number of system MSIs\n"); + else + return dev_err_probe(dev, -ENODEV, "No system MSIs found\n"); + } + priv->nr_irqs = rc; + + /* + * The device MSI domain for platform devices on RISC-V architecture + * is only available after the MSI controller driver is probed so, + * explicitly configure here. + */ + if (!dev_get_msi_domain(dev)) { + /* + * The device MSI domain for OF devices is only set at the + * time of populating/creating OF device. If the device MSI + * domain is discovered later after the OF device is created + * then we need to set it explicitly before using any platform + * MSI functions. 
+ */ + if (dev_of_node(dev)) + of_msi_configure(dev, dev_of_node(dev)); + + if (!dev_get_msi_domain(dev)) { + mbox_free_channel(priv->chan); + return -EPROBE_DEFER; + } + } + + if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, + &rpmi_sysmsi_template, + priv->nr_irqs, priv, priv)) { + mbox_free_channel(priv->chan); + return dev_err_probe(dev, -ENOMEM, "failed to create MSI irq domain\n"); + } + + dev_info(dev, "%u system MSIs registered\n", priv->nr_irqs); + return 0; +} + +static const struct of_device_id rpmi_sysmsi_match[] = { + { .compatible = "riscv,rpmi-system-msi" }, + {} +}; + +static struct platform_driver rpmi_sysmsi_driver = { + .driver = { + .name = "rpmi-sysmsi", + .of_match_table = rpmi_sysmsi_match, + }, + .probe = rpmi_sysmsi_probe, +}; +builtin_platform_driver(rpmi_sysmsi_driver); diff --git a/include/linux/mailbox/riscv-rpmi-message.h b/include/linux/mailbox/riscv-rpmi-message.h index 8176d33747fe..e135c6564d0c 100644 --- a/include/linux/mailbox/riscv-rpmi-message.h +++ b/include/linux/mailbox/riscv-rpmi-message.h @@ -91,6 +91,7 @@ static inline int rpmi_to_linux_error(int rpmi_error) } /* RPMI service group IDs */ +#define RPMI_SRVGRP_SYSTEM_MSI 0x00002 #define RPMI_SRVGRP_CLOCK 0x00008 /* RPMI clock service IDs */ @@ -106,6 +107,18 @@ enum rpmi_clock_service_id { RPMI_CLK_SRV_ID_MAX_COUNT }; +/* RPMI system MSI service IDs */ +enum rpmi_sysmsi_service_id { + RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01, + RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x02, + RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x03, + RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x04, + RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x05, + RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x06, + RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x07, + RPMI_SYSMSI_SRV_ID_MAX_COUNT +}; + /* RPMI Linux mailbox attribute IDs */ enum rpmi_mbox_attribute_id { RPMI_MBOX_ATTR_SPEC_VERSION, -- cgit v1.2.3