Diffstat (limited to 'drivers/firmware')
-rw-r--r-- | drivers/firmware/arm_scmi/perf.c   | 430
-rw-r--r-- | drivers/firmware/imx/imx-scu-irq.c | 118
-rw-r--r-- | drivers/firmware/imx/imx-scu-soc.c |  20
-rw-r--r-- | drivers/firmware/imx/imx-scu.c     |   9
-rw-r--r-- | drivers/firmware/meson/meson_sm.c  |   2
-rw-r--r-- | drivers/firmware/qcom_scm.c        | 156
-rw-r--r-- | drivers/firmware/ti_sci.c          |  49
7 files changed, 533 insertions, 251 deletions
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index ecf5c4de851b..c0cd556fbaae 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -2,19 +2,22 @@ /* * System Control and Management Interface (SCMI) Performance Protocol * - * Copyright (C) 2018-2022 ARM Ltd. + * Copyright (C) 2018-2023 ARM Ltd. */ #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt #include <linux/bits.h> -#include <linux/of.h> +#include <linux/hashtable.h> #include <linux/io.h> +#include <linux/log2.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/scmi_protocol.h> #include <linux/sort.h> +#include <linux/xarray.h> #include <trace/events/scmi.h> @@ -46,6 +49,9 @@ struct scmi_opp { u32 perf; u32 power; u32 trans_latency_us; + u32 indicative_freq; + u32 level_index; + struct hlist_node hash; }; struct scmi_msg_resp_perf_attributes { @@ -66,6 +72,7 @@ struct scmi_msg_resp_perf_domain_attributes { #define SUPPORTS_PERF_LEVEL_NOTIFY(x) ((x) & BIT(28)) #define SUPPORTS_PERF_FASTCHANNELS(x) ((x) & BIT(27)) #define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(26)) +#define SUPPORTS_LEVEL_INDEXING(x) ((x) & BIT(25)) __le32 rate_limit_us; __le32 sustained_freq_khz; __le32 sustained_perf_level; @@ -122,12 +129,27 @@ struct scmi_msg_resp_perf_describe_levels { } opp[]; }; +struct scmi_msg_resp_perf_describe_levels_v4 { + __le16 num_returned; + __le16 num_remaining; + struct { + __le32 perf_val; + __le32 power; + __le16 transition_latency_us; + __le16 reserved; + __le32 indicative_freq; + __le32 level_index; + } opp[]; +}; + struct perf_dom_info { + u32 id; bool set_limits; bool set_perf; bool perf_limit_notify; bool perf_level_notify; bool perf_fastchannels; + bool level_indexing_mode; u32 opp_count; u32 sustained_freq_khz; u32 sustained_perf_level; @@ -135,11 +157,26 @@ struct perf_dom_info { char name[SCMI_MAX_STR_SIZE]; struct scmi_opp opp[MAX_OPPS]; struct scmi_fc_info *fc_info; + struct xarray opps_by_idx; + struct xarray opps_by_lvl; + DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS)); }; +#define LOOKUP_BY_FREQ(__htp, __freq) \ +({ \ + /* u32 cast is needed to pick right hash func */ \ + u32 f_ = (u32)(__freq); \ + struct scmi_opp *_opp; \ + \ + hash_for_each_possible((__htp), _opp, hash, f_) \ + if (_opp->indicative_freq == f_) \ + break; \ + _opp; \ +}) + struct scmi_perf_info { u32 version; - int num_domains; + u16 num_domains; enum scmi_power_scale power_scale; u64 stats_addr; u32 stats_size; @@ -186,9 +223,20 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph, return ret; } +static void scmi_perf_xa_destroy(void *data) +{ + int domain; + struct scmi_perf_info *pinfo = data; + + for (domain = 0; domain < pinfo->num_domains; domain++) { + xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx)); + xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl)); + } +} + static int scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, - u32 domain, struct perf_dom_info *dom_info, + struct perf_dom_info *dom_info, u32 version) { int ret; @@ -197,11 +245,11 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, struct scmi_msg_resp_perf_domain_attributes *attr; ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES, - sizeof(domain), sizeof(*attr), &t); + sizeof(dom_info->id), sizeof(*attr), &t); if (ret) return ret; - put_unaligned_le32(domain, t->tx.buf); + put_unaligned_le32(dom_info->id, t->tx.buf); attr = t->rx.buf; ret = 
ph->xops->do_xfer(ph, t); @@ -213,6 +261,9 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags); dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags); dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags); + if (PROTOCOL_REV_MAJOR(version) >= 0x4) + dom_info->level_indexing_mode = + SUPPORTS_LEVEL_INDEXING(flags); dom_info->sustained_freq_khz = le32_to_cpu(attr->sustained_freq_khz); dom_info->sustained_perf_level = @@ -236,8 +287,15 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, */ if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 && SUPPORTS_EXTENDED_NAMES(flags)) - ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain, - dom_info->name, SCMI_MAX_STR_SIZE); + ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, + dom_info->id, dom_info->name, + SCMI_MAX_STR_SIZE); + + if (dom_info->level_indexing_mode) { + xa_init(&dom_info->opps_by_idx); + xa_init(&dom_info->opps_by_lvl); + hash_init(dom_info->opps_by_freq); + } return ret; } @@ -250,7 +308,7 @@ static int opp_cmp_func(const void *opp1, const void *opp2) } struct scmi_perf_ipriv { - u32 domain; + u32 version; struct perf_dom_info *perf_dom; }; @@ -261,7 +319,7 @@ static void iter_perf_levels_prepare_message(void *message, struct scmi_msg_perf_describe_levels *msg = message; const struct scmi_perf_ipriv *p = priv; - msg->domain = cpu_to_le32(p->domain); + msg->domain = cpu_to_le32(p->perf_dom->id); /* Set the number of OPPs to be skipped/already read */ msg->level_index = cpu_to_le32(desc_index); } @@ -277,31 +335,63 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st, return 0; } +static inline void +process_response_opp(struct scmi_opp *opp, unsigned int loop_idx, + const struct scmi_msg_resp_perf_describe_levels *r) +{ + opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); + opp->power = le32_to_cpu(r->opp[loop_idx].power); + opp->trans_latency_us = + le16_to_cpu(r->opp[loop_idx].transition_latency_us); +} + +static inline void +process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp, + unsigned int loop_idx, + const struct scmi_msg_resp_perf_describe_levels_v4 *r) +{ + opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val); + opp->power = le32_to_cpu(r->opp[loop_idx].power); + opp->trans_latency_us = + le16_to_cpu(r->opp[loop_idx].transition_latency_us); + + /* Note that PERF v4 reports always five 32-bit words */ + opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq); + if (dom->level_indexing_mode) { + opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index); + + xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL); + xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL); + hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq); + } +} + static int iter_perf_levels_process_response(const struct scmi_protocol_handle *ph, const void *response, struct scmi_iterator_state *st, void *priv) { struct scmi_opp *opp; - const struct scmi_msg_resp_perf_describe_levels *r = response; struct scmi_perf_ipriv *p = priv; opp = &p->perf_dom->opp[st->desc_index + st->loop_idx]; - opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val); - opp->power = le32_to_cpu(r->opp[st->loop_idx].power); - opp->trans_latency_us = - le16_to_cpu(r->opp[st->loop_idx].transition_latency_us); + if (PROTOCOL_REV_MAJOR(p->version) <= 0x3) + process_response_opp(opp, st->loop_idx, response); + else + process_response_opp_v4(p->perf_dom, opp, st->loop_idx, + 
response); p->perf_dom->opp_count++; - dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n", - opp->perf, opp->power, opp->trans_latency_us); + dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n", + opp->perf, opp->power, opp->trans_latency_us, + opp->indicative_freq, opp->level_index); return 0; } static int -scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, - struct perf_dom_info *perf_dom) +scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, + struct perf_dom_info *perf_dom, u32 version) { int ret; void *iter; @@ -311,7 +401,7 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, .process_response = iter_perf_levels_process_response, }; struct scmi_perf_ipriv ppriv = { - .domain = domain, + .version = version, .perf_dom = perf_dom, }; @@ -333,8 +423,8 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, return ret; } -static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, - u32 domain, u32 max_perf, u32 min_perf) +static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 max_perf, u32 min_perf) { int ret; struct scmi_xfer *t; @@ -356,31 +446,73 @@ static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, return ret; } -static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, - u32 domain, u32 max_perf, u32 min_perf) +static inline struct perf_dom_info * +scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain) { struct scmi_perf_info *pi = ph->get_priv(ph); - struct perf_dom_info *dom = pi->dom_info + domain; - if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) - return -EINVAL; + if (domain >= pi->num_domains) + return ERR_PTR(-EINVAL); + return pi->dom_info + domain; +} + +static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph, + struct perf_dom_info *dom, u32 max_perf, + u32 min_perf) +{ if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET, - domain, min_perf, max_perf); + dom->id, min_perf, max_perf); iowrite32(max_perf, fci->set_addr); iowrite32(min_perf, fci->set_addr + 4); ph->hops->fastchannel_db_ring(fci->set_db); return 0; } - return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf); + return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf); } -static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph, - u32 domain, u32 *max_perf, u32 *min_perf) +static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 max_perf, u32 min_perf) +{ + struct scmi_perf_info *pi = ph->get_priv(ph); + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) + return -EINVAL; + + if (dom->level_indexing_mode) { + struct scmi_opp *opp; + + if (min_perf) { + opp = xa_load(&dom->opps_by_lvl, min_perf); + if (!opp) + return -EIO; + + min_perf = opp->level_index; + } + + if (max_perf) { + opp = xa_load(&dom->opps_by_lvl, max_perf); + if (!opp) + return -EIO; + + max_perf = opp->level_index; + } + } + + return __scmi_perf_limits_set(ph, dom, max_perf, min_perf); +} + +static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *max_perf, u32 *min_perf) { int ret; struct scmi_xfer *t; @@ -405,27 +537,58 @@ 
static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph, return ret; } -static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, - u32 domain, u32 *max_perf, u32 *min_perf) +static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph, + struct perf_dom_info *dom, u32 *max_perf, + u32 *min_perf) { - struct scmi_perf_info *pi = ph->get_priv(ph); - struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; *max_perf = ioread32(fci->get_addr); *min_perf = ioread32(fci->get_addr + 4); trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET, - domain, *min_perf, *max_perf); + dom->id, *min_perf, *max_perf); return 0; } - return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf); + return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf); } -static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph, - u32 domain, u32 level, bool poll) +static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *max_perf, u32 *min_perf) +{ + int ret; + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf); + if (ret) + return ret; + + if (dom->level_indexing_mode) { + struct scmi_opp *opp; + + opp = xa_load(&dom->opps_by_idx, *min_perf); + if (!opp) + return -EIO; + + *min_perf = opp->perf; + + opp = xa_load(&dom->opps_by_idx, *max_perf); + if (!opp) + return -EIO; + + *max_perf = opp->perf; + } + + return 0; +} + +static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 level, bool poll) { int ret; struct scmi_xfer *t; @@ -446,27 +609,47 @@ static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph, return ret; } -static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, - u32 domain, u32 level, bool poll) +static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph, + struct perf_dom_info *dom, u32 level, + bool poll) { - struct scmi_perf_info *pi = ph->get_priv(ph); - struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET, - domain, level, 0); + dom->id, level, 0); iowrite32(level, fci->set_addr); ph->hops->fastchannel_db_ring(fci->set_db); return 0; } - return scmi_perf_mb_level_set(ph, domain, level, poll); + return scmi_perf_msg_level_set(ph, dom->id, level, poll); } -static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph, - u32 domain, u32 *level, bool poll) +static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, + u32 domain, u32 level, bool poll) +{ + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + if (dom->level_indexing_mode) { + struct scmi_opp *opp; + + opp = xa_load(&dom->opps_by_lvl, level); + if (!opp) + return -EIO; + + level = opp->level_index; + } + + return __scmi_perf_level_set(ph, dom, level, poll); +} + +static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *level, bool poll) { int ret; struct scmi_xfer *t; @@ -487,20 +670,45 @@ static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph, return ret; } -static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, - u32 
domain, u32 *level, bool poll) +static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph, + struct perf_dom_info *dom, u32 *level, + bool poll) { - struct scmi_perf_info *pi = ph->get_priv(ph); - struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET, - domain, *level, 0); + dom->id, *level, 0); return 0; } - return scmi_perf_mb_level_get(ph, domain, level, poll); + return scmi_perf_msg_level_get(ph, dom->id, level, poll); +} + +static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, + u32 domain, u32 *level, bool poll) +{ + int ret; + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + ret = __scmi_perf_level_get(ph, dom, level, poll); + if (ret) + return ret; + + if (dom->level_indexing_mode) { + struct scmi_opp *opp; + + opp = xa_load(&dom->opps_by_idx, *level); + if (!opp) + return -EIO; + + *level = opp->perf; + } + + return 0; } static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph, @@ -574,27 +782,37 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph, unsigned long freq; struct scmi_opp *opp; struct perf_dom_info *dom; - struct scmi_perf_info *pi = ph->get_priv(ph); domain = scmi_dev_domain_id(dev); if (domain < 0) - return domain; + return -EINVAL; - dom = pi->dom_info + domain; + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { - freq = opp->perf * dom->mult_factor; + if (!dom->level_indexing_mode) + freq = opp->perf * dom->mult_factor; + else + freq = opp->indicative_freq * 1000; ret = dev_pm_opp_add(dev, freq, 0); if (ret) { dev_warn(dev, "failed to add opp %luHz\n", freq); while (idx-- > 0) { - freq = (--opp)->perf * dom->mult_factor; + if (!dom->level_indexing_mode) + freq = (--opp)->perf * dom->mult_factor; + else + freq = (--opp)->indicative_freq * 1000; dev_pm_opp_remove(dev, freq); } return ret; } + + dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n", + domain, dom->name, idx, freq); } return 0; } @@ -603,14 +821,17 @@ static int scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph, struct device *dev) { + int domain; struct perf_dom_info *dom; - struct scmi_perf_info *pi = ph->get_priv(ph); - int domain = scmi_dev_domain_id(dev); + domain = scmi_dev_domain_id(dev); if (domain < 0) - return domain; + return -EINVAL; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); - dom = pi->dom_info + domain; /* uS to nS */ return dom->opp[dom->opp_count - 1].trans_latency_us * 1000; } @@ -618,10 +839,26 @@ scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph, static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain, unsigned long freq, bool poll) { - struct scmi_perf_info *pi = ph->get_priv(ph); - struct perf_dom_info *dom = pi->dom_info + domain; + unsigned int level; + struct perf_dom_info *dom; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); - return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll); + if (!dom->level_indexing_mode) { + level = freq / dom->mult_factor; + } else { + struct scmi_opp *opp; + + opp = LOOKUP_BY_FREQ(dom->opps_by_freq, freq / 1000); + if (!opp) + return -EIO; + + level = opp->level_index; + } 
+ + return __scmi_perf_level_set(ph, dom, level, poll); } static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain, @@ -629,12 +866,27 @@ static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain, { int ret; u32 level; - struct scmi_perf_info *pi = ph->get_priv(ph); - struct perf_dom_info *dom = pi->dom_info + domain; + struct perf_dom_info *dom; - ret = scmi_perf_level_get(ph, domain, &level, poll); - if (!ret) + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); + + ret = __scmi_perf_level_get(ph, dom, &level, poll); + if (ret) + return ret; + + if (!dom->level_indexing_mode) { *freq = level * dom->mult_factor; + } else { + struct scmi_opp *opp; + + opp = xa_load(&dom->opps_by_idx, level); + if (!opp) + return -EIO; + + *freq = opp->indicative_freq * 1000; + } return ret; } @@ -643,18 +895,21 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph, u32 domain, unsigned long *freq, unsigned long *power) { - struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom; unsigned long opp_freq; int idx, ret = -EINVAL; struct scmi_opp *opp; - dom = pi->dom_info + domain; - if (!dom) - return -EIO; + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return PTR_ERR(dom); for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { - opp_freq = opp->perf * dom->mult_factor; + if (!dom->level_indexing_mode) + opp_freq = opp->perf * dom->mult_factor; + else + opp_freq = opp->indicative_freq * 1000; + if (opp_freq < *freq) continue; @@ -670,10 +925,16 @@ static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph, static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, struct device *dev) { + int domain; struct perf_dom_info *dom; - struct scmi_perf_info *pi = ph->get_priv(ph); - dom = pi->dom_info + scmi_dev_domain_id(dev); + domain = scmi_dev_domain_id(dev); + if (domain < 0) + return false; + + dom = scmi_perf_domain_lookup(ph, domain); + if (IS_ERR(dom)) + return false; return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; } @@ -831,13 +1092,18 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph) for (domain = 0; domain < pinfo->num_domains; domain++) { struct perf_dom_info *dom = pinfo->dom_info + domain; - scmi_perf_domain_attributes_get(ph, domain, dom, version); - scmi_perf_describe_levels_get(ph, domain, dom); + dom->id = domain; + scmi_perf_domain_attributes_get(ph, dom, version); + scmi_perf_describe_levels_get(ph, dom, version); if (dom->perf_fastchannels) - scmi_perf_domain_init_fc(ph, domain, &dom->fc_info); + scmi_perf_domain_init_fc(ph, dom->id, &dom->fc_info); } + ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo); + if (ret) + return ret; + pinfo->version = version; return ph->set_priv(ph, pinfo); diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c index d9dcc20945c6..7cc0dec04587 100644 --- a/drivers/firmware/imx/imx-scu-irq.c +++ b/drivers/firmware/imx/imx-scu-irq.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Copyright 2019 NXP + * Copyright 2019,2023 NXP * * Implementation of the SCU IRQ functions using MU. 
* @@ -9,12 +9,14 @@ #include <dt-bindings/firmware/imx/rsrc.h> #include <linux/firmware/imx/ipc.h> #include <linux/firmware/imx/sci.h> +#include <linux/kobject.h> #include <linux/mailbox_client.h> #include <linux/suspend.h> +#include <linux/sysfs.h> #define IMX_SC_IRQ_FUNC_ENABLE 1 #define IMX_SC_IRQ_FUNC_STATUS 2 -#define IMX_SC_IRQ_NUM_GROUP 4 +#define IMX_SC_IRQ_NUM_GROUP 9 static u32 mu_resource_id; @@ -40,63 +42,102 @@ struct imx_sc_msg_irq_enable { u8 enable; } __packed; +struct scu_wakeup { + u32 mask; + u32 wakeup_src; + bool valid; +}; + +/* Sysfs functions */ +static struct kobject *wakeup_obj; +static ssize_t wakeup_source_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); +static struct kobj_attribute wakeup_source_attr = + __ATTR(wakeup_src, 0660, wakeup_source_show, NULL); + +static struct scu_wakeup scu_irq_wakeup[IMX_SC_IRQ_NUM_GROUP]; + static struct imx_sc_ipc *imx_sc_irq_ipc_handle; static struct work_struct imx_sc_irq_work; -static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain); +static BLOCKING_NOTIFIER_HEAD(imx_scu_irq_notifier_chain); int imx_scu_irq_register_notifier(struct notifier_block *nb) { - return atomic_notifier_chain_register( + return blocking_notifier_chain_register( &imx_scu_irq_notifier_chain, nb); } EXPORT_SYMBOL(imx_scu_irq_register_notifier); int imx_scu_irq_unregister_notifier(struct notifier_block *nb) { - return atomic_notifier_chain_unregister( + return blocking_notifier_chain_unregister( &imx_scu_irq_notifier_chain, nb); } EXPORT_SYMBOL(imx_scu_irq_unregister_notifier); static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group) { - return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain, + return blocking_notifier_call_chain(&imx_scu_irq_notifier_chain, status, (void *)group); } static void imx_scu_irq_work_handler(struct work_struct *work) { - struct imx_sc_msg_irq_get_status msg; - struct imx_sc_rpc_msg *hdr = &msg.hdr; u32 irq_status; int ret; u8 i; for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) { - hdr->ver = IMX_SC_RPC_VERSION; - hdr->svc = IMX_SC_RPC_SVC_IRQ; - hdr->func = IMX_SC_IRQ_FUNC_STATUS; - hdr->size = 2; - - msg.data.req.resource = mu_resource_id; - msg.data.req.group = i; + if (scu_irq_wakeup[i].mask) { + scu_irq_wakeup[i].valid = false; + scu_irq_wakeup[i].wakeup_src = 0; + } - ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true); + ret = imx_scu_irq_get_status(i, &irq_status); if (ret) { pr_err("get irq group %d status failed, ret %d\n", i, ret); return; } - irq_status = msg.data.resp.status; if (!irq_status) continue; + if (scu_irq_wakeup[i].mask & irq_status) { + scu_irq_wakeup[i].valid = true; + scu_irq_wakeup[i].wakeup_src = irq_status & scu_irq_wakeup[i].mask; + } else { + scu_irq_wakeup[i].wakeup_src = irq_status; + } pm_system_wakeup(); imx_scu_irq_notifier_call_chain(irq_status, &i); } } +int imx_scu_irq_get_status(u8 group, u32 *irq_status) +{ + struct imx_sc_msg_irq_get_status msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + int ret; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_IRQ; + hdr->func = IMX_SC_IRQ_FUNC_STATUS; + hdr->size = 2; + + msg.data.req.resource = mu_resource_id; + msg.data.req.group = group; + + ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true); + if (ret) + return ret; + + if (irq_status) + *irq_status = msg.data.resp.status; + + return 0; +} +EXPORT_SYMBOL(imx_scu_irq_get_status); + int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable) { struct imx_sc_msg_irq_enable msg; @@ -121,6 +162,11 @@ int imx_scu_irq_group_enable(u8 
group, u32 mask, u8 enable) pr_err("enable irq failed, group %d, mask %d, ret %d\n", group, mask, ret); + if (enable) + scu_irq_wakeup[group].mask |= mask; + else + scu_irq_wakeup[group].mask &= ~mask; + return ret; } EXPORT_SYMBOL(imx_scu_irq_group_enable); @@ -130,6 +176,25 @@ static void imx_scu_irq_callback(struct mbox_client *c, void *msg) schedule_work(&imx_sc_irq_work); } +static ssize_t wakeup_source_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + int i; + + for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) { + if (!scu_irq_wakeup[i].wakeup_src) + continue; + + if (scu_irq_wakeup[i].valid) + sprintf(buf, "Wakeup source group = %d, irq = 0x%x\n", + i, scu_irq_wakeup[i].wakeup_src); + else + sprintf(buf, "Spurious SCU wakeup, group = %d, irq = 0x%x\n", + i, scu_irq_wakeup[i].wakeup_src); + } + + return strlen(buf); +} + int imx_scu_enable_general_irq_channel(struct device *dev) { struct of_phandle_args spec; @@ -169,6 +234,25 @@ int imx_scu_enable_general_irq_channel(struct device *dev) mu_resource_id = IMX_SC_R_MU_0A + i; + /* Create directory under /sysfs/firmware */ + wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj); + if (!wakeup_obj) { + ret = -ENOMEM; + goto free_ch; + } + + ret = sysfs_create_file(wakeup_obj, &wakeup_source_attr.attr); + if (ret) { + dev_err(dev, "Cannot create wakeup source src file......\n"); + kobject_put(wakeup_obj); + goto free_ch; + } + + return 0; + +free_ch: + mbox_free_channel(ch); + return ret; } EXPORT_SYMBOL(imx_scu_enable_general_irq_channel); diff --git a/drivers/firmware/imx/imx-scu-soc.c b/drivers/firmware/imx/imx-scu-soc.c index 2f32353de2c9..497192320562 100644 --- a/drivers/firmware/imx/imx-scu-soc.c +++ b/drivers/firmware/imx/imx-scu-soc.c @@ -78,6 +78,22 @@ static int imx_scu_soc_id(void) return msg.data.resp.id; } +static const char *imx_scu_soc_name(u32 id) +{ + switch (id) { + case 0x1: + return "i.MX8QM"; + case 0x2: + return "i.MX8QXP"; + case 0xe: + return "i.MX8DXL"; + default: + break; + } + + return "NULL"; +} + int imx_scu_soc_init(struct device *dev) { struct soc_device_attribute *soc_dev_attr; @@ -113,9 +129,7 @@ int imx_scu_soc_init(struct device *dev) /* format soc_id value passed from SCU firmware */ val = id & 0x1f; - soc_dev_attr->soc_id = devm_kasprintf(dev, GFP_KERNEL, "0x%x", val); - if (!soc_dev_attr->soc_id) - return -ENOMEM; + soc_dev_attr->soc_id = imx_scu_soc_name(val); /* format revision value passed from SCU firmware */ val = (id >> 5) & 0xf; diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 47db49911e7b..14ff9d3504bf 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -20,7 +20,7 @@ #include <linux/platform_device.h> #define SCU_MU_CHAN_NUM 8 -#define MAX_RX_TIMEOUT (msecs_to_jiffies(30)) +#define MAX_RX_TIMEOUT (msecs_to_jiffies(3000)) struct imx_sc_chan { struct imx_sc_ipc *sc_ipc; @@ -353,7 +353,12 @@ static struct platform_driver imx_scu_driver = { }, .probe = imx_scu_probe, }; -builtin_platform_driver(imx_scu_driver); + +static int __init imx_scu_driver_init(void) +{ + return platform_driver_register(&imx_scu_driver); +} +subsys_initcall_sync(imx_scu_driver_init); MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>"); MODULE_DESCRIPTION("IMX SCU firmware protocol driver"); diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index 798bcdb05d84..9a2656d73600 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -292,6 +292,8 @@ static int 
__init meson_sm_probe(struct platform_device *pdev) return -ENOMEM; chip = of_match_device(meson_sm_ids, dev)->data; + if (!chip) + return -EINVAL; if (chip->cmd_shmem_in_base) { fw->sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base, diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index fde33acd46b7..06fe8aca870d 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -26,10 +26,6 @@ static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT); module_param(download_mode, bool, 0); -#define SCM_HAS_CORE_CLK BIT(0) -#define SCM_HAS_IFACE_CLK BIT(1) -#define SCM_HAS_BUS_CLK BIT(2) - struct qcom_scm { struct device *dev; struct clk *core_clk; @@ -351,7 +347,7 @@ int qcom_scm_set_warm_boot_addr(void *entry) return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits); return 0; } -EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr); +EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr); /** * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus @@ -364,7 +360,7 @@ int qcom_scm_set_cold_boot_addr(void *entry) return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits); return 0; } -EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr); +EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr); /** * qcom_scm_cpu_power_down() - Power down the cpu @@ -386,7 +382,7 @@ void qcom_scm_cpu_power_down(u32 flags) qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_cpu_power_down); +EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down); int qcom_scm_set_remote_state(u32 state, u32 id) { @@ -405,7 +401,7 @@ int qcom_scm_set_remote_state(u32 state, u32 id) return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_set_remote_state); +EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state); static int __qcom_scm_set_dload_mode(struct device *dev, bool enable) { @@ -515,7 +511,7 @@ out: return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_pas_init_image); +EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image); /** * qcom_scm_pas_metadata_release() - release metadata context @@ -532,7 +528,7 @@ void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx) ctx->phys = 0; ctx->size = 0; } -EXPORT_SYMBOL(qcom_scm_pas_metadata_release); +EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release); /** * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral @@ -571,7 +567,7 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size) return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_pas_mem_setup); +EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup); /** * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware @@ -606,7 +602,7 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral) return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset); +EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset); /** * qcom_scm_pas_shutdown() - Shut down the remote processor @@ -641,7 +637,7 @@ int qcom_scm_pas_shutdown(u32 peripheral) return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_pas_shutdown); +EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown); /** * qcom_scm_pas_supported() - Check if the peripheral authentication service is @@ -670,7 +666,7 @@ bool qcom_scm_pas_supported(u32 peripheral) return ret ? false : !!res.result[0]; } -EXPORT_SYMBOL(qcom_scm_pas_supported); +EXPORT_SYMBOL_GPL(qcom_scm_pas_supported); static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) { @@ -732,7 +728,7 @@ int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) return ret < 0 ? 
ret : 0; } -EXPORT_SYMBOL(qcom_scm_io_readl); +EXPORT_SYMBOL_GPL(qcom_scm_io_readl); int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { @@ -747,7 +743,7 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_io_writel); +EXPORT_SYMBOL_GPL(qcom_scm_io_writel); /** * qcom_scm_restore_sec_cfg_available() - Check if secure environment @@ -760,7 +756,7 @@ bool qcom_scm_restore_sec_cfg_available(void) return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP, QCOM_SCM_MP_RESTORE_SEC_CFG); } -EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available); +EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available); int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { @@ -779,7 +775,7 @@ int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_restore_sec_cfg); +EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg); int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { @@ -800,7 +796,7 @@ int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) return ret ? : res.result[1]; } -EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size); +EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size); int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { @@ -824,7 +820,7 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) return ret; } -EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init); +EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init); int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) { @@ -839,7 +835,7 @@ int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size) return qcom_scm_call(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size); +EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size); int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, @@ -863,7 +859,7 @@ int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, return ret ? : res.result[0]; } -EXPORT_SYMBOL(qcom_scm_mem_protect_video_var); +EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var); static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, size_t mem_sz, phys_addr_t src, size_t src_sz, @@ -972,7 +968,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, *srcvm = next_vm; return 0; } -EXPORT_SYMBOL(qcom_scm_assign_mem); +EXPORT_SYMBOL_GPL(qcom_scm_assign_mem); /** * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available @@ -982,7 +978,7 @@ bool qcom_scm_ocmem_lock_available(void) return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM, QCOM_SCM_OCMEM_LOCK_CMD); } -EXPORT_SYMBOL(qcom_scm_ocmem_lock_available); +EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available); /** * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM @@ -1008,7 +1004,7 @@ int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size, return qcom_scm_call(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_ocmem_lock); +EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock); /** * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM @@ -1031,7 +1027,7 @@ int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) return qcom_scm_call(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_ocmem_unlock); +EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock); /** * qcom_scm_ice_available() - Is the ICE key programming interface available? 
@@ -1046,7 +1042,7 @@ bool qcom_scm_ice_available(void) __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES, QCOM_SCM_ES_CONFIG_SET_ICE_KEY); } -EXPORT_SYMBOL(qcom_scm_ice_available); +EXPORT_SYMBOL_GPL(qcom_scm_ice_available); /** * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key @@ -1072,7 +1068,7 @@ int qcom_scm_ice_invalidate_key(u32 index) return qcom_scm_call(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_ice_invalidate_key); +EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key); /** * qcom_scm_ice_set_key() - Set an inline encryption key @@ -1138,7 +1134,7 @@ int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, dma_free_coherent(__scm->dev, key_size, keybuf, key_phys); return ret; } -EXPORT_SYMBOL(qcom_scm_ice_set_key); +EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key); /** * qcom_scm_hdcp_available() - Check if secure environment supports HDCP. @@ -1160,7 +1156,7 @@ bool qcom_scm_hdcp_available(void) return avail; } -EXPORT_SYMBOL(qcom_scm_hdcp_available); +EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available); /** * qcom_scm_hdcp_req() - Send HDCP request. @@ -1207,7 +1203,7 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) return ret; } -EXPORT_SYMBOL(qcom_scm_hdcp_req); +EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req); int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) { @@ -1223,7 +1219,7 @@ int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt) return qcom_scm_call(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format); +EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format); int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { @@ -1239,13 +1235,13 @@ int qcom_scm_qsmmu500_wait_safe_toggle(bool en) return qcom_scm_call_atomic(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle); +EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle); bool qcom_scm_lmh_dcvsh_available(void) { return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH); } -EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available); +EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available); int qcom_scm_lmh_profile_change(u32 profile_id) { @@ -1259,7 +1255,7 @@ int qcom_scm_lmh_profile_change(u32 profile_id) return qcom_scm_call(__scm->dev, &desc, NULL); } -EXPORT_SYMBOL(qcom_scm_lmh_profile_change); +EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change); int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, u64 limit_node, u32 node_id, u64 version) @@ -1297,7 +1293,7 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys); return ret; } -EXPORT_SYMBOL(qcom_scm_lmh_dcvsh); +EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh); static int qcom_scm_find_dload_address(struct device *dev, u64 *addr) { @@ -1332,7 +1328,7 @@ bool qcom_scm_is_available(void) { return !!__scm; } -EXPORT_SYMBOL(qcom_scm_is_available); +EXPORT_SYMBOL_GPL(qcom_scm_is_available); static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx) { @@ -1405,7 +1401,6 @@ out: static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_scm *scm; - unsigned long clks; int irq, ret; scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); @@ -1418,51 +1413,22 @@ static int qcom_scm_probe(struct platform_device *pdev) mutex_init(&scm->scm_bw_lock); - clks = (unsigned long)of_device_get_match_data(&pdev->dev); - scm->path = devm_of_icc_get(&pdev->dev, NULL); if (IS_ERR(scm->path)) return dev_err_probe(&pdev->dev, PTR_ERR(scm->path), "failed to acquire 
interconnect path\n"); - scm->core_clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(scm->core_clk)) { - if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) - return PTR_ERR(scm->core_clk); - - if (clks & SCM_HAS_CORE_CLK) { - dev_err(&pdev->dev, "failed to acquire core clk\n"); - return PTR_ERR(scm->core_clk); - } - - scm->core_clk = NULL; - } - - scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); - if (IS_ERR(scm->iface_clk)) { - if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER) - return PTR_ERR(scm->iface_clk); - - if (clks & SCM_HAS_IFACE_CLK) { - dev_err(&pdev->dev, "failed to acquire iface clk\n"); - return PTR_ERR(scm->iface_clk); - } - - scm->iface_clk = NULL; - } + scm->core_clk = devm_clk_get_optional(&pdev->dev, "core"); + if (IS_ERR(scm->core_clk)) + return PTR_ERR(scm->core_clk); - scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); - if (IS_ERR(scm->bus_clk)) { - if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER) - return PTR_ERR(scm->bus_clk); + scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface"); + if (IS_ERR(scm->iface_clk)) + return PTR_ERR(scm->iface_clk); - if (clks & SCM_HAS_BUS_CLK) { - dev_err(&pdev->dev, "failed to acquire bus clk\n"); - return PTR_ERR(scm->bus_clk); - } - - scm->bus_clk = NULL; - } + scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus"); + if (IS_ERR(scm->bus_clk)) + return PTR_ERR(scm->bus_clk); scm->reset.ops = &qcom_scm_pas_reset_ops; scm->reset.nr_resets = 1; @@ -1512,39 +1478,15 @@ static void qcom_scm_shutdown(struct platform_device *pdev) } static const struct of_device_id qcom_scm_dt_match[] = { - { .compatible = "qcom,scm-apq8064", - /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */ - }, - { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK | - SCM_HAS_IFACE_CLK | - SCM_HAS_BUS_CLK) - }, + { .compatible = "qcom,scm" }, + + /* Legacy entries kept for backwards compatibility */ + { .compatible = "qcom,scm-apq8064" }, + { .compatible = "qcom,scm-apq8084" }, { .compatible = "qcom,scm-ipq4019" }, - { .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK | - SCM_HAS_IFACE_CLK | - SCM_HAS_BUS_CLK) }, - { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK }, - { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK }, - { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK | - SCM_HAS_IFACE_CLK | - SCM_HAS_BUS_CLK) - }, - { .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK | - SCM_HAS_IFACE_CLK | - SCM_HAS_BUS_CLK) - }, - { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK | - SCM_HAS_IFACE_CLK | - SCM_HAS_BUS_CLK) - }, - { .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK | - SCM_HAS_IFACE_CLK | - SCM_HAS_BUS_CLK) - }, - { .compatible = "qcom,scm-msm8994" }, + { .compatible = "qcom,scm-msm8953" }, + { .compatible = "qcom,scm-msm8974" }, { .compatible = "qcom,scm-msm8996" }, - { .compatible = "qcom,scm-sm6375", .data = (void *)SCM_HAS_CORE_CLK }, - { .compatible = "qcom,scm" }, {} }; MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 039d92a595ec..26a37f47f4ca 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -97,7 +97,6 @@ struct ti_sci_desc { * @node: list head * @host_id: Host ID * @users: Number of users of this instance - * @is_suspending: Flag set to indicate in suspend path. 
*/ struct ti_sci_info { struct device *dev; @@ -116,7 +115,6 @@ struct ti_sci_info { u8 host_id; /* protected by ti_sci_list_mutex */ int users; - bool is_suspending; }; #define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) @@ -418,14 +416,14 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info, ret = 0; - if (!info->is_suspending) { + if (system_state <= SYSTEM_RUNNING) { /* And we wait for the response. */ timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); if (!wait_for_completion_timeout(&xfer->done, timeout)) ret = -ETIMEDOUT; } else { /* - * If we are suspending, we cannot use wait_for_completion_timeout + * If we are !running, we cannot use wait_for_completion_timeout * during noirq phase, so we must manually poll the completion. */ ret = read_poll_timeout_atomic(try_wait_for_completion, done_state, @@ -1978,8 +1976,6 @@ static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params, * @src_index: IRQ source index within the source device * @dst_id: Device ID of the IRQ destination * @dst_host_irq: IRQ number of the destination device - * @vint_irq: Boolean specifying if this interrupt belongs to - * Interrupt Aggregator. * * Return: 0 if all went fine, else return appropriate error. */ @@ -2026,8 +2022,6 @@ static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle, * @src_index: IRQ source index within the source device * @dst_id: Device ID of the IRQ destination * @dst_host_irq: IRQ number of the destination device - * @vint_irq: Boolean specifying if this interrupt belongs to - * Interrupt Aggregator. * * Return: 0 if all went fine, else return appropriate error. */ @@ -2620,6 +2614,7 @@ fail: * configuration flags * @handle: Pointer to TI SCI handle * @proc_id: Processor ID this request is for + * @bootvector: Processor Boot vector (start address) * @config_flags_set: Configuration flags to be set * @config_flags_clear: Configuration flags to be cleared. * @@ -2736,9 +2731,13 @@ fail: } /** - * ti_sci_cmd_get_boot_status() - Command to get the processor boot status + * ti_sci_cmd_proc_get_status() - Command to get the processor boot status * @handle: Pointer to TI SCI handle * @proc_id: Processor ID this request is for + * @bv: Processor Boot vector (start address) + * @cfg_flags: Processor specific configuration flags + * @ctrl_flags: Processor specific control flags + * @sts_flags: Processor specific status flags * * Return: 0 if all went well, else returns appropriate error value. */ @@ -3256,7 +3255,7 @@ EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource); * @handle: TISCI handle * @dev: Device pointer to which the resource is assigned * @dev_id: TISCI device id to which the resource is assigned - * @suub_type: TISCI resource subytpe representing the resource. + * @sub_type: TISCI resource subytpe representing the resource. * * Return: Pointer to ti_sci_resource if all went well else appropriate * error pointer. @@ -3281,35 +3280,6 @@ static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, return NOTIFY_BAD; } -static void ti_sci_set_is_suspending(struct ti_sci_info *info, bool is_suspending) -{ - info->is_suspending = is_suspending; -} - -static int ti_sci_suspend(struct device *dev) -{ - struct ti_sci_info *info = dev_get_drvdata(dev); - /* - * We must switch operation to polled mode now as drivers and the genpd - * layer may make late TI SCI calls to change clock and device states - * from the noirq phase of suspend. 
- */ - ti_sci_set_is_suspending(info, true); - - return 0; -} - -static int ti_sci_resume(struct device *dev) -{ - struct ti_sci_info *info = dev_get_drvdata(dev); - - ti_sci_set_is_suspending(info, false); - - return 0; -} - -static DEFINE_SIMPLE_DEV_PM_OPS(ti_sci_pm_ops, ti_sci_suspend, ti_sci_resume); - /* Description for K2G */ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { .default_host_id = 2, @@ -3516,7 +3486,6 @@ static struct platform_driver ti_sci_driver = { .driver = { .name = "ti-sci", .of_match_table = of_match_ptr(ti_sci_of_match), - .pm = &ti_sci_pm_ops, }, }; module_platform_driver(ti_sci_driver); |
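
For context on the arm_scmi/perf.c changes above: with PERF protocol v4 level indexing, the driver keeps three per-domain mappings (firmware level index, abstract performance level, and indicative frequency in kHz) and translates between them on every limits/level/frequency call. The standalone sketch below mimics that translation with plain linear search purely for illustration; the in-kernel code uses xarrays and a hashtable (xa_load(), hash_for_each_possible()) as shown in the diff, and the OPP values here are made up.

/*
 * Standalone illustration (not kernel code) of the lookups the SCMI PERF v4
 * level-indexing support maintains per domain:
 *   - perf level            -> OPP  (driver's opps_by_lvl xarray)
 *   - indicative_freq (kHz) -> OPP  (driver's opps_by_freq hashtable)
 *   - level_index           -> OPP  (driver's opps_by_idx xarray)
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct scmi_opp_sketch {
	uint32_t perf;            /* abstract performance level */
	uint32_t indicative_freq; /* kHz, as reported by PERF v4 */
	uint32_t level_index;     /* index the firmware expects in set/get */
};

/* Hypothetical OPP table for one performance domain. */
static const struct scmi_opp_sketch opps[] = {
	{ .perf = 100, .indicative_freq =  600000, .level_index = 0 },
	{ .perf = 200, .indicative_freq = 1200000, .level_index = 1 },
	{ .perf = 300, .indicative_freq = 1800000, .level_index = 2 },
};

static const struct scmi_opp_sketch *lookup_by_level(uint32_t perf)
{
	for (size_t i = 0; i < sizeof(opps) / sizeof(opps[0]); i++)
		if (opps[i].perf == perf)
			return &opps[i];
	return NULL;
}

static const struct scmi_opp_sketch *lookup_by_freq(uint32_t freq_khz)
{
	for (size_t i = 0; i < sizeof(opps) / sizeof(opps[0]); i++)
		if (opps[i].indicative_freq == freq_khz)
			return &opps[i];
	return NULL;
}

int main(void)
{
	/* scmi_perf_level_set(): translate perf level -> firmware level index */
	const struct scmi_opp_sketch *opp = lookup_by_level(200);

	if (opp)
		printf("level 200 -> firmware index %u\n",
		       (unsigned int)opp->level_index);

	/* scmi_dvfs_freq_set(): Hz from the OPP layer -> kHz -> level index */
	unsigned long freq_hz = 1800000000UL;

	opp = lookup_by_freq(freq_hz / 1000);
	if (opp)
		printf("%lu Hz -> firmware index %u\n", freq_hz,
		       (unsigned int)opp->level_index);

	return 0;
}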
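
The imx-scu-irq.c change also exports imx_scu_irq_get_status(), so other kernel code can query a SCU interrupt group directly instead of open-coding the RPC. A hypothetical consumer might look like the fragment below; the group number, the wakeup mask, and the assumption that the prototype is available via include/linux/firmware/imx/sci.h are illustrative only and not part of this diff.

/*
 * Hypothetical consumer sketch for the newly exported
 * imx_scu_irq_get_status(). Group and mask values are made up; real users
 * take them from the SCU firmware documentation and dt-bindings.
 */
#include <linux/bits.h>
#include <linux/firmware/imx/sci.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_IRQ_GROUP		0	/* assumption: group of interest */
#define DEMO_WAKEUP_MASK	BIT(0)	/* assumption: one wakeup source */

static int demo_check_scu_wakeup(void)
{
	u32 status;
	int ret;

	/* Ask the SCU which interrupts are pending in this group. */
	ret = imx_scu_irq_get_status(DEMO_IRQ_GROUP, &status);
	if (ret)
		return ret;

	if (status & DEMO_WAKEUP_MASK)
		pr_info("SCU wakeup source pending: 0x%x\n", status);

	return 0;
}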