author     Konrad Dybcio <konrad.dybcio@linaro.org>    2023-08-25 18:38:24 +0300
committer  Georgi Djakov <djakov@kernel.org>           2023-10-09 15:07:34 +0300
commit     db8fc1002c53bc17a3ca6fad2c524de42b77c146 (patch)
tree       2e124c8d367d4180141583192de3497866ee1a87 /drivers/interconnect
parent     dd014803f260b337daaabcde259daf70d5b26b5e (diff)
download   linux-db8fc1002c53bc17a3ca6fad2c524de42b77c146.tar.xz
interconnect: qcom: icc-rpm: Separate out clock rate calculations
In preparation for also setting per-node clock rates, separate out the logic that computes them.

Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
Link: https://lore.kernel.org/r/20230726-topic-icc_coeff-v4-2-c04b60caa467@linaro.org
Signed-off-by: Georgi Djakov <djakov@kernel.org>
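The helper factored out below mirrors the per-node math the aggregation loop previously did inline: the average-bandwidth request is spread across the node's channels and scaled by the provider's ab_coeff percentage, the peak request is taken (divided by ib_coeff when one is set), and the larger of the two is divided by the node's bus width to give a clock rate. The following is a minimal userspace sketch of that calculation, using simplified stand-in names (struct node_req, calc_rate) and illustrative numbers rather than the kernel's struct qcom_icc_node/struct qcom_icc_provider, div_u64() and max_t(); treat it as an approximation of the patch, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-node request state; not the kernel struct. */
struct node_req {
	uint64_t sum_avg;	/* aggregated average bandwidth request */
	uint64_t max_peak;	/* aggregated peak bandwidth request */
	uint32_t channels;	/* number of channels the bandwidth is spread over */
	uint16_t buswidth;	/* bus width in bytes */
};

/* Follows the flow of the new qcom_icc_calc_rate() helper in the patch. */
static uint64_t calc_rate(const struct node_req *qn, uint16_t ab_coeff, uint16_t ib_coeff)
{
	uint64_t avg, peak;

	/* Spread the average request across channels, if the count is known. */
	avg = qn->channels ? qn->sum_avg / qn->channels : qn->sum_avg;

	/* ab_coeff acts as a percentage applied to the average request. */
	if (ab_coeff)
		avg = avg * ab_coeff / 100;

	/*
	 * Matches the executed behaviour of the patch: the peak request is
	 * divided by ib_coeff when one is set (the intermediate "* 100"
	 * value in the original code is overwritten before it is used).
	 */
	peak = ib_coeff ? qn->max_peak / ib_coeff : qn->max_peak;

	/* The bus clock has to satisfy whichever request is larger. */
	return (avg > peak ? avg : peak) / qn->buswidth;
}

int main(void)
{
	struct node_req qn = {
		.sum_avg = 1000000, .max_peak = 800000,
		.channels = 2, .buswidth = 8,
	};

	/* 1000000/2 * 153/100 = 765000 vs. a peak of 800000 -> 800000/8 = 100000 */
	printf("rate: %llu\n", (unsigned long long)calc_rate(&qn, 153, 0));
	return 0;
}

Built with a plain cc invocation, this prints a rate of 100000 for the inputs above; the real helper runs the same comparison per RPM bucket for every node on the provider.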
Diffstat (limited to 'drivers/interconnect')
-rw-r--r--	drivers/interconnect/qcom/icc-rpm.c	53
1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 8b02aa8aa96a..8c1bfd65d774 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -291,6 +291,32 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
 	return 0;
 }
 
+static u64 qcom_icc_calc_rate(struct qcom_icc_provider *qp, struct qcom_icc_node *qn, int ctx)
+{
+	u64 agg_avg_rate, agg_peak_rate, agg_rate;
+
+	if (qn->channels)
+		agg_avg_rate = div_u64(qn->sum_avg[ctx], qn->channels);
+	else
+		agg_avg_rate = qn->sum_avg[ctx];
+
+	if (qp->ab_coeff) {
+		agg_avg_rate = agg_avg_rate * qp->ab_coeff;
+		agg_avg_rate = div_u64(agg_avg_rate, 100);
+	}
+
+	if (qp->ib_coeff) {
+		agg_peak_rate = qn->max_peak[ctx] * 100;
+		agg_peak_rate = div_u64(qn->max_peak[ctx], qp->ib_coeff);
+	} else {
+		agg_peak_rate = qn->max_peak[ctx];
+	}
+
+	agg_rate = max_t(u64, agg_avg_rate, agg_peak_rate);
+
+	return div_u64(agg_rate, qn->buswidth);
+}
+
 /**
  * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
  * @provider: generic interconnect provider
@@ -299,10 +325,9 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
 static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
 {
 	struct qcom_icc_provider *qp = to_qcom_provider(provider);
-	u64 agg_avg_rate, agg_peak_rate, agg_rate;
 	struct qcom_icc_node *qn;
 	struct icc_node *node;
-	int i;
+	int ctx;
 
 	/*
 	 * Iterate nodes on the provider, aggregate bandwidth requests for
@@ -310,27 +335,9 @@ static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
 	 */
 	list_for_each_entry(node, &provider->nodes, node_list) {
 		qn = node->data;
-		for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
-			if (qn->channels)
-				agg_avg_rate = div_u64(qn->sum_avg[i], qn->channels);
-			else
-				agg_avg_rate = qn->sum_avg[i];
-
-			if (qp->ab_coeff) {
-				agg_avg_rate = agg_avg_rate * qp->ab_coeff;
-				agg_avg_rate = div_u64(agg_avg_rate, 100);
-			}
-
-			if (qp->ib_coeff) {
-				agg_peak_rate = qn->max_peak[i] * 100;
-				agg_peak_rate = div_u64(qn->max_peak[i], qp->ib_coeff);
-			} else {
-				agg_peak_rate = qn->max_peak[i];
-			}
-
-			agg_rate = max_t(u64, agg_avg_rate, agg_peak_rate);
-
-			agg_clk_rate[i] = max_t(u64, agg_clk_rate[i], agg_rate);
+		for (ctx = 0; ctx < QCOM_SMD_RPM_STATE_NUM; ctx++) {
+			agg_clk_rate[ctx] = max_t(u64, agg_clk_rate[ctx],
+						  qcom_icc_calc_rate(qp, qn, ctx));
 		}
 	}
 }
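For concreteness (illustrative numbers, not taken from the patch): if qcom_icc_calc_rate() works out to 100000 for one node and 150000 for another in the same bucket, the rewritten loop leaves agg_clk_rate[ctx] at 150000 for that bucket, the same result the removed inline code would have produced. Only the location of the math changes, which is what lets the per-node clock rate work mentioned in the commit message reuse the helper.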