Diffstat (limited to 'drivers/soc')
 drivers/soc/qcom/rpmh-internal.h |   2 +
 drivers/soc/qcom/rpmh-rsc.c      |   1 +
 drivers/soc/qcom/rpmh.c          | 159 ++++++++++++++++++++++++++++++++++--
 3 files changed, 160 insertions(+), 2 deletions(-)
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index 6a8a4b7aeead..a7bbbb67991c 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -71,11 +71,13 @@ struct rpmh_request {
* @cache: the list of cached requests
* @cache_lock: synchronize access to the cache data
* @dirty: was the cache updated since flush
+ * @batch_cache: Cache of sleep and wake requests sent as a batch
*/
struct rpmh_ctrlr {
struct list_head cache;
spinlock_t cache_lock;
bool dirty;
+ struct list_head batch_cache;
};
/**
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 5d0dd0581904..4c0c1f24911a 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -636,6 +636,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
spin_lock_init(&drv->client.cache_lock);
INIT_LIST_HEAD(&drv->client.cache);
+ INIT_LIST_HEAD(&drv->client.batch_cache);
dev_set_drvdata(&pdev->dev, drv);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 1e3d34876af1..c7beb6841289 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -54,6 +54,20 @@ struct cache_req {
struct list_head list;
};
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list object
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+
+struct batch_cache_req {
+ struct list_head list;
+ int count;
+ struct rpmh_request rpm_msgs[];
+};
+
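The trailing rpm_msgs[] above is a C99 flexible array member: a single allocation carries the list head, the count, and all of the per-message rpmh_request slots, which is why a whole batch can later be dropped with one kfree(). A minimal sketch of the sizing arithmetic, assuming a hypothetical count of 3 (the same expression appears in rpmh_write_batch() below):

	struct batch_cache_req *req;
	int count = 3;	/* hypothetical number of batched messages */

	/* One allocation: the header plus `count` trailing request slots */
	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
		      GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->count = count;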
static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
struct rsc_drv *drv = dev_get_drvdata(dev->parent);
@@ -73,10 +87,13 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
rpm_msg->msg.cmds[0].addr, r);
+ if (!compl)
+ goto exit;
+
/* Signal the blocking thread we are done */
- if (compl)
- complete(compl);
+ complete(compl);
+exit:
if (rpm_msg->needs_free)
kfree(rpm_msg);
}
@@ -264,6 +281,138 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
}
EXPORT_SYMBOL(rpmh_write);
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_add_tail(&req->list, &ctrlr->batch_cache);
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req;
+ const struct rpmh_request *rpm_msg;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+
+ /* Send Sleep/Wake requests to the controller, expect no response */
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry(req, &ctrlr->batch_cache, list) {
+ for (i = 0; i < req->count; i++) {
+ rpm_msg = req->rpm_msgs + i;
+ ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+ &rpm_msg->msg);
+ if (ret)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+ return ret;
+}
+
+static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+{
+ struct batch_cache_req *req, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+ kfree(req);
+ INIT_LIST_HEAD(&ctrlr->batch_cache);
+ spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
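Taken together, the three helpers above define the batch cache lifecycle: cache_batch() parks non-ACTIVE batches, flush_batch() writes them to the controller when rpmh_flush() runs from system PM, and invalidate_batch() drops them from rpmh_invalidate(). A hedged sketch of the expected ordering from a client's point of view; sleep_cmds, wake_cmds and ns are hypothetical:

	/* Cached only; nothing reaches the hardware yet */
	rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, ns);
	rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE, wake_cmds, ns);

	/* Later, from system PM: cached batches are flushed first */
	rpmh_flush(dev);

	/* Or discard the cached state and start over */
	rpmh_invalidate(dev);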
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of counts of elements in each batch, 0 terminated.
+ *
+ * Write a request to the RSC controller without caching. If the request
+ * state is ACTIVE, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-n-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{
+ struct batch_cache_req *req;
+ struct rpmh_request *rpm_msgs;
+ DECLARE_COMPLETION_ONSTACK(compl);
+ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+ unsigned long time_left;
+ int count = 0;
+ int ret, i, j;
+
+ if (!cmd || !n)
+ return -EINVAL;
+
+ while (n[count] > 0)
+ count++;
+ if (!count)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+ req->count = count;
+ rpm_msgs = req->rpm_msgs;
+
+ for (i = 0; i < count; i++) {
+ __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+ cmd += n[i];
+ }
+
+ if (state != RPMH_ACTIVE_ONLY_STATE) {
+ cache_batch(ctrlr, req);
+ return 0;
+ }
+
+ for (i = 0; i < count; i++) {
+ rpm_msgs[i].completion = &compl;
+ ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+ if (ret) {
+ pr_err("Error(%d) sending RPMH message addr=%#x\n",
+ ret, rpm_msgs[i].msg.cmds[0].addr);
+ for (j = i; j < count; j++)
+ rpmh_tx_done(&rpm_msgs[j].msg, ret);
+ break;
+ }
+ }
+
+ time_left = RPMH_TIMEOUT_MS;
+ for (i = 0; i < count; i++) {
+ time_left = wait_for_completion_timeout(&compl, time_left);
+ if (!time_left) {
+ /*
+ * Better hope they never finish because they'll signal
+ * the completion on our stack and that's bad once
+ * we've returned from the function.
+ */
+ WARN_ON(1);
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ }
+
+exit:
+ kfree(req);
+
+ return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
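For illustration, a minimal sketch of a blocking ACTIVE_ONLY caller of the API added above; the addresses and data values are hypothetical, and the n array carries one count per sub-batch with a terminating zero:

	struct tcs_cmd cmds[] = {
		{ .addr = 0x50000, .data = 1 },	/* hypothetical addresses */
		{ .addr = 0x50004, .data = 0 },
		{ .addr = 0x50008, .data = 1 },
	};
	u32 ns[] = { 2, 1, 0 };	/* two sub-batches: 2 cmds, then 1 */
	int ret;

	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, ns);
	if (ret)
		dev_err(dev, "RPMH batch write failed: %d\n", ret);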
+
static int is_req_valid(struct cache_req *req)
{
return (req->sleep_val != UINT_MAX &&
@@ -309,6 +458,11 @@ int rpmh_flush(const struct device *dev)
return 0;
}
+ /* First flush the cached batch requests */
+ ret = flush_batch(ctrlr);
+ if (ret)
+ return ret;
+
/*
* Nobody else should be calling this function other than system PM,
* hence we can run without locks.
@@ -347,6 +501,7 @@ int rpmh_invalidate(const struct device *dev)
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret;
+ invalidate_batch(ctrlr);
ctrlr->dirty = true;
do {