| author | Lina Iyer <ilina@codeaurora.org> | 2018-06-20 16:27:05 +0300 |
|---|---|---|
| committer | Andy Gross <andy.gross@linaro.org> | 2018-07-21 21:33:27 +0300 |
| commit | 564b5e24ccd4c840a7f84dfd952e5715dd9b3966 (patch) | |
| tree | 63a2a1d5c80d7db7c0cc350f4512a0efb6f7365f /drivers/soc/qcom | |
| parent | 600513dfeef33cb05c694d1b13d319b9e8cde536 (diff) | |
| download | linux-564b5e24ccd4c840a7f84dfd952e5715dd9b3966.tar.xz | |
drivers: qcom: rpmh: allow requests to be sent asynchronously
Platform drivers that want to send a request but do not want to block
until the RPMH request completes now have a new API -
rpmh_write_async().
The API allocates memory, sends the request and returns control back
to the platform driver. The tx_done callback from the controller is
handled in the context of the controller's thread and frees the
allocated memory. This API therefore also allows RPMH requests to be
made from atomic contexts.
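
For illustration, a minimal caller sketch (not part of this patch): a hypothetical platform driver voting on an RPMh resource without blocking. It assumes the <soc/qcom/rpmh.h> and <soc/qcom/tcs.h> headers from this series; the resource address, data value and helper name are made up.

```c
/*
 * Hypothetical caller sketch -- not part of this patch. The resource
 * address and helper name are invented for illustration only.
 */
#include <linux/device.h>
#include <soc/qcom/rpmh.h>	/* rpmh_write_async() */
#include <soc/qcom/tcs.h>	/* struct tcs_cmd, enum rpmh_state */

#define MY_RES_ADDR	0x30010		/* made-up RPMh resource address */

static int my_drv_vote_nonblocking(struct device *dev, u32 level)
{
	struct tcs_cmd cmd = {
		.addr = MY_RES_ADDR,
		.data = level,
	};

	/*
	 * Returns as soon as the request is queued; rpmh_tx_done() later
	 * frees the internally allocated rpmh_request when the controller
	 * signals completion, so the caller never sleeps on a completion.
	 */
	return rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
}
```

Because the request object is heap-allocated with GFP_ATOMIC and freed in rpmh_tx_done(), the caller never waits, unlike the blocking rpmh_write() path.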
Signed-off-by: Lina Iyer <ilina@codeaurora.org>
Signed-off-by: Raju P.L.S.S.S.N <rplsssn@codeaurora.org>
Signed-off-by: Andy Gross <andy.gross@linaro.org>
Diffstat (limited to 'drivers/soc/qcom')
| -rw-r--r-- | drivers/soc/qcom/rpmh-internal.h | 2 |
| -rw-r--r-- | drivers/soc/qcom/rpmh.c | 51 |

2 files changed, 53 insertions, 0 deletions
```diff
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index 101145c9db1c..6a8a4b7aeead 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -54,6 +54,7 @@ struct tcs_group {
  * @completion: triggered when request is done
  * @dev: the device making the request
  * @err: err return from the controller
+ * @needs_free: check to free dynamically allocated request object
  */
 struct rpmh_request {
 	struct tcs_request msg;
@@ -61,6 +62,7 @@ struct rpmh_request {
 	struct completion *completion;
 	const struct device *dev;
 	int err;
+	bool needs_free;
 };
 
 /**
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index c355ba13e73f..1e3d34876af1 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -34,6 +34,7 @@
 		.cmd = { { 0 } },		\
 		.completion = q,		\
 		.dev = dev,			\
+		.needs_free = false,		\
 	}
 
 #define ctrlr_to_drv(ctrlr)	container_of(ctrlr, struct rsc_drv, client)
@@ -75,6 +76,9 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
 	/* Signal the blocking thread we are done */
 	if (compl)
 		complete(compl);
+
+	if (rpm_msg->needs_free)
+		kfree(rpm_msg);
 }
 
 static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
@@ -180,6 +184,53 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
 	return ret;
 }
 
+static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
+		const struct tcs_cmd *cmd, u32 n)
+{
+	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
+		return -EINVAL;
+
+	memcpy(req->cmd, cmd, n * sizeof(*cmd));
+
+	req->msg.state = state;
+	req->msg.cmds = req->cmd;
+	req->msg.num_cmds = n;
+
+	return 0;
+}
+
+/**
+ * rpmh_write_async: Write a set of RPMH commands
+ *
+ * @dev: The device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a set of RPMH commands, the order of commands is maintained
+ * and will be sent as a single shot.
+ */
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+		     const struct tcs_cmd *cmd, u32 n)
+{
+	struct rpmh_request *rpm_msg;
+	int ret;
+
+	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
+	if (!rpm_msg)
+		return -ENOMEM;
+	rpm_msg->needs_free = true;
+
+	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
+	if (ret) {
+		kfree(rpm_msg);
+		return ret;
+	}
+
+	return __rpmh_write(dev, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
 /**
  * rpmh_write: Write a set of RPMH commands and block until response
  *
```