author     Alex Elder <elder@linaro.org>        2021-02-06 01:10:58 +0300
committer  Jakub Kicinski <kuba@kernel.org>     2021-02-07 01:57:19 +0300
commit     9af5ccf32383005070092e51b15cee51584323c0 (patch)
tree       60206c979aa98a925728750ef3fb8fe5ab679f1a
parent     d5bc5015eb9d64cbd14e467db1a56db1472d0d6c (diff)
download   linux-9af5ccf32383005070092e51b15cee51584323c0.tar.xz
net: ipa: use a Boolean rather than count when replenishing
The count argument to ipa_endpoint_replenish() is only ever 0 or 1,
and always will be (because we always handle each receive buffer in
a single transaction). Rename the argument to be add_one and change
it to be Boolean.

Update the function description to reflect the current code.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c  35
1 file changed, 19 insertions(+), 16 deletions(-)
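As the commit message explains, count was only ever 0 or 1, so each atomic_add(count, ...) guarded by "if (count)" collapses to a conditional atomic increment. The short user-space sketch below illustrates that equivalence; it is not driver code, the names (backlog_demo, bump_backlog_*) are invented for illustration, and C11 <stdatomic.h> stands in for the kernel's atomic_t API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for endpoint->replenish_backlog (an atomic_t in the driver). */
static atomic_uint backlog_demo;

/* Old style: a count that in practice was only ever 0 or 1. */
static void bump_backlog_count(unsigned int count)
{
	if (count)
		atomic_fetch_add(&backlog_demo, count);
}

/* New style: the same effect expressed as a Boolean plus a plain increment. */
static void bump_backlog_add_one(bool add_one)
{
	if (add_one)
		atomic_fetch_add(&backlog_demo, 1);
}

int main(void)
{
	bump_backlog_count(1);      /* old calling convention */
	bump_backlog_add_one(true); /* new calling convention */
	printf("backlog_demo = %u\n", atomic_load(&backlog_demo)); /* prints 2 */
	return 0;
}

Both calls leave the counter at the same value; the Boolean form simply makes the only two possible cases explicit at the call sites (false for a periodic or startup replenish, true when a buffer has just been consumed).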
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 7a46c790afbe..bff5d6ffd118 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1020,31 +1020,34 @@ err_free_pages:
 }
 /**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint: Endpoint to be replenished
- * @count: Number of buffers to send to hardware
+ * @add_one: Whether this is replacing a just-consumed buffer
  *
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint. These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
 {
 	struct gsi *gsi;
 	u32 backlog;
 	if (!endpoint->replenish_enabled) {
-		if (count)
-			atomic_add(count, &endpoint->replenish_saved);
+		if (add_one)
+			atomic_inc(&endpoint->replenish_saved);
 		return;
 	}
-
 	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 		if (ipa_endpoint_replenish_one(endpoint))
 			goto try_again_later;
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 	return;
@@ -1052,8 +1055,8 @@ try_again_later:
 	/* The last one didn't succeed, so fix the backlog */
 	backlog = atomic_inc_return(&endpoint->replenish_backlog);
-	if (count)
-		atomic_add(count, &endpoint->replenish_backlog);
+	if (add_one)
+		atomic_inc(&endpoint->replenish_backlog);
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again. It's unlikely, but if we fail to supply even
@@ -1080,7 +1083,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 	/* Start replenishing if hardware currently has no buffers */
 	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, 0);
+		ipa_endpoint_replenish(endpoint, false);
 }
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1099,7 +1102,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
-	ipa_endpoint_replenish(endpoint, 0);
+	ipa_endpoint_replenish(endpoint, false);
 }
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1300,7 +1303,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
-	ipa_endpoint_replenish(endpoint, 1);
+	ipa_endpoint_replenish(endpoint, true);
 	if (trans->cancelled)
 		return;
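The rewritten kernel-doc comment in the first hunk above describes the two counters the function maintains: a "backlog" of receive buffers the hardware can still be given, and a "saved" count accumulated while replenishing is disabled that is folded back into the backlog when it is re-enabled. A rough user-space model of that accounting is sketched below; the struct and function names are hypothetical, C11 atomics replace the kernel's atomic_t, and the actual buffer submission loop and retry work are omitted:

#include <stdatomic.h>
#include <stdbool.h>

/* Simplified model of the per-endpoint replenish accounting. */
struct replenish_state {
	bool enabled;         /* corresponds to endpoint->replenish_enabled */
	atomic_uint saved;    /* requests noted while replenishing is disabled */
	atomic_uint backlog;  /* buffers the hardware could still be given */
};

/* Record one replenish request, mirroring the add_one semantics. */
static void replenish_demo(struct replenish_state *st, bool add_one)
{
	if (!st->enabled) {
		if (add_one)
			atomic_fetch_add(&st->saved, 1);
		return;
	}

	if (add_one)
		atomic_fetch_add(&st->backlog, 1);
	/* The real driver would now hand buffers to the hardware,
	 * decrementing the backlog for each one it supplies.
	 */
}

/* On re-enable, fold everything saved while disabled into the backlog. */
static void replenish_enable_demo(struct replenish_state *st)
{
	unsigned int saved = atomic_exchange(&st->saved, 0);

	st->enabled = true;
	atomic_fetch_add(&st->backlog, saved);
}

In the driver itself, add_one plays the role modeled here: callers pass true only when a completed receive transaction has just consumed exactly one buffer, and false otherwise.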