path: root/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
author     Sathya Perla <sathya.perla@broadcom.com>  2017-10-26 18:51:31 +0300
committer  David S. Miller <davem@davemloft.net>     2017-10-27 18:02:45 +0300
commit     5a84acbebb22f93dfc9ce1e5f0427c45c94acb33 (patch)
tree       cefba34b1f898f8c5e224ad87044b16160c5bad5 /drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
parent     f484f6782e013138946122ae09c100c9e4b547e3 (diff)
download   linux-5a84acbebb22f93dfc9ce1e5f0427c45c94acb33.tar.xz
bnxt_en: query cfa flow stats periodically to compute 'lastused' attribute
This patch implements periodic querying of cfa flow stats in batches to compute the 'lastused' attribute of TC flow stats.

Signed-off-by: Sathya Perla <sathya.perla@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
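The batch worker folds the raw HW counters into 64-bit software totals with the wraparound handling done by bnxt_flow_stats_accum() (moved below in this patch). The following is a minimal userspace sketch of that accumulation step only; the 36-bit counter width and the sample values are made-up illustrations, not values taken from the hardware or this driver.

```c
/*
 * Sketch of the wraparound-aware accumulation used when periodic HW
 * samples of a counter narrower than 64 bits are folded into a 64-bit
 * software total. Counter width and samples are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define low_bits(x, mask)  ((x) & (mask))
#define high_bits(x, mask) ((x) & ~(mask))

/* Same logic as the driver's accumulate_val() */
static void accumulate_val(uint64_t *accum, uint64_t val, uint64_t mask)
{
	bool wrapped = val < low_bits(*accum, mask);

	/* Replace the low (HW-width) bits with the new sample ... */
	*accum = high_bits(*accum, mask) + val;
	/* ... and credit one full counter period if the sample wrapped. */
	if (wrapped)
		*accum += mask + 1;
}

int main(void)
{
	uint64_t packets_mask = (1ULL << 36) - 1;	/* hypothetical 36-bit HW counter */
	uint64_t total = 0;
	/* Three periodic samples of the raw counter; the last one wrapped. */
	uint64_t samples[] = { 100, packets_mask - 5, 20 };

	for (int i = 0; i < 3; i++) {
		accumulate_val(&total, samples[i], packets_mask);
		printf("sample=%llu total=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)total);
	}
	return 0;
}
```

With the totals maintained this way, the worker only refreshes flow->lastused when the accumulated packet count actually changed, and bnxt_tc_get_flow_stats() can report the delta against prev_stats plus 'lastused' without issuing an HWRM request per flow.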
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c  256
1 file changed, 176 insertions(+), 80 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 0d258d303eef..71828a5beefe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -504,81 +504,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
return rc;
}
-/* Add val to accum while handling a possible wraparound
- * of val. Eventhough val is of type u64, its actual width
- * is denoted by mask and will wrap-around beyond that width.
- */
-static void accumulate_val(u64 *accum, u64 val, u64 mask)
-{
-#define low_bits(x, mask) ((x) & (mask))
-#define high_bits(x, mask) ((x) & ~(mask))
- bool wrapped = val < low_bits(*accum, mask);
-
- *accum = high_bits(*accum, mask) + val;
- if (wrapped)
- *accum += (mask + 1);
-}
-
-/* The HW counters' width is much less than 64bits.
- * Handle possible wrap-around while updating the stat counters
- */
-static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info,
- struct bnxt_tc_flow_stats *stats,
- struct bnxt_tc_flow_stats *hw_stats)
-{
- accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
- accumulate_val(&stats->packets, hw_stats->packets,
- tc_info->packets_mask);
-}
-
-/* Fix possible wraparound of the stats queried from HW, calculate
- * the delta from prev_stats, and also update the prev_stats.
- * The HW flow stats are fetched under the hwrm_cmd_lock mutex.
- * This routine is best called while under the mutex so that the
- * stats processing happens atomically.
- */
-static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
- struct bnxt_tc_flow *flow,
- struct bnxt_tc_flow_stats *stats)
-{
- struct bnxt_tc_flow_stats *acc_stats, *prev_stats;
-
- acc_stats = &flow->stats;
- bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats);
-
- prev_stats = &flow->prev_stats;
- stats->bytes = acc_stats->bytes - prev_stats->bytes;
- stats->packets = acc_stats->packets - prev_stats->packets;
- *prev_stats = *acc_stats;
-}
-
-static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp,
- __le16 flow_handle,
- struct bnxt_tc_flow *flow,
- struct bnxt_tc_flow_stats *stats)
-{
- struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_cfa_flow_stats_input req = { 0 };
- int rc;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
- req.num_flows = cpu_to_le16(1);
- req.flow_handle_0 = flow_handle;
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc) {
- stats->packets = le64_to_cpu(resp->packet_0);
- stats->bytes = le64_to_cpu(resp->byte_0);
- bnxt_flow_stats_calc(&bp->tc_info, flow, stats);
- } else {
- netdev_info(bp->dev, "error rc=%d", rc);
- }
-
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
struct bnxt_tc_flow *flow,
struct bnxt_tc_l2_key *l2_info,
@@ -1306,6 +1231,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
if (rc)
goto put_tunnel;
+ flow->lastused = jiffies;
+ spin_lock_init(&flow->stats_lock);
/* add new flow to flow-table */
rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
tc_info->flow_ht_params);
@@ -1352,10 +1279,11 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
struct tc_cls_flower_offload *tc_flow_cmd)
{
+ struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
struct bnxt_tc_info *tc_info = &bp->tc_info;
struct bnxt_tc_flow_node *flow_node;
- struct bnxt_tc_flow_stats stats;
- int rc;
+ struct bnxt_tc_flow *flow;
+ unsigned long lastused;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
@@ -1366,15 +1294,183 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
return -1;
}
- rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle,
- &flow_node->flow, &stats);
+ flow = &flow_node->flow;
+ curr_stats = &flow->stats;
+ prev_stats = &flow->prev_stats;
+
+ spin_lock(&flow->stats_lock);
+ stats.packets = curr_stats->packets - prev_stats->packets;
+ stats.bytes = curr_stats->bytes - prev_stats->bytes;
+ *prev_stats = *curr_stats;
+ lastused = flow->lastused;
+ spin_unlock(&flow->stats_lock);
+
+ tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
+ lastused);
+ return 0;
+}
+
+static int
+bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
+ struct bnxt_tc_stats_batch stats_batch[])
+{
+ struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_flow_stats_input req = { 0 };
+ __le16 *req_flow_handles = &req.flow_handle_0;
+ int rc, i;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
+ req.num_flows = cpu_to_le16(num_flows);
+ for (i = 0; i < num_flows; i++) {
+ struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
+
+ req_flow_handles[i] = flow_node->flow_handle;
+ }
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ __le64 *resp_packets = &resp->packet_0;
+ __le64 *resp_bytes = &resp->byte_0;
+
+ for (i = 0; i < num_flows; i++) {
+ stats_batch[i].hw_stats.packets =
+ le64_to_cpu(resp_packets[i]);
+ stats_batch[i].hw_stats.bytes =
+ le64_to_cpu(resp_bytes[i]);
+ }
+ } else {
+ netdev_info(bp->dev, "error rc=%d", rc);
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+/* Add val to accum while handling a possible wraparound
+ * of val. Eventhough val is of type u64, its actual width
+ * is denoted by mask and will wrap-around beyond that width.
+ */
+static void accumulate_val(u64 *accum, u64 val, u64 mask)
+{
+#define low_bits(x, mask) ((x) & (mask))
+#define high_bits(x, mask) ((x) & ~(mask))
+ bool wrapped = val < low_bits(*accum, mask);
+
+ *accum = high_bits(*accum, mask) + val;
+ if (wrapped)
+ *accum += (mask + 1);
+}
+
+/* The HW counters' width is much less than 64bits.
+ * Handle possible wrap-around while updating the stat counters
+ */
+static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
+ struct bnxt_tc_flow_stats *acc_stats,
+ struct bnxt_tc_flow_stats *hw_stats)
+{
+ accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
+ accumulate_val(&acc_stats->packets, hw_stats->packets,
+ tc_info->packets_mask);
+}
+
+static int
+bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
+ struct bnxt_tc_stats_batch stats_batch[])
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ int rc, i;
+
+ rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
if (rc)
return rc;
- tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0);
+ for (i = 0; i < num_flows; i++) {
+ struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
+ struct bnxt_tc_flow *flow = &flow_node->flow;
+
+ spin_lock(&flow->stats_lock);
+ bnxt_flow_stats_accum(tc_info, &flow->stats,
+ &stats_batch[i].hw_stats);
+ if (flow->stats.packets != flow->prev_stats.packets)
+ flow->lastused = jiffies;
+ spin_unlock(&flow->stats_lock);
+ }
+
return 0;
}
+static int
+bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
+ struct bnxt_tc_stats_batch stats_batch[],
+ int *num_flows)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct rhashtable_iter *iter = &tc_info->iter;
+ void *flow_node;
+ int rc, i;
+
+ rc = rhashtable_walk_start(iter);
+ if (rc && rc != -EAGAIN) {
+ i = 0;
+ goto done;
+ }
+
+ rc = 0;
+ for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
+ flow_node = rhashtable_walk_next(iter);
+ if (IS_ERR(flow_node)) {
+ i = 0;
+ if (PTR_ERR(flow_node) == -EAGAIN) {
+ continue;
+ } else {
+ rc = PTR_ERR(flow_node);
+ goto done;
+ }
+ }
+
+ /* No more flows */
+ if (!flow_node)
+ goto done;
+
+ stats_batch[i].flow_node = flow_node;
+ }
+done:
+ rhashtable_walk_stop(iter);
+ *num_flows = i;
+ return rc;
+}
+
+void bnxt_tc_flow_stats_work(struct bnxt *bp)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ int num_flows, rc;
+
+ num_flows = atomic_read(&tc_info->flow_table.nelems);
+ if (!num_flows)
+ return;
+
+ rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
+
+ for (;;) {
+ rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
+ &num_flows);
+ if (rc) {
+ if (rc == -EAGAIN)
+ continue;
+ break;
+ }
+
+ if (!num_flows)
+ break;
+
+ bnxt_tc_flow_stats_batch_update(bp, num_flows,
+ tc_info->stats_batch);
+ }
+
+ rhashtable_walk_exit(&tc_info->iter);
+}
+
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
struct tc_cls_flower_offload *cls_flower)
{