author | Eric Dumazet <edumazet@google.com> | 2021-01-28 19:21:45 +0300
committer | Jakub Kicinski <kuba@kernel.org> | 2021-01-30 07:59:53 +0300
commit | 0d6cd689f9ba47deffffe9dfd204843ce8f1a51e (patch)
tree | 3e5c70083f679eb9054fa4f4d8ff2aa026ea278b /net/ipv4
parent | df610cd9163b90adc3b5c23868089a0349580551 (diff)
download | linux-0d6cd689f9ba47deffffe9dfd204843ce8f1a51e.tar.xz
net: proc: speedup /proc/net/netstat
Use cache-friendly helpers to make better use of CPU caches
while reading /proc/net/netstat
Tested on a platform with 256 threads (AMD Rome)
Before: 305 usec spent in netstat_seq_show()
After: 130 usec spent in netstat_seq_show()
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20210128162145.1703601-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
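The "cache friendly helpers" are the snmp_get_cpu_field_batch() / snmp_get_cpu_field64_batch() calls visible in the diff below. The old code called snmp_fold_field() / snmp_fold_field64() once per counter, and each such call walks every possible CPU for that single counter, so every per-CPU statistics block gets revisited once per counter. The batched helpers flip the loop order: each CPU's block is walked once and all counters are accumulated into a scratch buffer. The following userspace-only sketch (not kernel code; the array sizes and names are made up for illustration) contrasts the two traversal orders:

```c
/*
 * Userspace sketch of the cache-locality argument behind the patch.
 * NR_CPUS and NR_FIELDS are illustrative values, not kernel constants.
 */
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS   256	/* e.g. the 256-thread AMD Rome box from the commit message */
#define NR_FIELDS 128	/* roughly the scale of the TcpExt counter list */

static unsigned long percpu_mib[NR_CPUS][NR_FIELDS];

/* Field-major: one pass over all CPUs per counter (old per-field fold). */
static void fold_per_field(unsigned long *out)
{
	for (size_t f = 0; f < NR_FIELDS; f++) {
		out[f] = 0;
		for (size_t cpu = 0; cpu < NR_CPUS; cpu++)
			out[f] += percpu_mib[cpu][f];
	}
}

/* CPU-major: each CPU's block is streamed through exactly once (batched fold). */
static void fold_batched(unsigned long *out)
{
	for (size_t f = 0; f < NR_FIELDS; f++)
		out[f] = 0;
	for (size_t cpu = 0; cpu < NR_CPUS; cpu++)
		for (size_t f = 0; f < NR_FIELDS; f++)
			out[f] += percpu_mib[cpu][f];
}

int main(void)
{
	unsigned long a[NR_FIELDS], b[NR_FIELDS];

	for (size_t cpu = 0; cpu < NR_CPUS; cpu++)
		for (size_t f = 0; f < NR_FIELDS; f++)
			percpu_mib[cpu][f] = cpu + f;

	fold_per_field(a);
	fold_batched(b);
	printf("field 0: %lu vs %lu\n", a[0], b[0]);	/* same totals either way */
	return 0;
}
```

The more counters and possible CPUs there are, the more often the field-major order re-reads the same per-CPU data, while the CPU-major order touches each per-CPU block exactly once; that is consistent with the 305 -> 130 usec improvement reported above.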
Diffstat (limited to 'net/ipv4')
-rw-r--r-- | net/ipv4/proc.c | 50
1 file changed, 36 insertions, 14 deletions
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 63cd370ea29d..6d46297a99f8 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -464,30 +464,52 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
  */
 static int netstat_seq_show(struct seq_file *seq, void *v)
 {
-	int i;
+	const int ip_cnt = ARRAY_SIZE(snmp4_ipextstats_list) - 1;
+	const int tcp_cnt = ARRAY_SIZE(snmp4_net_list) - 1;
 	struct net *net = seq->private;
+	unsigned long *buff;
+	int i;
 
 	seq_puts(seq, "TcpExt:");
-	for (i = 0; snmp4_net_list[i].name; i++)
+	for (i = 0; i < tcp_cnt; i++)
 		seq_printf(seq, " %s", snmp4_net_list[i].name);
 
 	seq_puts(seq, "\nTcpExt:");
-	for (i = 0; snmp4_net_list[i].name; i++)
-		seq_printf(seq, " %lu",
-			   snmp_fold_field(net->mib.net_statistics,
-					   snmp4_net_list[i].entry));
-
+	buff = kzalloc(max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)),
+		       GFP_KERNEL);
+	if (buff) {
+		snmp_get_cpu_field_batch(buff, snmp4_net_list,
+					 net->mib.net_statistics);
+		for (i = 0; i < tcp_cnt; i++)
+			seq_printf(seq, " %lu", buff[i]);
+	} else {
+		for (i = 0; i < tcp_cnt; i++)
+			seq_printf(seq, " %lu",
+				   snmp_fold_field(net->mib.net_statistics,
+						   snmp4_net_list[i].entry));
+	}
 	seq_puts(seq, "\nIpExt:");
-	for (i = 0; snmp4_ipextstats_list[i].name; i++)
+	for (i = 0; i < ip_cnt; i++)
 		seq_printf(seq, " %s", snmp4_ipextstats_list[i].name);
 
 	seq_puts(seq, "\nIpExt:");
-	for (i = 0; snmp4_ipextstats_list[i].name; i++)
-		seq_printf(seq, " %llu",
-			   snmp_fold_field64(net->mib.ip_statistics,
-					     snmp4_ipextstats_list[i].entry,
-					     offsetof(struct ipstats_mib, syncp)));
-
+	if (buff) {
+		u64 *buff64 = (u64 *)buff;
+
+		memset(buff64, 0, ip_cnt * sizeof(u64));
+		snmp_get_cpu_field64_batch(buff64, snmp4_ipextstats_list,
+					   net->mib.ip_statistics,
+					   offsetof(struct ipstats_mib, syncp));
+		for (i = 0; i < ip_cnt; i++)
+			seq_printf(seq, " %llu", buff64[i]);
+	} else {
+		for (i = 0; i < ip_cnt; i++)
+			seq_printf(seq, " %llu",
+				   snmp_fold_field64(net->mib.ip_statistics,
+						     snmp4_ipextstats_list[i].entry,
+						     offsetof(struct ipstats_mib, syncp)));
+	}
+	kfree(buff);
 	seq_putc(seq, '\n');
 	mptcp_seq_show(seq);
 	return 0;
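Two details of the final version are worth calling out. The loops now run to ARRAY_SIZE(...) - 1 because both MIB lists end in a sentinel entry with a NULL name, which is exactly what the old loops used as their stop condition. And a single scratch allocation, sized max(tcp_cnt * sizeof(long), ip_cnt * sizeof(u64)), serves both output passes: it is filled as an unsigned long array for the TcpExt line, then zeroed and reused as a u64 array for the IpExt line; if kzalloc() fails, the code falls back to the original per-field fold, so the produced output is identical either way.

The 305/130 usec figures above were measured inside netstat_seq_show() itself; the commit message does not say with which tool. As a rough userspace-side way to observe the effect of a change like this, one could simply time a full read of /proc/net/netstat. A minimal sketch (this is not how the commit's numbers were obtained, and it also includes syscall and copy-to-user overhead):

```c
/* Time one full read of /proc/net/netstat from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	char buf[65536];
	struct timespec t0, t1;
	int fd = open("/proc/net/netstat", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &t0);
	while (read(fd, buf, sizeof(buf)) > 0)
		;	/* drain the whole file so netstat_seq_show() runs fully */
	clock_gettime(CLOCK_MONOTONIC, &t1);
	close(fd);

	printf("read took %ld usec\n",
	       (long)((t1.tv_sec - t0.tv_sec) * 1000000L +
		      (t1.tv_nsec - t0.tv_nsec) / 1000L));
	return 0;
}
```

Differences of a few hundred microseconds are easy to lose in userspace noise, so repeating the read many times and taking the minimum gives a steadier comparison.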