From: Eric Dumazet
Date: Thu, 30 Sep 2021 01:03:32 +0000 (-0700)
Subject: net: snmp: inline snmp_get_cpu_field()
X-Git-Tag: v5.16-rc1~159^2~314^2~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=59f09ae8fac4a990070fc6bdc889d0e0118664ea;p=thirdparty%2Fkernel%2Flinux.git

net: snmp: inline snmp_get_cpu_field()

This trivial function is called ~90,000 times on 256-cpu hosts,
when reading /proc/net/netstat. And this number keeps inflating.

Inlining it saves many cycles.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---

diff --git a/include/net/ip.h b/include/net/ip.h
index 9192444f2964e..cf229a5311942 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -291,7 +291,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 #define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 2fc6074583a41..8eb428387bac2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1662,12 +1662,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
-{
-	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
-}
-EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
-
 unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
 	unsigned long res = 0;
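
For context, a minimal sketch of the caller side that makes this function hot: snmp_fold_field(), whose opening lines are visible at the tail of the diff above and which is not changed by this patch, is representative of the per-cpu fold pattern used by the /proc/net statistics handlers. The comment below summarizing how the ~90,000 figure arises is an inference from the commit message, not part of the patch.

/*
 * Each counter exposed through /proc/net/netstat is summed across every
 * possible cpu, so snmp_get_cpu_field() runs once per (counter, cpu)
 * pair; on a 256-cpu host with a few hundred counters that works out to
 * the ~90,000 calls per read mentioned above, which is why turning the
 * helper into a static inline removes a measurable amount of call
 * overhead.
 */
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}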