author     Eric Dumazet <eric.dumazet@gmail.com>     2010-03-18 21:36:06 +0100
committer  David S. Miller <davem@davemloft.net>     2010-03-22 02:34:16 +0100
commit     ec733b15a3ef0b5759141a177f8044a2f40c41e7
tree       41af80ea4dcc89cc049bd36cbd882916a6469263  /net/ipv4/af_inet.c
parent     atm: Use kasprintf
net: snmp mib cleanup
There is no point in aligning or padding mibs to cache lines; they are
allocated per cpu with an 8-byte alignment anyway.

This wastes space for no gain. This patch removes __SNMP_MIB_ALIGN__.

Since SNMP mibs contain "unsigned long" fields only, we can relax the
allocation alignment from "unsigned long long" to "unsigned long".
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
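
As background for the alignment claim above, here is a minimal userspace
sketch (not part of the patch; the exact values are architecture and ABI
dependent). On common 32-bit targets __alignof__(unsigned long) is 4 while
__alignof__(unsigned long long) is typically 8, so requesting the stricter
alignment for counters that are all "unsigned long" buys nothing:

	/* align_demo.c - illustrative only; values vary by architecture. */
	#include <stdio.h>

	int main(void)
	{
		printf("__alignof__(unsigned long)      = %zu\n",
		       __alignof__(unsigned long));
		printf("__alignof__(unsigned long long) = %zu\n",
		       __alignof__(unsigned long long));
		return 0;
	}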
Diffstat
 net/ipv4/af_inet.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 33b7dffa7732..55e11906a73a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1401,10 +1401,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
 int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
 {
 	BUG_ON(ptr == NULL);
-	ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
+	ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
 	if (!ptr[0])
 		goto err0;
-	ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
+	ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
 	if (!ptr[1])
 		goto err1;
 	return 0;
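
For context, a hedged sketch of how snmp_mib_init() is driven by a caller.
The mib struct and function names below are hypothetical (only the
snmp_mib_init() signature comes from the hunk above), and the declaration of
snmp_mib_init() is assumed to be in scope; real callers in this tree pass
their per-protocol mib structs, which are likewise plain arrays of
"unsigned long" counters:

	/* Illustrative sketch with assumed names: a mib is just an array of
	 * unsigned long counters, so __alignof__(unsigned long) is all the
	 * per-cpu allocation needs.
	 */
	#define EXAMPLE_MIB_MAX	16

	struct example_mib {
		unsigned long	mibs[EXAMPLE_MIB_MAX];
	};

	static void __percpu *example_stats[2];

	static int example_mib_setup(void)
	{
		/* snmp_mib_init() allocates the two per-cpu counter arrays. */
		return snmp_mib_init(example_stats, sizeof(struct example_mib));
	}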