author    Eric Dumazet <dada1@cosmosbay.com>  2008-11-17 04:46:36 +0100
committer David S. Miller <davem@davemloft.net>  2008-11-17 04:46:36 +0100
commit    5635c10d976716ef47ae441998aeae144c7e7387
tree      03cb08b634fba283bbde42ea290c92048045d2fd  /include/net/dst.h
parent    rcu: documents rculist_nulls
net: make sure struct dst_entry refcount is aligned on 64 bytes
As found in the past (commit f1dd9c379cac7d5a76259e7dffcd5f8edc697d17, "[NET]: Fix tbench regression in 2.6.25-rc1"), it is really important that the struct dst_entry refcount is aligned on a cache line.

We cannot use __attribute__((aligned)), so manually pad the structure for 32- and 64-bit arches:

for 32bit : offsetof(struct dst_entry, __refcnt) is 0x80
for 64bit : offsetof(struct dst_entry, __refcnt) is 0xc0

As the cache line size cannot be known at compile time, we use a generic value of 64 bytes, which satisfies many current arches. (Using 128-byte alignment on 64-bit arches would waste 64 bytes.)

Add a BUILD_BUG_ON so that future updates to "struct dst_entry" don't break this alignment.

"tbench 8" is 4.4% faster on a dual quad core (HP BL460c G1), Intel E5450 @3.00GHz (2350 MB/s instead of 2250 MB/s).

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
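The idea is easy to reproduce outside the kernel. Below is a minimal standalone C sketch, not the real struct dst_entry layout: every name is made up, the pad array is hand-sized for the illustrative fields used here, and the BUILD_BUG_ON macro is reproduced in the shape the kernel's include/linux/kernel.h used at the time.

/* Standalone sketch (not kernel code): the refcount gets its own
 * 64-byte cache line via manual padding, and a compile-time check
 * keeps it there. All names here are illustrative. */
#include <stddef.h>
#include <stdint.h>

/* Same shape as the kernel macro: a true condition produces a
 * negative array size, which aborts compilation. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

struct example_entry {
	int64_t read_mostly[6];           /* 48 bytes of hot, rarely written data */
	int64_t __pad_to_align_refcnt[2]; /* hand-sized: 48 + 16 = 64 */

	/* Every hold/release dirties this line; keeping it away from the
	 * read-mostly fields above avoids bouncing their cache line too. */
	int __refcnt;
	int __use;
};

static inline void example_hold(struct example_entry *e)
{
	/* Grow the fields above without resizing the pad and this
	 * stops the build, exactly like the check in dst_hold(). */
	BUILD_BUG_ON(offsetof(struct example_entry, __refcnt) & 63);
	e->__refcnt++;
}

The kernel sizes the pad per word size under #ifdef CONFIG_64BIT, as the diff below shows, because the pointer-heavy fields preceding the pad change size between 32- and 64-bit builds.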
Diffstat (limited to 'include/net/dst.h')
-rw-r--r--  include/net/dst.h | 19 +++++++++++++++++++
1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/include/net/dst.h b/include/net/dst.h
index 65a60fab2f82..6c778799bf10 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -61,6 +61,8 @@ struct dst_entry
struct hh_cache *hh;
#ifdef CONFIG_XFRM
struct xfrm_state *xfrm;
+#else
+ void *__pad1;
#endif
int (*input)(struct sk_buff*);
int (*output)(struct sk_buff*);
@@ -71,8 +73,20 @@ struct dst_entry
#ifdef CONFIG_NET_CLS_ROUTE
__u32 tclassid;
+#else
+ __u32 __pad2;
#endif
+
+ /*
+ * Align __refcnt to a 64 bytes alignment
+ * (L1_CACHE_SIZE would be too much)
+ */
+#ifdef CONFIG_64BIT
+ long __pad_to_align_refcnt[2];
+#else
+ long __pad_to_align_refcnt[1];
+#endif
/*
* __refcnt wants to be on a different cache line from
* input/output/ops or performance tanks badly
@@ -157,6 +171,11 @@ dst_metric_locked(struct dst_entry *dst, int metric)
static inline void dst_hold(struct dst_entry * dst)
{
+ /*
+ * If your kernel compilation stops here, please check
+ * __pad_to_align_refcnt declaration in struct dst_entry
+ */
+ BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
atomic_inc(&dst->__refcnt);
}
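The guard itself costs nothing at runtime: BUILD_BUG_ON (defined in the include/linux/kernel.h of this era roughly as sketched below) maps a true condition to a negative array size, and offsetof(...) & 63 is offsetof(...) % 64 written as a mask, which is valid because 64 is a power of two. A throwaway userspace demo with made-up names, showing how a mis-sized pad stops the build:

#include <stddef.h>
#include <stdint.h>

/* Shape of the kernel macro: condition true -> char[-1] -> hard error */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

#define PADDING 2	/* set to 1 and the check below rejects the layout:
			 * refcnt moves to offset 56, and 56 & 63 != 0 */

struct demo {
	int64_t hot_fields[6];	/* 48 bytes */
	int64_t pad[PADDING];	/* 16 bytes -> refcnt lands at offset 64 */
	int refcnt;
};

int main(void)
{
	BUILD_BUG_ON(offsetof(struct demo, refcnt) & 63);	/* offset % 64 */
	return 0;
}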