author	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>	2005-08-10 05:07:35 +0200
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-30 00:41:44 +0200
commit	6e04e02165a7209a71db553b7bc48d68421e5ebf
tree	004157924013e6c099cacac59f39d3dd61f3e0e5
parent	[INET]: Generalise tcp_bind_hash & tcp_inherit_port
[INET]: Move tcp_port_rover to inet_hashinfo
Also expose all of the tcp_hashinfo members, i.e. kill the tcp_ehash, etc. macros. This will more clearly expose the already generic functions, and some that need just a bit of work to become generic, as we'll see in the upcoming changesets.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
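[Editorial aside for readers skimming the diff: the core of the change is that the port rover, the cursor remembering where the last ephemeral-port search stopped, moves from a file-scope global guarded by a companion lock into the inet_hashinfo structure that already owns that lock. Below is a minimal userspace sketch of that shape, not the kernel code itself; struct hashinfo and next_local_port are hypothetical names, and only the port_rover/portalloc_lock pairing and the 1024 - 1 initial value come from the patch.]

	/* Illustrative userspace C: the rover lives inside the table state
	 * it belongs to, and every update happens under the table's lock. */
	#include <pthread.h>
	#include <stdio.h>

	struct hashinfo {
		pthread_mutex_t portalloc_lock;	/* serializes rover updates */
		int		port_rover;	/* last port handed out */
	};

	static struct hashinfo hinfo = {
		.portalloc_lock	= PTHREAD_MUTEX_INITIALIZER,
		.port_rover	= 1024 - 1,	/* same initial value as the patch */
	};

	/* Return the next candidate port in [low, high], wrapping around. */
	static int next_local_port(struct hashinfo *h, int low, int high)
	{
		int rover;

		pthread_mutex_lock(&h->portalloc_lock);
		rover = h->port_rover < low ? low : h->port_rover;
		if (++rover > high)
			rover = low;
		h->port_rover = rover;	/* remember where this search stopped */
		pthread_mutex_unlock(&h->portalloc_lock);
		return rover;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("candidate port: %d\n",
			       next_local_port(&hinfo, 32768, 61000));
		return 0;
	}

[Keeping the rover next to its lock means a generic port allocator only needs a struct inet_hashinfo * argument, which is the direction the commit message points at for the upcoming changesets.]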
-rw-r--r--	include/net/inet_hashtables.h	1
-rw-r--r--	include/net/sock.h	2
-rw-r--r--	include/net/tcp.h	26
-rw-r--r--	net/ipv4/tcp.c	42
-rw-r--r--	net/ipv4/tcp_diag.c	8
-rw-r--r--	net/ipv4/tcp_ipv4.c	101
-rw-r--r--	net/ipv4/tcp_minisocks.c	15
-rw-r--r--	net/ipv6/tcp_ipv6.c	51
8 files changed, 118 insertions(+), 128 deletions(-)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index da9705525f15..da07411b36d2 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -117,6 +117,7 @@ struct inet_hashinfo {
wait_queue_head_t lhash_wait;
spinlock_t portalloc_lock;
kmem_cache_t *bind_bucket_cachep;
+ int port_rover;
};
static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
diff --git a/include/net/sock.h b/include/net/sock.h
index 69d869e41c35..391d00b5b7b4 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -136,7 +136,7 @@ struct sock_common {
* @sk_no_check: %SO_NO_CHECK setting, whether or not checksum packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_lingertime: %SO_LINGER l_linger setting
- * @sk_hashent: hash entry in several tables (e.g. tcp_ehash)
+ * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 9eb8ff7c911e..99e47695d4b6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -41,19 +41,7 @@
#endif
#include <linux/seq_file.h>
-extern struct inet_hashinfo tcp_hashinfo;
-#define tcp_ehash (tcp_hashinfo.ehash)
-#define tcp_bhash (tcp_hashinfo.bhash)
-#define tcp_ehash_size (tcp_hashinfo.ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
-#define tcp_bucket_cachep (tcp_hashinfo.bind_bucket_cachep)
-
-extern int tcp_port_rover;
+extern struct inet_hashinfo tcp_hashinfo;
#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
@@ -1463,21 +1451,21 @@ extern void tcp_listen_wlock(void);
/* - We may sleep inside this lock.
* - If sleeping is not required (or called from BH),
- * use plain read_(un)lock(&tcp_lhash_lock).
+ * use plain read_(un)lock(&inet_hashinfo.lhash_lock).
*/
static inline void tcp_listen_lock(void)
{
/* read_lock synchronizes to candidates to writers */
- read_lock(&tcp_lhash_lock);
- atomic_inc(&tcp_lhash_users);
- read_unlock(&tcp_lhash_lock);
+ read_lock(&tcp_hashinfo.lhash_lock);
+ atomic_inc(&tcp_hashinfo.lhash_users);
+ read_unlock(&tcp_hashinfo.lhash_lock);
}
static inline void tcp_listen_unlock(void)
{
- if (atomic_dec_and_test(&tcp_lhash_users))
- wake_up(&tcp_lhash_wait);
+ if (atomic_dec_and_test(&tcp_hashinfo.lhash_users))
+ wake_up(&tcp_hashinfo.lhash_wait);
}
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
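[Another aside: the tcp_listen_lock()/tcp_listen_unlock() helpers in the include/net/tcp.h hunk above implement a small writer-preference scheme that this patch renames but does not alter. A reader briefly takes and drops the read lock (so it queues behind any writer already in tcp_listen_wlock()), then pins the listening hash with an atomic user count, and the last reader out wakes the sleeping writer. A rough userspace analogue, with hypothetical names and pthreads/C11 atomics standing in for the kernel's rwlock, atomic_t and wait queue:]

	#include <pthread.h>
	#include <stdatomic.h>

	static pthread_rwlock_t lhash_lock = PTHREAD_RWLOCK_INITIALIZER;
	static atomic_int	lhash_users;	/* readers currently inside */
	static pthread_mutex_t	wait_lock  = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t	lhash_wait = PTHREAD_COND_INITIALIZER;

	static void listen_lock(void)
	{
		/* Passing through the read lock makes new readers queue
		 * behind a writer that already holds it for writing. */
		pthread_rwlock_rdlock(&lhash_lock);
		atomic_fetch_add(&lhash_users, 1);
		pthread_rwlock_unlock(&lhash_lock);
	}

	static void listen_unlock(void)
	{
		/* atomic_fetch_sub returns the old value, so 1 means we
		 * were the last reader, mirroring atomic_dec_and_test(). */
		if (atomic_fetch_sub(&lhash_users, 1) == 1) {
			pthread_mutex_lock(&wait_lock);
			pthread_cond_broadcast(&lhash_wait);
			pthread_mutex_unlock(&wait_lock);
		}
	}

[The writer side, tcp_listen_wlock() in net/ipv4/tcp_ipv4.c below, takes the write lock and then sleeps on the wait queue until lhash_users drains to zero.]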
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 38c04c1a754c..2f4b1a374bb7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2257,11 +2257,11 @@ void __init tcp_init(void)
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
sizeof(skb->cb));
- tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
- sizeof(struct inet_bind_bucket),
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (!tcp_bucket_cachep)
+ tcp_hashinfo.bind_bucket_cachep =
+ kmem_cache_create("tcp_bind_bucket",
+ sizeof(struct inet_bind_bucket), 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!tcp_hashinfo.bind_bucket_cachep)
panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
@@ -2276,7 +2276,7 @@ void __init tcp_init(void)
*
* The methodology is similar to that of the buffer cache.
*/
- tcp_ehash =
+ tcp_hashinfo.ehash =
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
@@ -2284,37 +2284,37 @@ void __init tcp_init(void)
(25 - PAGE_SHIFT) :
(27 - PAGE_SHIFT),
HASH_HIGHMEM,
- &tcp_ehash_size,
+ &tcp_hashinfo.ehash_size,
NULL,
0);
- tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
- for (i = 0; i < (tcp_ehash_size << 1); i++) {
- rwlock_init(&tcp_ehash[i].lock);
- INIT_HLIST_HEAD(&tcp_ehash[i].chain);
+ tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
+ for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+ rwlock_init(&tcp_hashinfo.ehash[i].lock);
+ INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
}
- tcp_bhash =
+ tcp_hashinfo.bhash =
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
- tcp_ehash_size,
+ tcp_hashinfo.ehash_size,
(num_physpages >= 128 * 1024) ?
(25 - PAGE_SHIFT) :
(27 - PAGE_SHIFT),
HASH_HIGHMEM,
- &tcp_bhash_size,
+ &tcp_hashinfo.bhash_size,
NULL,
64 * 1024);
- tcp_bhash_size = 1 << tcp_bhash_size;
- for (i = 0; i < tcp_bhash_size; i++) {
- spin_lock_init(&tcp_bhash[i].lock);
- INIT_HLIST_HEAD(&tcp_bhash[i].chain);
+ tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
+ for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
+ spin_lock_init(&tcp_hashinfo.bhash[i].lock);
+ INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
/* Try to be a bit smarter and adjust defaults depending
* on available memory.
*/
for (order = 0; ((1 << order) << PAGE_SHIFT) <
- (tcp_bhash_size * sizeof(struct inet_bind_hashbucket));
+ (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
order++)
;
if (order >= 4) {
@@ -2329,7 +2329,7 @@ void __init tcp_init(void)
sysctl_tcp_max_orphans >>= (3 - order);
sysctl_max_syn_backlog = 128;
}
- tcp_port_rover = sysctl_local_port_range[0] - 1;
+ tcp_hashinfo.port_rover = sysctl_local_port_range[0] - 1;
sysctl_tcp_mem[0] = 768 << order;
sysctl_tcp_mem[1] = 1024 << order;
@@ -2344,7 +2344,7 @@ void __init tcp_init(void)
printk(KERN_INFO "TCP: Hash tables configured "
"(established %d bind %d)\n",
- tcp_ehash_size << 1, tcp_bhash_size);
+ tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
tcp_register_congestion_control(&tcp_reno);
}
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 5bb6a0f1c77b..0ae738b455f0 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -595,7 +595,7 @@ static int tcpdiag_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_node *node;
num = 0;
- sk_for_each(sk, node, &tcp_listening_hash[i]) {
+ sk_for_each(sk, node, &tcp_hashinfo.listening_hash[i]) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num) {
@@ -645,8 +645,8 @@ skip_listen_ht:
if (!(r->tcpdiag_states&~(TCPF_LISTEN|TCPF_SYN_RECV)))
return skb->len;
- for (i = s_i; i < tcp_ehash_size; i++) {
- struct inet_ehash_bucket *head = &tcp_ehash[i];
+ for (i = s_i; i < tcp_hashinfo.ehash_size; i++) {
+ struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[i];
struct sock *sk;
struct hlist_node *node;
@@ -678,7 +678,7 @@ next_normal:
if (r->tcpdiag_states&TCPF_TIME_WAIT) {
sk_for_each(sk, node,
- &tcp_ehash[i + tcp_ehash_size].chain) {
+ &tcp_hashinfo.ehash[i + tcp_hashinfo.ehash_size].chain) {
struct inet_sock *inet = inet_sk(sk);
if (num < s_num)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 40fe4f5fca1c..f5373f9f00ac 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -94,6 +94,7 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
.lhash_users = ATOMIC_INIT(0),
.lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
.portalloc_lock = SPIN_LOCK_UNLOCKED,
+ .port_rover = 1024 - 1,
};
/*
@@ -102,7 +103,6 @@ struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
* 32768-61000
*/
int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = 1024 - 1;
static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
{
@@ -146,16 +146,16 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
int remaining = (high - low) + 1;
int rover;
- spin_lock(&tcp_portalloc_lock);
- if (tcp_port_rover < low)
+ spin_lock(&tcp_hashinfo.portalloc_lock);
+ if (tcp_hashinfo.port_rover < low)
rover = low;
else
- rover = tcp_port_rover;
+ rover = tcp_hashinfo.port_rover;
do {
rover++;
if (rover > high)
rover = low;
- head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == rover)
@@ -164,8 +164,8 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
next:
spin_unlock(&head->lock);
} while (--remaining > 0);
- tcp_port_rover = rover;
- spin_unlock(&tcp_portalloc_lock);
+ tcp_hashinfo.port_rover = rover;
+ spin_unlock(&tcp_hashinfo.portalloc_lock);
/* Exhausted local port range during search? It is not
* possible for us to be holding one of the bind hash
@@ -182,7 +182,7 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
*/
snum = rover;
} else {
- head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == snum)
@@ -205,7 +205,7 @@ tb_found:
}
tb_not_found:
ret = 1;
- if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
+ if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
goto fail_unlock;
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@@ -237,22 +237,22 @@ fail:
void tcp_listen_wlock(void)
{
- write_lock(&tcp_lhash_lock);
+ write_lock(&tcp_hashinfo.lhash_lock);
- if (atomic_read(&tcp_lhash_users)) {
+ if (atomic_read(&tcp_hashinfo.lhash_users)) {
DEFINE_WAIT(wait);
for (;;) {
- prepare_to_wait_exclusive(&tcp_lhash_wait,
+ prepare_to_wait_exclusive(&tcp_hashinfo.lhash_wait,
&wait, TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&tcp_lhash_users))
+ if (!atomic_read(&tcp_hashinfo.lhash_users))
break;
- write_unlock_bh(&tcp_lhash_lock);
+ write_unlock_bh(&tcp_hashinfo.lhash_lock);
schedule();
- write_lock_bh(&tcp_lhash_lock);
+ write_lock_bh(&tcp_hashinfo.lhash_lock);
}
- finish_wait(&tcp_lhash_wait, &wait);
+ finish_wait(&tcp_hashinfo.lhash_wait, &wait);
}
}
@@ -263,20 +263,20 @@ static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
BUG_TRAP(sk_unhashed(sk));
if (listen_possible && sk->sk_state == TCP_LISTEN) {
- list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
- lock = &tcp_lhash_lock;
+ list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+ lock = &tcp_hashinfo.lhash_lock;
tcp_listen_wlock();
} else {
- sk->sk_hashent = inet_sk_ehashfn(sk, tcp_ehash_size);
- list = &tcp_ehash[sk->sk_hashent].chain;
- lock = &tcp_ehash[sk->sk_hashent].lock;
+ sk->sk_hashent = inet_sk_ehashfn(sk, tcp_hashinfo.ehash_size);
+ list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+ lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
write_lock(lock);
}
__sk_add_node(sk, list);
sock_prot_inc_use(sk->sk_prot);
write_unlock(lock);
if (listen_possible && sk->sk_state == TCP_LISTEN)
- wake_up(&tcp_lhash_wait);
+ wake_up(&tcp_hashinfo.lhash_wait);
}
static void tcp_v4_hash(struct sock *sk)
@@ -298,9 +298,9 @@ void tcp_unhash(struct sock *sk)
if (sk->sk_state == TCP_LISTEN) {
local_bh_disable();
tcp_listen_wlock();
- lock = &tcp_lhash_lock;
+ lock = &tcp_hashinfo.lhash_lock;
} else {
- struct inet_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
+ struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
lock = &head->lock;
write_lock_bh(&head->lock);
}
@@ -311,7 +311,7 @@ void tcp_unhash(struct sock *sk)
ende:
if (sk->sk_state == TCP_LISTEN)
- wake_up(&tcp_lhash_wait);
+ wake_up(&tcp_hashinfo.lhash_wait);
}
/* Don't inline this cruft. Here are some nice properties to
@@ -366,8 +366,8 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
struct sock *sk = NULL;
struct hlist_head *head;
- read_lock(&tcp_lhash_lock);
- head = &tcp_listening_hash[inet_lhashfn(hnum)];
+ read_lock(&tcp_hashinfo.lhash_lock);
+ head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
if (!hlist_empty(head)) {
struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
@@ -382,7 +382,7 @@ static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
sherry_cache:
sock_hold(sk);
}
- read_unlock(&tcp_lhash_lock);
+ read_unlock(&tcp_hashinfo.lhash_lock);
return sk;
}
@@ -406,8 +406,8 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
/* Optimize here for direct hit, only listening connections can
* have wildcards anyways.
*/
- const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_ehash_size);
- head = &tcp_ehash[hash];
+ const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
+ head = &tcp_hashinfo.ehash[hash];
read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) {
if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
@@ -415,7 +415,7 @@ static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+ sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
goto hit;
}
@@ -469,8 +469,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
int dif = sk->sk_bound_dev_if;
TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
__u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
- const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_ehash_size);
- struct inet_ehash_bucket *head = &tcp_ehash[hash];
+ const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
+ struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
struct sock *sk2;
struct hlist_node *node;
struct tcp_tw_bucket *tw;
@@ -478,7 +478,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */
- sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+ sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
tw = (struct tcp_tw_bucket *)sk2;
if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -582,7 +582,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
local_bh_disable();
for (i = 1; i <= range; i++) {
port = low + (i + offset) % range;
- head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks,
@@ -602,7 +602,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
}
}
- tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+ tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
if (!tb) {
spin_unlock(&head->lock);
break;
@@ -637,7 +637,7 @@ ok:
goto out;
}
- head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
tb = inet_sk(sk)->bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
@@ -1926,7 +1926,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
if (!sk) {
st->bucket = 0;
- sk = sk_head(&tcp_listening_hash[0]);
+ sk = sk_head(&tcp_hashinfo.listening_hash[0]);
goto get_sk;
}
@@ -1980,7 +1980,7 @@ start_req:
read_unlock_bh(&tp->accept_queue.syn_wait_lock);
}
if (++st->bucket < INET_LHTABLE_SIZE) {
- sk = sk_head(&tcp_listening_hash[st->bucket]);
+ sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
goto get_sk;
}
cur = NULL;
@@ -2004,7 +2004,7 @@ static void *established_get_first(struct seq_file *seq)
struct tcp_iter_state* st = seq->private;
void *rc = NULL;
- for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
+ for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
struct sock *sk;
struct hlist_node *node;
struct tcp_tw_bucket *tw;
@@ -2012,8 +2012,8 @@ static void *established_get_first(struct seq_file *seq)
/* We can reschedule _before_ having picked the target: */
cond_resched_softirq();
- read_lock(&tcp_ehash[st->bucket].lock);
- sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
+ read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+ sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
if (sk->sk_family != st->family) {
continue;
}
@@ -2022,14 +2022,14 @@ static void *established_get_first(struct seq_file *seq)
}
st->state = TCP_SEQ_STATE_TIME_WAIT;
tw_for_each(tw, node,
- &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
+ &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
if (tw->tw_family != st->family) {
continue;
}
rc = tw;
goto out;
}
- read_unlock(&tcp_ehash[st->bucket].lock);
+ read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
st->state = TCP_SEQ_STATE_ESTABLISHED;
}
out:
@@ -2056,15 +2056,15 @@ get_tw:
cur = tw;
goto out;
}
- read_unlock(&tcp_ehash[st->bucket].lock);
+ read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
st->state = TCP_SEQ_STATE_ESTABLISHED;
/* We can reschedule between buckets: */
cond_resched_softirq();
- if (++st->bucket < tcp_ehash_size) {
- read_lock(&tcp_ehash[st->bucket].lock);
- sk = sk_head(&tcp_ehash[st->bucket].chain);
+ if (++st->bucket < tcp_hashinfo.ehash_size) {
+ read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+ sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
} else {
cur = NULL;
goto out;
@@ -2078,7 +2078,7 @@ get_tw:
}
st->state = TCP_SEQ_STATE_TIME_WAIT;
- tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
+ tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
goto get_tw;
found:
cur = sk;
@@ -2173,7 +2173,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
case TCP_SEQ_STATE_TIME_WAIT:
case TCP_SEQ_STATE_ESTABLISHED:
if (v)
- read_unlock(&tcp_ehash[st->bucket].lock);
+ read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
local_bh_enable();
break;
}
@@ -2432,7 +2432,6 @@ EXPORT_SYMBOL(ipv4_specific);
EXPORT_SYMBOL(inet_bind_bucket_create);
EXPORT_SYMBOL(tcp_hashinfo);
EXPORT_SYMBOL(tcp_listen_wlock);
-EXPORT_SYMBOL(tcp_port_rover);
EXPORT_SYMBOL(tcp_prot);
EXPORT_SYMBOL(tcp_unhash);
EXPORT_SYMBOL(tcp_v4_conn_request);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 267cea1087e5..f29e2f6ebe1b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -60,12 +60,11 @@ int tcp_tw_count;
/* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
- struct inet_ehash_bucket *ehead;
struct inet_bind_hashbucket *bhead;
struct inet_bind_bucket *tb;
-
/* Unlink from established hashes. */
- ehead = &tcp_ehash[tw->tw_hashent];
+ struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[tw->tw_hashent];
+
write_lock(&ehead->lock);
if (hlist_unhashed(&tw->tw_node)) {
write_unlock(&ehead->lock);
@@ -76,12 +75,12 @@ static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
write_unlock(&ehead->lock);
/* Disassociate with bind bucket. */
- bhead = &tcp_bhash[inet_bhashfn(tw->tw_num, tcp_bhash_size)];
+ bhead = &tcp_hashinfo.bhash[inet_bhashfn(tw->tw_num, tcp_hashinfo.bhash_size)];
spin_lock(&bhead->lock);
tb = tw->tw_tb;
__hlist_del(&tw->tw_bind_node);
tw->tw_tb = NULL;
- inet_bind_bucket_destroy(tcp_bucket_cachep, tb);
+ inet_bind_bucket_destroy(tcp_hashinfo.bind_bucket_cachep, tb);
spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
@@ -297,13 +296,13 @@ kill:
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
const struct inet_sock *inet = inet_sk(sk);
- struct inet_ehash_bucket *ehead = &tcp_ehash[sk->sk_hashent];
+ struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
struct inet_bind_hashbucket *bhead;
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet->num != 0 MUST be bound in
binding cache, even if it is closed.
*/
- bhead = &tcp_bhash[inet_bhashfn(inet->num, tcp_bhash_size)];
+ bhead = &tcp_hashinfo.bhash[inet_bhashfn(inet->num, tcp_hashinfo.bhash_size)];
spin_lock(&bhead->lock);
tw->tw_tb = inet->bind_hash;
BUG_TRAP(inet->bind_hash);
@@ -317,7 +316,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
sock_prot_dec_use(sk->sk_prot);
/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
- tw_add_node(tw, &(ehead + tcp_ehash_size)->chain);
+ tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
atomic_inc(&tw->tw_refcnt);
write_unlock(&ehead->lock);
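[One more aside, since the "head + ehash_size" arithmetic recurs throughout the patch: the established hash table is allocated at twice ehash_size buckets (see the "ehash_size << 1" initialization loop in tcp_init() above), with the upper half holding the TIME_WAIT shadow chains, so the TIME_WAIT bucket paired with an ESTABLISHED chain is always exactly ehash_size slots away. A tiny layout sketch, with hypothetical names:]

	#include <stdlib.h>

	struct bucket { void *chain; };	/* stand-in for inet_ehash_bucket */

	struct etable {
		struct bucket *ehash;	/* slots [0, size): ESTABLISHED */
		int size;		/* slots [size, 2*size): TIME_WAIT */
	};

	static int etable_init(struct etable *t, int size)
	{
		t->size	 = size;
		t->ehash = calloc(2 * (size_t)size, sizeof(*t->ehash));
		return t->ehash ? 0 : -1;
	}

	/* The TIME_WAIT bucket for a given chain is size slots away,
	 * which is all that "head + ehash_size" computes. */
	static struct bucket *tw_bucket(struct etable *t, struct bucket *head)
	{
		return head + t->size;
	}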
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index bfbedb56bce2..362ef5a64062 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -84,7 +84,7 @@ static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
hashent ^= hashent>>16;
hashent ^= hashent>>8;
- return (hashent & (tcp_ehash_size - 1));
+ return (hashent & (tcp_hashinfo.ehash_size - 1));
}
static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
@@ -138,15 +138,15 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
int remaining = (high - low) + 1;
int rover;
- spin_lock(&tcp_portalloc_lock);
- if (tcp_port_rover < low)
+ spin_lock(&tcp_hashinfo.portalloc_lock);
+ if (tcp_hashinfo.port_rover < low)
rover = low;
else
- rover = tcp_port_rover;
+ rover = tcp_hashinfo.port_rover;
do { rover++;
if (rover > high)
rover = low;
- head = &tcp_bhash[inet_bhashfn(rover, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == rover)
@@ -155,8 +155,8 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
next:
spin_unlock(&head->lock);
} while (--remaining > 0);
- tcp_port_rover = rover;
- spin_unlock(&tcp_portalloc_lock);
+ tcp_hashinfo.port_rover = rover;
+ spin_unlock(&tcp_hashinfo.portalloc_lock);
/* Exhausted local port range during search? It is not
* possible for us to be holding one of the bind hash
@@ -171,7 +171,7 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
/* OK, here is the one we will use. */
snum = rover;
} else {
- head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
spin_lock(&head->lock);
inet_bind_bucket_for_each(tb, node, &head->chain)
if (tb->port == snum)
@@ -192,8 +192,11 @@ tb_found:
}
tb_not_found:
ret = 1;
- if (!tb && (tb = inet_bind_bucket_create(tcp_bucket_cachep, head, snum)) == NULL)
- goto fail_unlock;
+ if (tb == NULL) {
+ tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum);
+ if (tb == NULL)
+ goto fail_unlock;
+ }
if (hlist_empty(&tb->owners)) {
if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
tb->fastreuse = 1;
@@ -224,13 +227,13 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
BUG_TRAP(sk_unhashed(sk));
if (sk->sk_state == TCP_LISTEN) {
- list = &tcp_listening_hash[inet_sk_listen_hashfn(sk)];
- lock = &tcp_lhash_lock;
+ list = &tcp_hashinfo.listening_hash[inet_sk_listen_hashfn(sk)];
+ lock = &tcp_hashinfo.lhash_lock;
tcp_listen_wlock();
} else {
sk->sk_hashent = tcp_v6_sk_hashfn(sk);
- list = &tcp_ehash[sk->sk_hashent].chain;
- lock = &tcp_ehash[sk->sk_hashent].lock;
+ list = &tcp_hashinfo.ehash[sk->sk_hashent].chain;
+ lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock;
write_lock(lock);
}
@@ -263,8 +266,8 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
int score, hiscore;
hiscore=0;
- read_lock(&tcp_lhash_lock);
- sk_for_each(sk, node, &tcp_listening_hash[inet_lhashfn(hnum)]) {
+ read_lock(&tcp_hashinfo.lhash_lock);
+ sk_for_each(sk, node, &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)]) {
if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -291,7 +294,7 @@ static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned shor
}
if (result)
sock_hold(result);
- read_unlock(&tcp_lhash_lock);
+ read_unlock(&tcp_hashinfo.lhash_lock);
return result;
}
@@ -315,7 +318,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
* have wildcards anyways.
*/
hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
- head = &tcp_ehash[hash];
+ head = &tcp_hashinfo.ehash[hash];
read_lock(&head->lock);
sk_for_each(sk, node, &head->chain) {
/* For IPV6 do the cheaper port and family tests first. */
@@ -323,7 +326,7 @@ static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+ sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
/* FIXME: acme: check this... */
struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;
@@ -461,7 +464,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
int dif = sk->sk_bound_dev_if;
u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
- struct inet_ehash_bucket *head = &tcp_ehash[hash];
+ struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
struct sock *sk2;
struct hlist_node *node;
struct tcp_tw_bucket *tw;
@@ -469,7 +472,7 @@ static int __tcp_v6_check_established(struct sock *sk, __u16 lport,
write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */
- sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+ sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
tw = (struct tcp_tw_bucket*)sk2;
if(*((__u32 *)&(tw->tw_dport)) == ports &&
@@ -558,7 +561,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
local_bh_disable();
for (i = 1; i <= range; i++) {
port = low + (i + offset) % range;
- head = &tcp_bhash[inet_bhashfn(port, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
spin_lock(&head->lock);
/* Does not bother with rcv_saddr checks,
@@ -578,7 +581,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
}
}
- tb = inet_bind_bucket_create(tcp_bucket_cachep, head, port);
+ tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
if (!tb) {
spin_unlock(&head->lock);
break;
@@ -613,7 +616,7 @@ ok:
goto out;
}
- head = &tcp_bhash[inet_bhashfn(snum, tcp_bhash_size)];
+ head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
tb = inet_sk(sk)->bind_hash;
spin_lock_bh(&head->lock);