author      Ying Xue <ying.xue@windriver.com>          2015-01-09 08:27:05 +0100
committer   David S. Miller <davem@davemloft.net>      2015-01-12 22:24:32 +0100
commit      f2f9800d4955a96d92896841d8ba9b04201deaa1 (patch)
tree        3b817800cfd8fcb2de6d5a3d7eb4fff972fba681 /net/tipc/node.c
parent      tipc: involve namespace infrastructure (diff)
download    linux-f2f9800d4955a96d92896841d8ba9b04201deaa1.tar.xz
            linux-f2f9800d4955a96d92896841d8ba9b04201deaa1.zip
tipc: make tipc node table aware of net namespace
The node table currently relies on the following global variables:
- node hash table (node_htable)
- node list (tipc_node_list)
- node list lock (node_list_lock)
- node counter (tipc_num_nodes)
- link counter (tipc_num_links)
To make the node table namespace-aware, the global variables above must be
moved into the tipc_net structure so that each namespace keeps its own
private instance of them. As a consequence, these variables are now
allocated and initialized when a namespace is created, and freed when it is
destroyed. After the change, the functions that operate on them need a
namespace pointer to reach them, so the major change in this commit is
adding such a pointer as a parameter to those functions.
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Tested-by: Tero Aho <Tero.Aho@coriant.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
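For orientation, the per-namespace state that the patch below switches to looks roughly like this sketch. The field names (node_htable, node_list, node_list_lock, num_nodes, num_links) are taken from the diff; the authoritative struct tipc_net lives in net/tipc/core.h and carries additional members, so treat this as an illustrative excerpt, not the actual definition.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative excerpt only: the node-table fields this patch expects to
 * find in each namespace's struct tipc_net. NODE_HTABLE_SIZE is removed
 * from node.c by the patch and presumably relocated to a shared header. */
#define NODE_HTABLE_SIZE 512

struct tipc_net {
	/* ... other per-namespace TIPC state ... */
	spinlock_t node_list_lock;
	struct hlist_head node_htable[NODE_HTABLE_SIZE];
	struct list_head node_list;
	u32 num_nodes;
	u32 num_links;
};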
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--   net/tipc/node.c   130
1 file changed, 69 insertions(+), 61 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 8d353ec77a66..a0ca1ac53119 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -40,17 +40,9 @@
 #include "name_distr.h"
 #include "socket.h"
 
-#define NODE_HTABLE_SIZE 512
-
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static struct hlist_head node_htable[NODE_HTABLE_SIZE];
-LIST_HEAD(tipc_node_list);
-static u32 tipc_num_nodes;
-static u32 tipc_num_links;
-static DEFINE_SPINLOCK(node_list_lock);
-
 struct tipc_sock_conn {
 	u32 port;
 	u32 peer_port;
@@ -78,15 +70,17 @@ static unsigned int tipc_hashfn(u32 addr)
 /*
  * tipc_node_find - locate specified node object, if it exists
  */
-struct tipc_node *tipc_node_find(u32 addr)
+struct tipc_node *tipc_node_find(struct net *net, u32 addr)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_node *node;
 
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
+				 hash) {
 		if (node->addr == addr) {
 			rcu_read_unlock();
 			return node;
@@ -96,20 +90,22 @@ struct tipc_node *tipc_node_find(u32 addr)
 	return NULL;
 }
 
-struct tipc_node *tipc_node_create(u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_node *n_ptr, *temp_node;
 
-	spin_lock_bh(&node_list_lock);
+	spin_lock_bh(&tn->node_list_lock);
 
 	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
 	if (!n_ptr) {
-		spin_unlock_bh(&node_list_lock);
+		spin_unlock_bh(&tn->node_list_lock);
 		pr_warn("Node creation failed, no memory\n");
 		return NULL;
 	}
 
 	n_ptr->addr = addr;
+	n_ptr->net = net;
 	spin_lock_init(&n_ptr->lock);
 	INIT_HLIST_NODE(&n_ptr->hash);
 	INIT_LIST_HEAD(&n_ptr->list);
@@ -118,9 +114,9 @@ struct tipc_node *tipc_node_create(u32 addr)
 	skb_queue_head_init(&n_ptr->waiting_sks);
 	__skb_queue_head_init(&n_ptr->bclink.deferred_queue);
 
-	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
 
-	list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
+	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
 		if (n_ptr->addr < temp_node->addr)
 			break;
 	}
@@ -128,32 +124,33 @@ struct tipc_node *tipc_node_create(u32 addr)
 	n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
 	n_ptr->signature = INVALID_NODE_SIG;
 
-	tipc_num_nodes++;
+	tn->num_nodes++;
 
-	spin_unlock_bh(&node_list_lock);
+	spin_unlock_bh(&tn->node_list_lock);
 	return n_ptr;
 }
 
-static void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
 {
 	list_del_rcu(&n_ptr->list);
 	hlist_del_rcu(&n_ptr->hash);
 	kfree_rcu(n_ptr, rcu);
 
-	tipc_num_nodes--;
+	tn->num_nodes--;
 }
 
-void tipc_node_stop(void)
+void tipc_node_stop(struct net *net)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_node *node, *t_node;
 
-	spin_lock_bh(&node_list_lock);
-	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
-		tipc_node_delete(node);
-	spin_unlock_bh(&node_list_lock);
+	spin_lock_bh(&tn->node_list_lock);
+	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+		tipc_node_delete(tn, node);
+	spin_unlock_bh(&tn->node_list_lock);
 }
 
-int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
+int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
 {
 	struct tipc_node *node;
 	struct tipc_sock_conn *conn;
@@ -161,7 +158,7 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
 	if (in_own_node(dnode))
 		return 0;
 
-	node = tipc_node_find(dnode);
+	node = tipc_node_find(net, dnode);
 	if (!node) {
 		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
 		return -EHOSTUNREACH;
@@ -179,7 +176,7 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
 	return 0;
 }
 
-void tipc_node_remove_conn(u32 dnode, u32 port)
+void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
 {
 	struct tipc_node *node;
 	struct tipc_sock_conn *conn, *safe;
@@ -187,7 +184,7 @@ void tipc_node_remove_conn(u32 dnode, u32 port)
 	if (in_own_node(dnode))
 		return;
 
-	node = tipc_node_find(dnode);
+	node = tipc_node_find(net, dnode);
 	if (!node)
 		return;
 
@@ -201,7 +198,7 @@ void tipc_node_remove_conn(u32 dnode, u32 port)
 	tipc_node_unlock(node);
 }
 
-void tipc_node_abort_sock_conns(struct list_head *conns)
+void tipc_node_abort_sock_conns(struct net *net, struct list_head *conns)
 {
 	struct tipc_sock_conn *conn, *safe;
 	struct sk_buff *buf;
@@ -212,7 +209,7 @@ void tipc_node_abort_sock_conns(struct list_head *conns)
 				      conn->peer_node, conn->port,
 				      conn->peer_port, TIPC_ERR_NO_NODE);
 		if (likely(buf))
-			tipc_sk_rcv(buf);
+			tipc_sk_rcv(net, buf);
 		list_del(&conn->list);
 		kfree(conn);
 	}
@@ -342,24 +339,27 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
+
 	n_ptr->links[l_ptr->bearer_id] = l_ptr;
-	spin_lock_bh(&node_list_lock);
-	tipc_num_links++;
-	spin_unlock_bh(&node_list_lock);
+	spin_lock_bh(&tn->node_list_lock);
+	tn->num_links++;
+	spin_unlock_bh(&tn->node_list_lock);
 	n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
+	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
 	int i;
 
 	for (i = 0; i < MAX_BEARERS; i++) {
 		if (l_ptr != n_ptr->links[i])
 			continue;
 		n_ptr->links[i] = NULL;
-		spin_lock_bh(&node_list_lock);
-		tipc_num_links--;
-		spin_unlock_bh(&node_list_lock);
+		spin_lock_bh(&tn->node_list_lock);
+		tn->num_links--;
+		spin_unlock_bh(&tn->node_list_lock);
 		n_ptr->link_cnt--;
 	}
 }
@@ -414,8 +414,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 				TIPC_NOTIFY_NODE_DOWN;
 }
 
-struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area,
+				    int req_tlv_space)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	u32 domain;
 	struct sk_buff *buf;
 	struct tipc_node *n_ptr;
@@ -430,20 +432,20 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
 
-	spin_lock_bh(&node_list_lock);
-	if (!tipc_num_nodes) {
-		spin_unlock_bh(&node_list_lock);
+	spin_lock_bh(&tn->node_list_lock);
+	if (!tn->num_nodes) {
+		spin_unlock_bh(&tn->node_list_lock);
 		return tipc_cfg_reply_none();
 	}
 
 	/* For now, get space for all other nodes */
-	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
+	payload_size = TLV_SPACE(sizeof(node_info)) * tn->num_nodes;
 	if (payload_size > 32768u) {
-		spin_unlock_bh(&node_list_lock);
+		spin_unlock_bh(&tn->node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many nodes)");
 	}
-	spin_unlock_bh(&node_list_lock);
+	spin_unlock_bh(&tn->node_list_lock);
 
 	buf = tipc_cfg_reply_alloc(payload_size);
 	if (!buf)
@@ -451,7 +453,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 
 	/* Add TLVs for all nodes in scope */
 	rcu_read_lock();
-	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
 		if (!tipc_in_scope(domain, n_ptr->addr))
 			continue;
 		node_info.addr = htonl(n_ptr->addr);
@@ -463,8 +465,10 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 	return buf;
 }
 
-struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
+struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area,
+				    int req_tlv_space)
 {
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	u32 domain;
 	struct sk_buff *buf;
 	struct tipc_node *n_ptr;
@@ -482,15 +486,15 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	if (!tipc_own_addr)
 		return tipc_cfg_reply_none();
 
-	spin_lock_bh(&node_list_lock);
+	spin_lock_bh(&tn->node_list_lock);
 	/* Get space for all unicast links + broadcast link */
-	payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
+	payload_size = TLV_SPACE((sizeof(link_info)) * (tn->num_links + 1));
 	if (payload_size > 32768u) {
-		spin_unlock_bh(&node_list_lock);
+		spin_unlock_bh(&tn->node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many links)");
 	}
-	spin_unlock_bh(&node_list_lock);
+	spin_unlock_bh(&tn->node_list_lock);
 
 	buf = tipc_cfg_reply_alloc(payload_size);
 	if (!buf)
@@ -504,7 +508,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 
 	/* Add TLVs for any other links in scope */
 	rcu_read_lock();
-	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
 		u32 i;
 
 		if (!tipc_in_scope(domain, n_ptr->addr))
@@ -534,10 +538,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
  *
  * Returns 0 on success
  */
-int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
+int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
+			   char *linkname, size_t len)
 {
 	struct tipc_link *link;
-	struct tipc_node *node = tipc_node_find(addr);
+	struct tipc_node *node = tipc_node_find(net, addr);
 
 	if ((bearer_id >= MAX_BEARERS) || !node)
 		return -EINVAL;
@@ -554,6 +559,7 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
 
 void tipc_node_unlock(struct tipc_node *node)
 {
+	struct net *net = node->net;
 	LIST_HEAD(nsub_list);
 	LIST_HEAD(conn_sks);
 	struct sk_buff_head waiting_sks;
@@ -585,26 +591,26 @@ void tipc_node_unlock(struct tipc_node *node)
 	spin_unlock_bh(&node->lock);
 
 	while (!skb_queue_empty(&waiting_sks))
-		tipc_sk_rcv(__skb_dequeue(&waiting_sks));
+		tipc_sk_rcv(net, __skb_dequeue(&waiting_sks));
 
 	if (!list_empty(&conn_sks))
-		tipc_node_abort_sock_conns(&conn_sks);
+		tipc_node_abort_sock_conns(net, &conn_sks);
 
 	if (!list_empty(&nsub_list))
-		tipc_publ_notify(&nsub_list, addr);
+		tipc_publ_notify(net, &nsub_list, addr);
 
 	if (flags & TIPC_WAKEUP_BCAST_USERS)
-		tipc_bclink_wakeup_users();
+		tipc_bclink_wakeup_users(net);
 
 	if (flags & TIPC_NOTIFY_NODE_UP)
-		tipc_named_node_up(addr);
+		tipc_named_node_up(net, addr);
 
 	if (flags & TIPC_NOTIFY_LINK_UP)
-		tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
+		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
 				     TIPC_NODE_SCOPE, link_id, addr);
 
 	if (flags & TIPC_NOTIFY_LINK_DOWN)
-		tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
+		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
 				      link_id, addr);
 }
@@ -645,6 +651,8 @@ msg_full:
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int err;
+	struct net *net = sock_net(skb->sk);
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	int done = cb->args[0];
 	int last_addr = cb->args[1];
 	struct tipc_node *node;
@@ -659,7 +667,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 	rcu_read_lock();
 
-	if (last_addr && !tipc_node_find(last_addr)) {
+	if (last_addr && !tipc_node_find(net, last_addr)) {
 		rcu_read_unlock();
 		/* We never set seq or call nl_dump_check_consistent() this
 		 * means that setting prev_seq here will cause the consistence
@@ -671,7 +679,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		return -EPIPE;
 	}
 
-	list_for_each_entry_rcu(node, &tipc_node_list, list) {
+	list_for_each_entry_rcu(node, &tn->node_list, list) {
 		if (last_addr) {
 			if (node->addr == last_addr)
 				last_addr = 0;
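The recurring pattern in the hunks above: every function that previously touched the file-scope globals now takes (or derives) a struct net pointer and resolves the per-namespace state with net_generic() before using the node table. A minimal caller-side sketch of that pattern follows, assuming core.h declares struct tipc_net and tipc_net_id as the diff does; example_count_nodes() is a hypothetical helper, not part of the patch.

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "core.h"	/* assumed to declare struct tipc_net and tipc_net_id */

/* Hypothetical helper: reads the per-namespace counters the same way the
 * patched functions do, instead of reading the old file-scope globals. */
static void example_count_nodes(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* The lock and counters now live inside this namespace's tipc_net,
	 * so different namespaces neither see nor contend on each other's. */
	spin_lock_bh(&tn->node_list_lock);
	pr_info("tipc: %u nodes, %u links in this namespace\n",
		tn->num_nodes, tn->num_links);
	spin_unlock_bh(&tn->node_list_lock);
}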