author    Octavian Purdila <opurdila@ixiacom.com>    2009-12-26 12:51:02 +0100
committer David S. Miller <davem@davemloft.net>      2009-12-27 05:41:43 +0100
commit    b76f5a8427ac2928c07fa4ff2144bb8db072c240 (patch)
tree      542d64a8bd28baf69f87b199a0156f865a06551d /net/llc/llc_proc.c
parent    llc: add support for SO_BINDTODEVICE (diff)
llc: convert the socket list to RCU locking
For the reclamation phase we use the SLAB_DESTROY_BY_RCU mechanism, which requires some extra checks in the lookup code:

a) If the current socket was released, reallocated and inserted into another list, it short-circuits the iteration over the current list, so we need to restart the lookup.

b) If the current socket was released, reallocated and inserted into the same list, we only need to recheck that it matches the lookup criteria; if it does not, we can skip to the next element. There is no need to restart the lookup in this case, since sockets are inserted at the start of the list and the worst that can happen is that we iterate over some of the list elements more than once.

Note that the /proc and multicast delivery paths have not yet been converted to RCU; they still use spinlocks for protection.

Signed-off-by: Octavian Purdila <opurdila@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
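For reference, a minimal sketch of what such a SLAB_DESTROY_BY_RCU lookup over an hlist_nulls chain looks like; llc_estab_match(), llc_sk_laddr_hashfn() and sap->sk_laddr_hash are illustrative placeholders, not necessarily the identifiers used by this patch series:

    #include <linux/rculist_nulls.h>
    #include <net/sock.h>
    #include <net/llc.h>

    static struct sock *llc_lookup_sketch(struct llc_sap *sap,
                                          struct llc_addr *daddr,
                                          struct llc_addr *laddr)
    {
            struct sock *sk;
            struct hlist_nulls_node *node;
            int slot = llc_sk_laddr_hashfn(sap, laddr);   /* placeholder hash */
            struct hlist_nulls_head *head = &sap->sk_laddr_hash[slot];

            rcu_read_lock();
    again:
            sk_nulls_for_each_rcu(sk, node, head) {
                    if (!llc_estab_match(sap, daddr, laddr, sk))
                            continue;
                    /* The slab may free and reuse the socket at any time, so
                     * only take a reference if the refcount is still non-zero. */
                    if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                            continue;
                    /* Case b): the socket may have been reused for another
                     * connection on the same list; recheck the match. */
                    if (unlikely(!llc_estab_match(sap, daddr, laddr, sk))) {
                            sock_put(sk);
                            continue;
                    }
                    goto found;
            }
            sk = NULL;
            /* Case a): a reused socket can splice the walk onto a different
             * chain; the nulls marker at the end of the list tells us which
             * chain we actually finished on, so restart if it is not ours. */
            if (unlikely(get_nulls_value(node) != slot))
                    goto again;
    found:
            rcu_read_unlock();
            return sk;
    }

The nulls value encodes which hash chain a list terminates, which is what makes the case a) detection possible without taking any lock on the reader side.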
Diffstat (limited to 'net/llc/llc_proc.c')
-rw-r--r--  net/llc/llc_proc.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6b..6b3d033b3236 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -34,19 +34,19 @@ static struct sock *llc_get_sk_idx(loff_t pos)
{
struct list_head *sap_entry;
struct llc_sap *sap;
- struct hlist_node *node;
+ struct hlist_nulls_node *node;
struct sock *sk = NULL;
list_for_each(sap_entry, &llc_sap_list) {
sap = list_entry(sap_entry, struct llc_sap, node);
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(sk, node, &sap->sk_list.list) {
+ spin_lock_bh(&sap->sk_lock);
+ sk_nulls_for_each(sk, node, &sap->sk_list) {
if (!pos)
goto found;
--pos;
}
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
sk = NULL;
found:
@@ -73,25 +73,25 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
sk = v;
- next = sk_next(sk);
+ next = sk_nulls_next(sk);
if (next) {
sk = next;
goto out;
}
llc = llc_sk(sk);
sap = llc->sap;
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
sk = NULL;
for (;;) {
if (sap->node.next == &llc_sap_list)
break;
sap = list_entry(sap->node.next, struct llc_sap, node);
- read_lock_bh(&sap->sk_list.lock);
- if (!hlist_empty(&sap->sk_list.list)) {
- sk = sk_head(&sap->sk_list.list);
+ spin_lock_bh(&sap->sk_lock);
+ if (!hlist_nulls_empty(&sap->sk_list)) {
+ sk = sk_nulls_head(&sap->sk_list);
break;
}
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
out:
return sk;
@@ -104,7 +104,7 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
struct llc_sock *llc = llc_sk(sk);
struct llc_sap *sap = llc->sap;
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
read_unlock_bh(&llc_sap_list_lock);
}