author | wangweidong <wangweidong1@huawei.com> | 2014-01-21 08:44:12 +0100
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2014-01-22 03:41:36 +0100
commit | 5bc1d1b4a261a865cbde65b1561748df5b9c724b (patch) |
tree | 4920c464aba555430904d4b93936a72a391016c7 /net/sctp/input.c |
parent | sctp: remove macros sctp_{lock|release}_sock (diff) |
download | linux-5bc1d1b4a261a865cbde65b1561748df5b9c724b.tar.xz linux-5bc1d1b4a261a865cbde65b1561748df5b9c724b.zip |
sctp: remove macros sctp_bh_[un]lock_sock
bh_[un]lock_sock was redefined as sctp_bh_[un]lock_sock to keep the code
user-space friendly, but we haven't used that in years, so remove the wrappers.
Signed-off-by: Wang Weidong <wangweidong1@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
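For context, the wrappers being removed were thin aliases around the generic socket bottom-half lock helpers from include/net/sock.h. A minimal sketch of what the definitions in include/net/sctp/sctp.h looked like before this commit (reconstructed here for illustration, not copied from the tree):

```c
/* Reconstructed sketch of the removed wrappers (formerly in
 * include/net/sctp/sctp.h): they simply aliased the generic helpers,
 * so call sites can use bh_lock_sock()/bh_unlock_sock() directly.
 */
#define sctp_bh_lock_sock(sk)    bh_lock_sock(sk)
#define sctp_bh_unlock_sock(sk)  bh_unlock_sock(sk)
```

With the aliases gone, the softirq-context receive paths take and release the lock with bh_lock_sock(sk) and bh_unlock_sock(sk) directly, exactly as the hunks below show.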
Diffstat (limited to 'net/sctp/input.c')
-rw-r--r-- | net/sctp/input.c | 18 |
1 file changed, 9 insertions(+), 9 deletions(-)
```diff
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 94f7f44049a6..f2e2cbd2d750 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -238,7 +238,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 * bottom halves on this lock, but a user may be in the lock too,
 	 * so check if it is busy.
 	 */
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	if (sk != rcvr->sk) {
 		/* Our cached sk is different from the rcvr->sk.  This is
@@ -248,14 +248,14 @@ int sctp_rcv(struct sk_buff *skb)
 		 * be doing something with the new socket.  Switch our veiw
 		 * of the current sk.
 		 */
-		sctp_bh_unlock_sock(sk);
+		bh_unlock_sock(sk);
 		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
+		bh_lock_sock(sk);
 	}
 
 	if (sock_owned_by_user(sk)) {
 		if (sctp_add_backlog(sk, skb)) {
-			sctp_bh_unlock_sock(sk);
+			bh_unlock_sock(sk);
 			sctp_chunk_free(chunk);
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
@@ -266,7 +266,7 @@ int sctp_rcv(struct sk_buff *skb)
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
 
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 
 	/* Release the asoc/ep ref we took in the lookup calls. */
 	if (asoc)
@@ -327,7 +327,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		 */
 
 		sk = rcvr->sk;
-		sctp_bh_lock_sock(sk);
+		bh_lock_sock(sk);
 
 		if (sock_owned_by_user(sk)) {
 			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
@@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		} else
 			sctp_inq_push(inqueue, chunk);
 
-		sctp_bh_unlock_sock(sk);
+		bh_unlock_sock(sk);
 
 		/* If the chunk was backloged again, don't drop refs */
 		if (backloged)
@@ -522,7 +522,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 		goto out;
 	}
 
-	sctp_bh_lock_sock(sk);
+	bh_lock_sock(sk);
 
 	/* If too many ICMPs get dropped on busy
 	 * servers this needs to be solved differently.
@@ -542,7 +542,7 @@ out:
 /* Common cleanup code for icmp/icmpv6 error handler. */
 void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 {
-	sctp_bh_unlock_sock(sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
```