author		David S. Miller <davem@davemloft.net>	2005-07-08 23:57:23 +0200
committer	David S. Miller <davem@davemloft.net>	2005-07-08 23:57:23 +0200
commit		b03efcfb2180289718991bb984044ce6c5b7d1b0 (patch)
tree		f3b0c6c4eaf0991c28b7116a20994b48398eea57 /net/ipv4
parent		[PATCH] Add MAINTAINERS entry for audit subsystem (diff)
download	linux-b03efcfb2180289718991bb984044ce6c5b7d1b0.tar.xz
		linux-b03efcfb2180289718991bb984044ce6c5b7d1b0.zip
[NET]: Transform skb_queue_len() binary tests into skb_queue_empty()
This is part of the grand scheme to eliminate the qlen
member of skb_queue_head, and subsequently remove the
'list' member of sk_buff.
Most users of skb_queue_len() only want to know whether the queue is
empty or not, and that is trivially done with skb_queue_empty(),
which does not read the skb_queue_head->qlen member and instead
uses the emptiness of the queue list as the test.
Signed-off-by: David S. Miller <davem@davemloft.net>
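For readers without the tree at hand, below is a minimal sketch of the two helpers being traded, paraphrased from include/linux/skbuff.h of this period rather than copied verbatim; the structs are trimmed to what the comparison needs (the real sk_buff_head also carries a spinlock and uses kernel types such as __u32).

/* Paraphrased sketch, not verbatim kernel code: sk_buff_head keeps its
 * first two members layout-compatible with sk_buff, so the head itself
 * can act as the list terminator. */
struct sk_buff {
	struct sk_buff *next;
	struct sk_buff *prev;
	/* ... payload pointers, lengths, etc. ... */
};

struct sk_buff_head {
	struct sk_buff *next;
	struct sk_buff *prev;
	unsigned int    qlen;	/* the member this series wants to retire */
};

/* Empty when the head points back at itself; no qlen access needed. */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *)list;
}

/* Cached element count; only callers that need an actual count keep it. */
static inline unsigned int skb_queue_len(const struct sk_buff_head *list)
{
	return list->qlen;
}

Under those definitions, "if (skb_queue_len(&q))" and "if (!skb_queue_empty(&q))" answer the same yes/no question, but only the latter keeps working once qlen is removed, which is why the patch converts every binary use.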
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp.c		8
-rw-r--r--	net/ipv4/tcp_input.c	11
-rw-r--r--	net/ipv4/tcp_timer.c	5
3 files changed, 11 insertions, 13 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 29894c749163..ddb6ce4ecff2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			 * is not empty. It is more elegant, but eats cycles,
 			 * unfortunately.
 			 */
-			if (skb_queue_len(&tp->ucopy.prequeue))
+			if (!skb_queue_empty(&tp->ucopy.prequeue))
 				goto do_prequeue;
 
 			/* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			}
 
 			if (tp->rcv_nxt == tp->copied_seq &&
-			    skb_queue_len(&tp->ucopy.prequeue)) {
+			    !skb_queue_empty(&tp->ucopy.prequeue)) {
 do_prequeue:
 				tcp_prequeue_process(sk);
 
@@ -1476,7 +1476,7 @@ skip_copy:
 	} while (len > 0);
 
 	if (user_recv) {
-		if (skb_queue_len(&tp->ucopy.prequeue)) {
+		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 			int chunk;
 
 			tp->ucopy.len = copied > 0 ? len : 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8de2f1071c2b..53a8a5399f1e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
+	if (skb_queue_empty(&tp->out_of_order_queue)) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
@@ -2935,13 +2935,13 @@ queue_and_out:
 		if(th->fin)
 			tcp_fin(skb, sk, th);
 
-		if (skb_queue_len(&tp->out_of_order_queue)) {
+		if (!skb_queue_empty(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (!skb_queue_len(&tp->out_of_order_queue))
+			if (skb_queue_empty(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}
 
@@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 
 	/* First, purge the out_of_order queue. */
-	if (skb_queue_len(&tp->out_of_order_queue)) {
-		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
-				 skb_queue_len(&tp->out_of_order_queue));
+	if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b127b4498565..0084227438c2 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
 
-	if (skb_queue_len(&tp->ucopy.prequeue)) {
+	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
-				 skb_queue_len(&tp->ucopy.prequeue));
+		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
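One side effect visible in the statistics hunks above: the calls change from NET_ADD_STATS_*(mib, skb_queue_len(...)) to NET_INC_STATS_*(mib), so those MIB counters now advance by one per event rather than by the number of packets sitting in the queue. The sketch below illustrates only that accounting difference, with a plain counter standing in for the kernel's per-CPU SNMP mib machinery; the function names and the standalone counter are illustrative stand-ins, not kernel code.

/* Stand-in for a MIB counter such as LINUX_MIB_OFOPRUNED; the real macros
 * update per-CPU SNMP tables, but the arithmetic difference is the same. */
static unsigned long ofo_pruned_events;

/* Old accounting: the counter grew by however many skbs were queued. */
static void account_prune_old(unsigned int qlen)
{
	if (qlen != 0)				/* old test: skb_queue_len() != 0 */
		ofo_pruned_events += qlen;	/* NET_ADD_STATS_BH(..., qlen) */
}

/* New accounting: the counter grows by one per prune, regardless of qlen. */
static void account_prune_new(int queue_empty)
{
	if (!queue_empty)			/* new test: !skb_queue_empty() */
		ofo_pruned_events += 1;		/* NET_INC_STATS_BH(...) */
}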