Diffstat (limited to 'include/net')
-rw-r--r--  include/net/irda/irda_device.h   2
-rw-r--r--  include/net/sctp/structs.h      20
-rw-r--r--  include/net/sock.h              18
-rw-r--r--  include/net/tcp.h                5
4 files changed, 22 insertions, 23 deletions
diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h
index 71d6af83b631..92c828029cd8 100644
--- a/include/net/irda/irda_device.h
+++ b/include/net/irda/irda_device.h
@@ -224,7 +224,7 @@ int irda_device_is_receiving(struct net_device *dev);
/* Interface for internal use */
static inline int irda_device_txqueue_empty(const struct net_device *dev)
{
- return (skb_queue_len(&dev->qdisc->q) == 0);
+ return skb_queue_empty(&dev->qdisc->q);
}
int irda_device_set_raw_mode(struct net_device* self, int status);
struct net_device *alloc_irdadev(int sizeof_priv);
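
This first hunk, like the tcp.h one at the end of the patch, replaces an explicit "length == 0" test with skb_queue_empty(). Both helpers are sketched below, simplified from the <linux/skbuff.h> of this era: the empty check just compares pointers on the circular list, stating the intent directly instead of going through the cached length.

    /* Simplified from <linux/skbuff.h>: an sk_buff_head is a
     * circular list, so "empty" means next points back at the
     * head itself; no qlen read is involved.
     */
    static inline int skb_queue_empty(const struct sk_buff_head *list)
    {
            return list->next == (struct sk_buff *)list;
    }

    static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
    {
            return list_->qlen;
    }
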
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 47727c7cc628..7435528a1747 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -582,7 +582,6 @@ void sctp_datamsg_track(struct sctp_chunk *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
-
/* RFC2960 1.4 Key Terms
*
* o Chunk: A unit of information within an SCTP packet, consisting of
@@ -592,13 +591,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *);
* each chunk as well as a few other header pointers...
*/
struct sctp_chunk {
- /* These first three elements MUST PRECISELY match the first
- * three elements of struct sk_buff. This allows us to reuse
- * all the skb_* queue management functions.
- */
- struct sctp_chunk *next;
- struct sctp_chunk *prev;
- struct sk_buff_head *list;
+ struct list_head list;
+
atomic_t refcnt;
/* This is our link to the per-transport transmitted list. */
@@ -717,7 +711,7 @@ struct sctp_packet {
__u32 vtag;
/* This contains the payload chunks. */
- struct sk_buff_head chunks;
+ struct list_head chunk_list;
/* This is the overhead of the sctp and ip headers. */
size_t overhead;
@@ -974,7 +968,7 @@ struct sctp_inq {
/* This is actually a queue of sctp_chunk each
* containing a partially decoded packet.
*/
- struct sk_buff_head in;
+ struct list_head in_chunk_list;
/* This is the packet which is currently off the in queue and is
* being worked on through the inbound chunk processing.
*/
@@ -1017,7 +1011,7 @@ struct sctp_outq {
struct sctp_association *asoc;
/* Data pending that has never been transmitted. */
- struct sk_buff_head out;
+ struct list_head out_chunk_list;
unsigned out_qlen; /* Total length of queued data chunks. */
@@ -1025,7 +1019,7 @@ struct sctp_outq {
unsigned error;
/* These are control chunks we want to send. */
- struct sk_buff_head control;
+ struct list_head control_chunk_list;
/* These are chunks that have been sacked but are above the
* CTSN, or cumulative tsn ack point.
@@ -1672,7 +1666,7 @@ struct sctp_association {
* which already resides in sctp_outq. Please move this
* queue and its supporting logic down there. --piggy]
*/
- struct sk_buff_head addip_chunks;
+ struct list_head addip_chunk_list;
/* ADDIP Section 4.1 ASCONF Chunk Procedures
*
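
The sctp_chunk rework above is the core of the patch: instead of mirroring the first three fields of struct sk_buff so the skb_* queue functions could be reused on chunk queues, the chunk now embeds a plain struct list_head, and every former sk_buff_head queue (chunk_list, in_chunk_list, out_chunk_list, control_chunk_list, addip_chunk_list) becomes an ordinary kernel list. A minimal sketch of the resulting idiom; the two helper names are hypothetical, only the `list` member comes from the patch:

    #include <linux/list.h>

    /* Hypothetical helpers: with an embedded list_head, chunk
     * queues use the generic list API rather than the skb_*
     * functions the old field layout piggybacked on.
     */
    static void example_enqueue(struct list_head *queue,
                                struct sctp_chunk *chunk)
    {
            list_add_tail(&chunk->list, queue);
    }

    static struct sctp_chunk *example_dequeue(struct list_head *queue)
    {
            struct sctp_chunk *chunk;

            if (list_empty(queue))
                    return NULL;
            chunk = list_entry(queue->next, struct sctp_chunk, list);
            list_del_init(&chunk->list);  /* unlink; node stays reusable */
            return chunk;
    }
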
diff --git a/include/net/sock.h b/include/net/sock.h
index 7b76f891ae2d..a1042d08becd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -684,16 +684,17 @@ extern void FASTCALL(release_sock(struct sock *sk));
#define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-extern struct sock *sk_alloc(int family, int priority,
+extern struct sock *sk_alloc(int family,
+ unsigned int __nocast priority,
struct proto *prot, int zero_it);
extern void sk_free(struct sock *sk);
extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force,
- int priority);
+ unsigned int __nocast priority);
extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force,
- int priority);
+ unsigned int __nocast priority);
extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
@@ -708,7 +709,8 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
unsigned long size,
int noblock,
int *errcode);
-extern void *sock_kmalloc(struct sock *sk, int size, int priority);
+extern void *sock_kmalloc(struct sock *sk, int size,
+ unsigned int __nocast priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);
@@ -1132,7 +1134,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
}
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
- int size, int mem, int gfp)
+ int size, int mem,
+ unsigned int __nocast gfp)
{
struct sk_buff *skb;
int hdr_len;
@@ -1155,7 +1158,8 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
}
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
- int size, int gfp)
+ int size,
+ unsigned int __nocast gfp)
{
return sk_stream_alloc_pskb(sk, size, 0, gfp);
}
@@ -1188,7 +1192,7 @@ static inline int sock_writeable(const struct sock *sk)
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}
-static inline int gfp_any(void)
+static inline unsigned int __nocast gfp_any(void)
{
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
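
Every sock.h change above has the same shape: allocation-priority parameters move from a bare int to unsigned int annotated with __nocast. The annotation costs nothing in a normal gcc build; under sparse it makes the type resistant to implicit integer conversion, so mixing up a GFP value with an ordinary int gets flagged at check time. Roughly as defined in the <linux/compiler.h> of this era:

    #ifdef __CHECKER__
    # define __nocast  __attribute__((nocast))  /* sparse: no implicit casts */
    #else
    # define __nocast                           /* plain build: expands to nothing */
    #endif
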
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a166918ca56d..f4f9aba07ac2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -860,7 +860,8 @@ extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, int priority);
+extern void tcp_send_active_reset(struct sock *sk,
+ unsigned int __nocast priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);
@@ -991,7 +992,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
{
- if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
+ if (skb_queue_empty(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
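
With gfp_any() now returning the __nocast-annotated type as well, the priority propagates through call chains like this one without any cast. A hypothetical caller (the wrapper name is illustrative; tcp_send_active_reset() and gfp_any() are the interfaces patched above):

    /* gfp_any() picks GFP_ATOMIC in softirq context and
     * GFP_KERNEL otherwise; the annotated type flows from here
     * into tcp_send_active_reset() unchanged.
     */
    static void example_abort_connection(struct sock *sk)
    {
            tcp_send_active_reset(sk, gfp_any());
    }
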