diff options
author | Arnaldo Carvalho de Melo <acme@mandriva.com> | 2005-08-24 06:54:23 +0200 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-30 01:05:45 +0200 |
commit | 331968bd0c1b2437f3ad773cbf55f2e0737bafc0 (patch) | |
tree | ec9c5aeaa5217c8ce009a7e5e07a60c3a390e021 /net/dccp/proto.c | |
parent | [DCCP]: Call the HC exit routines at dccp_v4_destroy_sock (diff) | |
download | linux-331968bd0c1b2437f3ad773cbf55f2e0737bafc0.tar.xz linux-331968bd0c1b2437f3ad773cbf55f2e0737bafc0.zip |
[DCCP]: Initial dccp_poll implementation
Tested with a patched netcat, no horror stories so far 8)
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r-- | net/dccp/proto.c | 59 |
1 files changed, 58 insertions, 1 deletions
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 2b6db18e607f..600dda51d995 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -140,6 +140,62 @@ int dccp_disconnect(struct sock *sk, int flags)
 	return err;
 }
 
+/*
+ * Wait for a DCCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
+ */
+static unsigned int dccp_poll(struct file *file, struct socket *sock,
+			      poll_table *wait)
+{
+	unsigned int mask;
+	struct sock *sk = sock->sk;
+
+	poll_wait(file, sk->sk_sleep, wait);
+	if (sk->sk_state == DCCP_LISTEN)
+		return inet_csk_listen_poll(sk);
+
+	/* Socket is not locked. We are protected from async events
+	   by poll logic and correct handling of state changes
+	   made by another threads is impossible in any case.
+	 */
+
+	mask = 0;
+	if (sk->sk_err)
+		mask = POLLERR;
+
+	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
+		mask |= POLLHUP;
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLIN | POLLRDNORM;
+
+	/* Connected? */
+	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
+		if (atomic_read(&sk->sk_rmem_alloc) > 0)
+			mask |= POLLIN | POLLRDNORM;
+
+		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+				mask |= POLLOUT | POLLWRNORM;
+			} else {  /* send SIGIO later */
+				set_bit(SOCK_ASYNC_NOSPACE,
+					&sk->sk_socket->flags);
+				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+
+				/* Race breaker. If space is freed after
+				 * wspace test but before the flags are set,
+				 * IO signal will be lost.
+				 */
+				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+					mask |= POLLOUT | POLLWRNORM;
+			}
+		}
+	}
+	return mask;
+}
+
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
 	dccp_pr_debug("entry\n");
@@ -478,7 +534,8 @@ static struct proto_ops inet_dccp_ops = {
 	.socketpair	   = sock_no_socketpair,
 	.accept		   = inet_accept,
 	.getname	   = inet_getname,
-	.poll		   = sock_no_poll,
+	/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
+	.poll		   = dccp_poll,
 	.ioctl		   = inet_ioctl,
 	/* FIXME: work on inet_listen to rename it to sock_common_listen */
 	.listen		   = inet_dccp_listen,