author     Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 23:34:03 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-02-11 23:34:03 +0100
commit     a9a08845e9acbd224e4ee466f5c1275ed50054e8
tree       415d6e6a82e001c65e6b161539411f54ba5fe8ce /net
parent     Merge branch 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/vi...
download   linux-a9a08845e9acbd224e4ee466f5c1275ed50054e8.tar.xz, linux-a9a08845e9acbd224e4ee466f5c1275ed50054e8.zip
vfs: do bulk POLL* -> EPOLL* replacement
This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:
    for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
        L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
        for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
    done
with de-mangling cleanups yet to come.
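To see what the sed actually does: it rewrites a whole-word POLL<V> token only when no double quote precedes it on the line, so string literals keep their old spelling. A schematic before/after on a made-up poll method (example_poll() is hypothetical, not a function from this diff; the kernel helpers it calls are real):

static __poll_t example_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	/* before the script:  mask |= POLLIN | POLLRDNORM; */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* before the script:  mask |= POLLOUT | POLLWRNORM; */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;

	/* left alone: the token sits after a '"', so ^\([^"]*\) can't reach it */
	pr_debug("example_poll: POLLIN side checked\n");

	return mask;
}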
NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do. But the keyword here is "almost".
For various bad reasons they aren't the same, and epoll() doesn't
actually work quite correctly in some cases due to this on Sparc et al.
The next patch from Al will sort out the final differences, and we
should be all done.
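The "almost" is easy to check from userspace. On x86-64 the pairs below all print identically; the sparc divergence is in the legacy poll.h bits (POLLWRNORM, POLLWRBAND and POLLRDHUP have different values there, as best I recall the arch headers, so treat the sparc specifics as an assumption). A minimal checker:

#define _GNU_SOURCE		/* POLLRDHUP needs it in userspace <poll.h> */
#include <poll.h>
#include <sys/epoll.h>
#include <stdio.h>

int main(void)
{
	/* On most architectures every pair prints the same value twice. */
	printf("POLLIN     %#06x   EPOLLIN     %#06x\n", POLLIN, EPOLLIN);
	printf("POLLWRNORM %#06x   EPOLLWRNORM %#06x\n", POLLWRNORM, EPOLLWRNORM);
	printf("POLLWRBAND %#06x   EPOLLWRBAND %#06x\n", POLLWRBAND, EPOLLWRBAND);
	printf("POLLRDHUP  %#06x   EPOLLRDHUP  %#06x\n", POLLRDHUP, EPOLLRDHUP);
	return 0;
}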
Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_fd.c             |  26
-rw-r--r--  net/atm/common.c              |   8
-rw-r--r--  net/batman-adv/icmp_socket.c  |   2
-rw-r--r--  net/batman-adv/log.c          |   2
-rw-r--r--  net/bluetooth/af_bluetooth.c  |  16
-rw-r--r--  net/caif/caif_socket.c        |  12
-rw-r--r--  net/core/datagram.c           |  16
-rw-r--r--  net/core/sock.c               |  10
-rw-r--r--  net/core/stream.c             |   4
-rw-r--r--  net/dccp/proto.c              |  12
-rw-r--r--  net/decnet/af_decnet.c        |   2
-rw-r--r--  net/ipv4/af_inet.c            |   2
-rw-r--r--  net/ipv4/tcp.c                |  34
-rw-r--r--  net/ipv4/tcp_input.c          |   2
-rw-r--r--  net/ipv4/udp.c                |   6
-rw-r--r--  net/iucv/af_iucv.c            |  18
-rw-r--r--  net/kcm/kcmsock.c             |   6
-rw-r--r--  net/nfc/llcp_sock.c           |  16
-rw-r--r--  net/packet/af_packet.c        |   4
-rw-r--r--  net/phonet/socket.c           |  10
-rw-r--r--  net/rds/af_rds.c              |  16
-rw-r--r--  net/rfkill/core.c             |   4
-rw-r--r--  net/rxrpc/af_rxrpc.c          |   4
-rw-r--r--  net/sctp/socket.c             |  20
-rw-r--r--  net/smc/af_smc.c              |  24
-rw-r--r--  net/smc/smc_rx.c              |   4
-rw-r--r--  net/smc/smc_tx.c              |   4
-rw-r--r--  net/sunrpc/cache.c            |   4
-rw-r--r--  net/sunrpc/rpc_pipe.c         |   6
-rw-r--r--  net/tipc/socket.c             |  22
-rw-r--r--  net/unix/af_unix.c            |  40
-rw-r--r--  net/vmw_vsock/af_vsock.c      |  30
32 files changed, 193 insertions, 193 deletions
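A perfectly balanced diffstat (193 lines in, 193 out) is what a pure token rename looks like. The rename buys type checking: __poll_t is a sparse __bitwise type and the EPOLL* constants carry a (__force __poll_t) cast, so stragglers mixing plain POLL* integers into a poll mask get flagged by make C=1. A condensed, self-contained imitation of that mechanism (my_poll_t and MY_EPOLLIN are stand-in names, not the kernel's):

/* Compiles normally; run sparse over it to see the warning noted below. */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise my_poll_t;	/* stand-in for __poll_t */

#define MY_EPOLLIN	((__force my_poll_t)0x001)

static my_poll_t example_mask(int readable)
{
	my_poll_t mask = 0;	/* literal 0 is special-cased by sparse */

	if (readable)
		mask |= MY_EPOLLIN;	/* ok: the constant carries the type */
	/* mask |= 0x001; would warn under sparse: plain int into my_poll_t */
	return mask;
}

int main(void)
{
	return example_mask(1) == MY_EPOLLIN ? 0 : 1;
}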
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index d6f7f7cb79c4..0cfba919d167 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -240,7 +240,7 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err) if (!ts) { if (err) *err = -EREMOTEIO; - return POLLERR; + return EPOLLERR; } if (!ts->rd->f_op->poll) @@ -253,7 +253,7 @@ p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err) n = DEFAULT_POLLMASK; else n = ts->wr->f_op->poll(ts->wr, pt); - ret = (ret & ~POLLOUT) | (n & ~POLLIN); + ret = (ret & ~EPOLLOUT) | (n & ~EPOLLIN); } return ret; @@ -396,11 +396,11 @@ end_clear: if (!list_empty(&m->req_list)) { if (test_and_clear_bit(Rpending, &m->wsched)) - n = POLLIN; + n = EPOLLIN; else n = p9_fd_poll(m->client, NULL, NULL); - if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { + if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); schedule_work(&m->rq); } @@ -505,11 +505,11 @@ end_clear: if (m->wsize || !list_empty(&m->unsent_req_list)) { if (test_and_clear_bit(Wpending, &m->wsched)) - n = POLLOUT; + n = EPOLLOUT; else n = p9_fd_poll(m->client, NULL, NULL); - if ((n & POLLOUT) && + if ((n & EPOLLOUT) && !test_and_set_bit(Wworksched, &m->wsched)) { p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); schedule_work(&m->wq); @@ -599,12 +599,12 @@ static void p9_conn_create(struct p9_client *client) init_poll_funcptr(&m->pt, p9_pollwait); n = p9_fd_poll(client, &m->pt, NULL); - if (n & POLLIN) { + if (n & EPOLLIN) { p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); set_bit(Rpending, &m->wsched); } - if (n & POLLOUT) { + if (n & EPOLLOUT) { p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); set_bit(Wpending, &m->wsched); } @@ -625,12 +625,12 @@ static void p9_poll_mux(struct p9_conn *m) return; n = p9_fd_poll(m->client, NULL, &err); - if (n & (POLLERR | POLLHUP | POLLNVAL)) { + if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) { p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); p9_conn_cancel(m, err); } - if (n & POLLIN) { + if (n & EPOLLIN) { set_bit(Rpending, &m->wsched); p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); if (!test_and_set_bit(Rworksched, &m->wsched)) { @@ -639,7 +639,7 @@ static void p9_poll_mux(struct p9_conn *m) } } - if (n & POLLOUT) { + if (n & EPOLLOUT) { set_bit(Wpending, &m->wsched); p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); if ((m->wsize || !list_empty(&m->unsent_req_list)) && @@ -678,11 +678,11 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) spin_unlock(&client->lock); if (test_and_clear_bit(Wpending, &m->wsched)) - n = POLLOUT; + n = EPOLLOUT; else n = p9_fd_poll(m->client, NULL, NULL); - if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) + if (n & EPOLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) schedule_work(&m->wq); return 0; diff --git a/net/atm/common.c b/net/atm/common.c index 6523f38c4957..fc78a0508ae1 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -661,15 +661,15 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait) /* exceptional events */ if (sk->sk_err) - mask = POLLERR; + mask = EPOLLERR; if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags)) - mask |= POLLHUP; + mask |= EPOLLHUP; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* writable? 
*/ if (sock->state == SS_CONNECTING && @@ -678,7 +678,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait) if (vcc->qos.txtp.traffic_class != ATM_NONE && vcc_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 581375d0eed2..e91f29c7c638 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c @@ -304,7 +304,7 @@ static __poll_t batadv_socket_poll(struct file *file, poll_table *wait) poll_wait(file, &socket_client->queue_wait, wait); if (socket_client->queue_len > 0) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index 9be74a44e99d..dc9fa37ddd14 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@ -193,7 +193,7 @@ static __poll_t batadv_log_poll(struct file *file, poll_table *wait) poll_wait(file, &debug_log->queue_wait, wait); if (!batadv_log_empty(debug_log)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; return 0; } diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index f897681780db..84d92a077834 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -431,7 +431,7 @@ static inline __poll_t bt_accept_poll(struct sock *parent) if (sk->sk_state == BT_CONNECTED || (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && sk->sk_state == BT_CONNECT2)) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } return 0; @@ -451,20 +451,20 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock, return bt_accept_poll(sk); if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR | - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP | POLLIN | POLLRDNORM; + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == BT_CLOSED) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_state == BT_CONNECT || sk->sk_state == BT_CONNECT2 || @@ -472,7 +472,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock, return mask; if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index b109445a1df9..a6fb1b3bcad9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -924,7 +924,7 @@ static int caif_release(struct socket *sock) caif_disconnect_client(sock_net(sk), &cf_sk->layer); cf_sk->sk.sk_socket->state = SS_DISCONNECTING; - wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); + wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP); sock_orphan(sk); sk_stream_kill_queues(&cf_sk->sk); @@ -946,23 +946,23 @@ static __poll_t caif_poll(struct file *file, /* exceptional events? */ if (sk->sk_err) - mask |= POLLERR; + mask |= EPOLLERR; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP; + mask |= EPOLLRDHUP; /* readable? 
*/ if (!skb_queue_empty(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* * we set writable also when the other side has shut down the * connection. This prevents stuck sockets. */ if (sock_writeable(sk) && tx_flow_is_on(cf_sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } diff --git a/net/core/datagram.c b/net/core/datagram.c index b7d9293940b5..9938952c5c78 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -75,7 +75,7 @@ static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, i /* * Avoid a wakeup if event not interesting for us */ - if (key && !(key_to_poll(key) & (POLLIN | POLLERR))) + if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR))) return 0; return autoremove_wake_function(wait, mode, sync, key); } @@ -842,22 +842,22 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR | - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP | POLLIN | POLLRDNORM; + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* Connection-based need to check for termination and startup */ if (connection_based(sk)) { if (sk->sk_state == TCP_CLOSE) - mask |= POLLHUP; + mask |= EPOLLHUP; /* connection hasn't started yet? */ if (sk->sk_state == TCP_SYN_SENT) return mask; @@ -865,7 +865,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock, /* writable? 
*/ if (sock_writeable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); diff --git a/net/core/sock.c b/net/core/sock.c index b026e1717df4..c501499a04fe 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2619,7 +2619,7 @@ static void sock_def_error_report(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_poll(&wq->wait, POLLERR); + wake_up_interruptible_poll(&wq->wait, EPOLLERR); sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); rcu_read_unlock(); } @@ -2631,8 +2631,8 @@ static void sock_def_readable(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | - POLLRDNORM | POLLRDBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | + EPOLLRDNORM | EPOLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } @@ -2649,8 +2649,8 @@ static void sock_def_write_space(struct sock *sk) if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLWRNORM | POLLWRBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | + EPOLLWRNORM | EPOLLWRBAND); /* Should agree with poll, otherwise some programs break */ if (sock_writeable(sk)) diff --git a/net/core/stream.c b/net/core/stream.c index 1cff9c6270c6..7d329fb1f553 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -38,8 +38,8 @@ void sk_stream_write_space(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_poll(&wq->wait, POLLOUT | - POLLWRNORM | POLLWRBAND); + wake_up_interruptible_poll(&wq->wait, EPOLLOUT | + EPOLLWRNORM | EPOLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 74685fecfdb9..15bdc002d90c 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -338,21 +338,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, mask = 0; if (sk->sk_err) - mask = POLLERR; + mask = EPOLLERR; if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLIN | POLLRDNORM | POLLRDHUP; + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected? */ if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { if (atomic_read(&sk->sk_rmem_alloc) > 0) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { if (sk_stream_is_writeable(sk)) { - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); @@ -362,7 +362,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, * IO signal will be lost. 
*/ if (sk_stream_is_writeable(sk)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } } } diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index cc1b505453a8..91dd09f79808 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1216,7 +1216,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wai __poll_t mask = datagram_poll(file, sock, wait); if (!skb_queue_empty(&scp->other_receive_queue)) - mask |= POLLRDBAND; + mask |= EPOLLRDBAND; return mask; } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c24008daa3d8..e4329e161943 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -828,7 +828,7 @@ int inet_shutdown(struct socket *sock, int how) case TCP_CLOSE: err = -ENOTCONN; /* Hack to wake up other listeners, who can poll for - POLLHUP, even on eg. unconnected UDP sockets -- RR */ + EPOLLHUP, even on eg. unconnected UDP sockets -- RR */ /* fall through */ default: sk->sk_shutdown |= how; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c059aa7df0a9..48636aee23c3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -512,36 +512,36 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) mask = 0; /* - * POLLHUP is certainly not done right. But poll() doesn't + * EPOLLHUP is certainly not done right. But poll() doesn't * have a notion of HUP in just one direction, and for a * socket the read side is more interesting. * - * Some poll() documentation says that POLLHUP is incompatible - * with the POLLOUT/POLLWR flags, so somebody should check this + * Some poll() documentation says that EPOLLHUP is incompatible + * with the EPOLLOUT/POLLWR flags, so somebody should check this * all. But careful, it tends to be safer to return too many * bits than too few, and you can easily break real applications * if you don't tell them that something has hung up! * * Check-me. * - * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and + * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and * our fs/select.c). It means that after we received EOF, * poll always returns immediately, making impossible poll() on write() - * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP + * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP * if and only if shutdown has been made in both directions. * Actually, it is interesting to look how Solaris and DUX - * solve this dilemma. I would prefer, if POLLHUP were maskable, + * solve this dilemma. I would prefer, if EPOLLHUP were maskable, * then we could set it on SND_SHUTDOWN. BTW examples given * in Stevens' books assume exactly this behaviour, it explains - * why POLLHUP is incompatible with POLLOUT. --ANK + * why EPOLLHUP is incompatible with EPOLLOUT. --ANK * * NOTE. Check for TCP_CLOSE is added. The goal is to prevent * blocking on fresh not-connected or disconnected socket. --ANK */ if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLIN | POLLRDNORM | POLLRDHUP; + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected or passive Fast Open socket? 
*/ if (state != TCP_SYN_SENT && @@ -554,11 +554,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) target++; if (tp->rcv_nxt - tp->copied_seq >= target) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { if (sk_stream_is_writeable(sk)) { - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); @@ -570,24 +570,24 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) */ smp_mb__after_atomic(); if (sk_stream_is_writeable(sk)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } } else - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; if (tp->urg_data & TCP_URG_VALID) - mask |= POLLPRI; + mask |= EPOLLPRI; } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* Active TCP fastopen socket with defer_connect - * Return POLLOUT so application can call write() + * Return EPOLLOUT so application can call write() * in order for kernel to generate SYN+data */ - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } /* This barrier is coupled with smp_wmb() in tcp_reset() */ smp_rmb(); if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR; + mask |= EPOLLERR; return mask; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index cfa51cfd2d99..575d3c1fb6e8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -315,7 +315,7 @@ static void tcp_sndbuf_expand(struct sock *sk) /* Fast Recovery (RFC 5681 3.2) : * Cubic needs 1.7 factor, rounded to 2 to include - * extra cushion (application might react slowly to POLLOUT) + * extra cushion (application might react slowly to EPOLLOUT) */ sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2; sndmem *= nr_segs * per_mss; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f81f969f9c06..bfaefe560b5c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2501,12 +2501,12 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) struct sock *sk = sock->sk; if (!skb_queue_empty(&udp_sk(sk)->reader_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* Check for false positives due to checksum errors */ - if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && + if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) - mask &= ~(POLLIN | POLLRDNORM); + mask &= ~(EPOLLIN | EPOLLRDNORM); return mask; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 64331158d693..1e8cc7bcbca3 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1483,7 +1483,7 @@ static inline __poll_t iucv_accept_poll(struct sock *parent) sk = (struct sock *) isk; if (sk->sk_state == IUCV_CONNECTED) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } return 0; @@ -1501,27 +1501,27 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock, return iucv_accept_poll(sk); if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR | - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP; + mask |= EPOLLRDHUP; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; if (!skb_queue_empty(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == IUCV_CLOSED) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_state == IUCV_DISCONN) - mask |= POLLIN; + mask |= EPOLLIN; if (sock_writeable(sk) && iucv_below_msglim(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 4a8d407f8902..f297d53a11aa 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -396,8 +396,8 @@ static int kcm_read_sock_done(struct strparser *strp, int err) static void psock_state_change(struct sock *sk) { - /* TCP only does a POLLIN for a half close. Do a POLLHUP here - * since application will normally not poll with POLLIN + /* TCP only does a EPOLLIN for a half close. Do a EPOLLHUP here + * since application will normally not poll with EPOLLIN * on the TCP sockets. */ @@ -1338,7 +1338,7 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so * we set sk_state, otherwise epoll_wait always returns right away with - * POLLHUP + * EPOLLHUP */ kcm->sk.sk_state = TCP_ESTABLISHED; diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 985909f105eb..376040092142 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -543,7 +543,7 @@ static inline __poll_t llcp_accept_poll(struct sock *parent) sk = &llcp_sock->sk; if (sk->sk_state == LLCP_CONNECTED) - return POLLIN | POLLRDNORM; + return EPOLLIN | EPOLLRDNORM; } return 0; @@ -563,23 +563,23 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock, return llcp_accept_poll(sk); if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR | - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == LLCP_CLOSED) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP | POLLIN | POLLRDNORM; + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1d1483007e46..e0f3f4aeeb4f 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4085,7 +4085,7 @@ static __poll_t packet_poll(struct file *file, struct socket *sock, if (po->rx_ring.pg_vec) { if (!packet_previous_rx_frame(po, &po->rx_ring, TP_STATUS_KERNEL)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL) po->pressure = 0; @@ -4093,7 +4093,7 @@ static __poll_t packet_poll(struct file *file, struct socket *sock, spin_lock_bh(&sk->sk_write_queue.lock); if (po->tx_ring.pg_vec) { if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } spin_unlock_bh(&sk->sk_write_queue.lock); return mask; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 08f6751d2030..fffcd69f63ff 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -351,18 +351,18 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock, poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == TCP_CLOSE) - return POLLERR; + return EPOLLERR; if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (!skb_queue_empty(&pn->ctrlreq_queue)) - mask |= POLLPRI; + mask |= EPOLLPRI; if (!mask && sk->sk_state == TCP_CLOSE_WAIT) - return POLLHUP; + return EPOLLHUP; if (sk->sk_state == TCP_ESTABLISHED && refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf && atomic_read(&pn->tx_credits)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 88aa8ad0f5b6..744c637c86b0 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -137,17 +137,17 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr, /* * RDS' poll is without a doubt the least intuitive part of the interface, - * as POLLIN and POLLOUT do not behave entirely as you would expect from + * as EPOLLIN and EPOLLOUT do not behave entirely as you would expect from * a network protocol. * - * POLLIN is asserted if + * EPOLLIN is asserted if * - there is data on the receive queue. * - to signal that a previously congested destination may have become * uncongested * - A notification has been queued to the socket (this can be a congestion * update, or a RDMA completion). * - * POLLOUT is asserted if there is room on the send queue. This does not mean + * EPOLLOUT is asserted if there is room on the send queue. This does not mean * however, that the next sendmsg() call will succeed. If the application tries * to send to a congested destination, the system call may still fail (and * return ENOBUFS). 
@@ -167,22 +167,22 @@ static __poll_t rds_poll(struct file *file, struct socket *sock, read_lock_irqsave(&rs->rs_recv_lock, flags); if (!rs->rs_cong_monitor) { - /* When a congestion map was updated, we signal POLLIN for + /* When a congestion map was updated, we signal EPOLLIN for * "historical" reasons. Applications can also poll for * WRBAND instead. */ if (rds_cong_updated_since(&rs->rs_cong_track)) - mask |= (POLLIN | POLLRDNORM | POLLWRBAND); + mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND); } else { spin_lock(&rs->rs_lock); if (rs->rs_cong_notify) - mask |= (POLLIN | POLLRDNORM); + mask |= (EPOLLIN | EPOLLRDNORM); spin_unlock(&rs->rs_lock); } if (!list_empty(&rs->rs_recv_queue) || !list_empty(&rs->rs_notify_queue)) - mask |= (POLLIN | POLLRDNORM); + mask |= (EPOLLIN | EPOLLRDNORM); if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) - mask |= (POLLOUT | POLLWRNORM); + mask |= (EPOLLOUT | EPOLLWRNORM); read_unlock_irqrestore(&rs->rs_recv_lock, flags); /* clear state any time we wake a seen-congested socket */ diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 124c77e9d058..59d0eb960275 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1142,13 +1142,13 @@ static int rfkill_fop_open(struct inode *inode, struct file *file) static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait) { struct rfkill_data *data = file->private_data; - __poll_t res = POLLOUT | POLLWRNORM; + __poll_t res = EPOLLOUT | EPOLLWRNORM; poll_wait(file, &data->read_wait, wait); mutex_lock(&data->mtx); if (!list_empty(&data->events)) - res = POLLIN | POLLRDNORM; + res = EPOLLIN | EPOLLRDNORM; mutex_unlock(&data->mtx); return res; diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 21ad6a3a465c..0c9c18aa7c77 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -742,13 +742,13 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock, /* the socket is readable if there are any messages waiting on the Rx * queue */ if (!list_empty(&rx->recvmsg_q)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* the socket is writable if there is space to add new data to the * socket; there is no guarantee that any particular call in progress * on the socket may have space in the Tx ACK window */ if (rxrpc_writable(sk)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; return mask; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ebb8cb9eb0bd..bf271f8c2dc9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7602,22 +7602,22 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait) */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) return (!list_empty(&sp->ep->asocs)) ? - (POLLIN | POLLRDNORM) : 0; + (EPOLLIN | EPOLLRDNORM) : 0; mask = 0; /* Is there any exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR | - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP | POLLIN | POLLRDNORM; + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; /* Is it readable? Reconsider this code with TCP-style support. */ if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* The association is either gone or not ready. 
*/ if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) @@ -7625,7 +7625,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait) /* Is it writable? */ if (sctp_writeable(sk)) { - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); /* @@ -7637,7 +7637,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait) * in the following code to cover it as well. */ if (sctp_writeable(sk)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } return mask; } @@ -8161,8 +8161,8 @@ void sctp_data_ready(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | POLLRDBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | + EPOLLRDNORM | EPOLLRDBAND); sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index ba4b84debc5a..da1a5cdefd13 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1145,7 +1145,7 @@ static __poll_t smc_accept_poll(struct sock *parent) spin_lock(&isk->accept_q_lock); if (!list_empty(&isk->accept_q)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; spin_unlock(&isk->accept_q_lock); return mask; @@ -1160,7 +1160,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, int rc; if (!sk) - return POLLNVAL; + return EPOLLNVAL; smc = smc_sk(sock->sk); sock_hold(sk); @@ -1171,16 +1171,16 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); /* if non-blocking connect finished ... */ lock_sock(sk); - if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) { + if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) { sk->sk_err = smc->clcsock->sk->sk_err; if (sk->sk_err) { - mask |= POLLERR; + mask |= EPOLLERR; } else { rc = smc_connect_rdma(smc); if (rc < 0) - mask |= POLLERR; + mask |= EPOLLERR; /* success cases including fallback */ - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } } } else { @@ -1190,27 +1190,27 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, lock_sock(sk); } if (sk->sk_err) - mask |= POLLERR; + mask |= EPOLLERR; if ((sk->sk_shutdown == SHUTDOWN_MASK) || (sk->sk_state == SMC_CLOSED)) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_state == SMC_LISTEN) { /* woken up by sk_data_ready in smc_listen_work() */ mask = smc_accept_poll(sk); } else { if (atomic_read(&smc->conn.sndbuf_space) || sk->sk_shutdown & SEND_SHUTDOWN) { - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); } if (atomic_read(&smc->conn.bytes_to_rcv)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLIN | POLLRDNORM | POLLRDHUP; + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; if (sk->sk_state == SMC_APPCLOSEWAIT1) - mask |= POLLIN; + mask |= EPOLLIN; } } diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 9dc392ca06bf..eff4e0d0bb31 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -35,8 +35,8 @@ static void smc_rx_data_ready(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | - POLLRDNORM | POLLRDBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | + EPOLLRDNORM | EPOLLRDBAND); 
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); if ((sk->sk_shutdown == SHUTDOWN_MASK) || (sk->sk_state == SMC_CLOSED)) diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 838bce20c361..72f004c9c9b1 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -46,8 +46,8 @@ static void smc_tx_write_space(struct sock *sk) wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, - POLLOUT | POLLWRNORM | - POLLWRBAND); + EPOLLOUT | EPOLLWRNORM | + EPOLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index aa36dad32db1..8a7e1c774f9c 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -940,7 +940,7 @@ static __poll_t cache_poll(struct file *filp, poll_table *wait, poll_wait(filp, &queue_wait, wait); /* alway allow write */ - mask = POLLOUT | POLLWRNORM; + mask = EPOLLOUT | EPOLLWRNORM; if (!rp) return mask; @@ -950,7 +950,7 @@ static __poll_t cache_poll(struct file *filp, poll_table *wait, for (cq= &rp->q; &cq->list != &cd->queue; cq = list_entry(cq->list.next, struct cache_queue, list)) if (!cq->reader) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; break; } spin_unlock(&queue_lock); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 5c4330325787..fc97fc3ed637 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -345,15 +345,15 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait) { struct inode *inode = file_inode(filp); struct rpc_inode *rpci = RPC_I(inode); - __poll_t mask = POLLOUT | POLLWRNORM; + __poll_t mask = EPOLLOUT | EPOLLWRNORM; poll_wait(filp, &rpci->waitq, wait); inode_lock(inode); if (rpci->pipe == NULL) - mask |= POLLERR | POLLHUP; + mask |= EPOLLERR | EPOLLHUP; else if (filp->private_data || !list_empty(&rpci->pipe->pipe)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; inode_unlock(inode); return mask; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 163f3a547501..b0323ec7971e 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -721,31 +721,31 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock, sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_shutdown & RCV_SHUTDOWN) - revents |= POLLRDHUP | POLLIN | POLLRDNORM; + revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) - revents |= POLLHUP; + revents |= EPOLLHUP; switch (sk->sk_state) { case TIPC_ESTABLISHED: case TIPC_CONNECTING: if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) - revents |= POLLOUT; + revents |= EPOLLOUT; /* fall thru' */ case TIPC_LISTEN: if (!skb_queue_empty(&sk->sk_receive_queue)) - revents |= POLLIN | POLLRDNORM; + revents |= EPOLLIN | EPOLLRDNORM; break; case TIPC_OPEN: if (tsk->group_is_open && !tsk->cong_link_cnt) - revents |= POLLOUT; + revents |= EPOLLOUT; if (!tipc_sk_type_connectionless(sk)) break; if (skb_queue_empty(&sk->sk_receive_queue)) break; - revents |= POLLIN | POLLRDNORM; + revents |= EPOLLIN | EPOLLRDNORM; break; case TIPC_DISCONNECTING: - revents = POLLIN | POLLRDNORM | POLLHUP; + revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP; break; } return revents; @@ -1897,8 +1897,8 @@ static void tipc_write_space(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | - POLLWRNORM | POLLWRBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | + EPOLLWRNORM | EPOLLWRBAND); 
rcu_read_unlock(); } @@ -1914,8 +1914,8 @@ static void tipc_data_ready(struct sock *sk) rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) - wake_up_interruptible_sync_poll(&wq->wait, POLLIN | - POLLRDNORM | POLLRDBAND); + wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | + EPOLLRDNORM | EPOLLRDBAND); rcu_read_unlock(); } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 0214acbd6bff..d545e1d0dea2 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -415,9 +415,9 @@ static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, { unix_dgram_peer_wake_disconnect(sk, other); wake_up_interruptible_poll(sk_sleep(sk), - POLLOUT | - POLLWRNORM | - POLLWRBAND); + EPOLLOUT | + EPOLLWRNORM | + EPOLLWRBAND); } /* preconditions: @@ -454,7 +454,7 @@ static void unix_write_space(struct sock *sk) wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, - POLLOUT | POLLWRNORM | POLLWRBAND); + EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); @@ -2129,8 +2129,8 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, if (wq_has_sleeper(&u->peer_wait)) wake_up_interruptible_sync_poll(&u->peer_wait, - POLLOUT | POLLWRNORM | - POLLWRBAND); + EPOLLOUT | EPOLLWRNORM | + EPOLLWRBAND); if (msg->msg_name) unix_copy_addr(msg, skb->sk); @@ -2650,27 +2650,27 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa /* exceptional events? */ if (sk->sk_err) - mask |= POLLERR; + mask |= EPOLLERR; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP | POLLIN | POLLRDNORM; + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* Connection-based need to check for termination and startup */ if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE) - mask |= POLLHUP; + mask |= EPOLLHUP; /* * we set writable also when the other side has shut down the * connection. This prevents stuck sockets. */ if (unix_writable(sk)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } @@ -2687,29 +2687,29 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, /* exceptional events? */ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) - mask |= POLLERR | - (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0); + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0); if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= POLLRDHUP | POLLIN | POLLRDNORM; + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) - mask |= POLLHUP; + mask |= EPOLLHUP; /* readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* Connection-based need to check for termination and startup */ if (sk->sk_type == SOCK_SEQPACKET) { if (sk->sk_state == TCP_CLOSE) - mask |= POLLHUP; + mask |= EPOLLHUP; /* connection hasn't started yet? */ if (sk->sk_state == TCP_SYN_SENT) return mask; } /* No write status requested, avoid expensive OUT tests. 
*/ - if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT))) + if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT))) return mask; writable = unix_writable(sk); @@ -2726,7 +2726,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, } if (writable) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 9d95e773f4c8..e0fc84daed94 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -865,20 +865,20 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, if (sk->sk_err) /* Signify that there has been an error on this socket. */ - mask |= POLLERR; + mask |= EPOLLERR; /* INET sockets treat local write shutdown and peer write shutdown as a - * case of POLLHUP set. + * case of EPOLLHUP set. */ if ((sk->sk_shutdown == SHUTDOWN_MASK) || ((sk->sk_shutdown & SEND_SHUTDOWN) && (vsk->peer_shutdown & SEND_SHUTDOWN))) { - mask |= POLLHUP; + mask |= EPOLLHUP; } if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { - mask |= POLLRDHUP; + mask |= EPOLLRDHUP; } if (sock->type == SOCK_DGRAM) { @@ -888,11 +888,11 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, */ if (!skb_queue_empty(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } if (!(sk->sk_shutdown & SEND_SHUTDOWN)) - mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; } else if (sock->type == SOCK_STREAM) { lock_sock(sk); @@ -902,7 +902,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, */ if (sk->sk_state == TCP_LISTEN && !vsock_is_accept_queue_empty(sk)) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; /* If there is something in the queue then we can read. */ if (transport->stream_is_active(vsk) && @@ -911,10 +911,10 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, int ret = transport->notify_poll_in( vsk, 1, &data_ready_now); if (ret < 0) { - mask |= POLLERR; + mask |= EPOLLERR; } else { if (data_ready_now) - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } } @@ -925,7 +925,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, */ if (sk->sk_shutdown & RCV_SHUTDOWN || vsk->peer_shutdown & SEND_SHUTDOWN) { - mask |= POLLIN | POLLRDNORM; + mask |= EPOLLIN | EPOLLRDNORM; } /* Connected sockets that can produce data can be written. */ @@ -935,25 +935,25 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, int ret = transport->notify_poll_out( vsk, 1, &space_avail_now); if (ret < 0) { - mask |= POLLERR; + mask |= EPOLLERR; } else { if (space_avail_now) - /* Remove POLLWRBAND since INET + /* Remove EPOLLWRBAND since INET * sockets are not setting it. */ - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } } } /* Simulate INET socket poll behaviors, which sets - * POLLOUT|POLLWRNORM when peer is closed and nothing to read, + * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read, * but local send is not shutdown. */ if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) { if (!(sk->sk_shutdown & SEND_SHUTDOWN)) - mask |= POLLOUT | POLLWRNORM; + mask |= EPOLLOUT | EPOLLWRNORM; } |
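Finally, the userspace contract this exercise protects: epoll_wait() hands back essentially the mask the driver's poll method returned, so the in-kernel bits must be the EPOLL* values on every architecture, which is what Al's follow-up patch guarantees. A minimal consumer for reference (standard epoll(7) API, watching stdin):

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN | EPOLLRDHUP };
	struct epoll_event out;
	int epfd = epoll_create1(0);

	if (epfd < 0)
		return 1;
	ev.data.fd = STDIN_FILENO;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
		return 1;
	/* out.events comes back holding the EPOLL* bits the kernel set. */
	if (epoll_wait(epfd, &out, 1, 1000) == 1)
		printf("fd %d ready, events=%#x\n", out.data.fd, out.events);
	close(epfd);
	return 0;
}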