Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 60
1 file changed, 48 insertions, 12 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ff12f7ac73ef..f9cfd168fbe2 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -181,7 +181,7 @@ struct eventpoll {
 
 	/*
 	 * This is a single linked list that chains all the "struct epitem" that
-	 * happened while transfering ready events to userspace w/out
+	 * happened while transferring ready events to userspace w/out
	 * holding ->lock.
	 */
	struct epitem *ovflist;
@@ -316,6 +316,19 @@ static void ep_nested_calls_init(struct nested_calls *ncalls)
 }
 
 /**
+ * ep_events_available - Checks if ready events might be available.
+ *
+ * @ep: Pointer to the eventpoll context.
+ *
+ * Returns: Returns a value different than zero if ready events are available,
+ *          or zero otherwise.
+ */
+static inline int ep_events_available(struct eventpoll *ep)
+{
+	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+}
+
+/**
  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  *                  that the recursion limit is not exceeded, and that
  *                  the same nested call (by the meaning of same cookie) is
@@ -593,7 +606,7 @@ static void ep_free(struct eventpoll *ep)
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
-	 * holding "epmutex" is sufficent here.
+	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);
 
@@ -707,7 +720,7 @@ void eventpoll_release_file(struct file *file)
	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
-	 * cleanup path, and this means that noone is using this file anymore.
+	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but by holding the mutex
@@ -1099,7 +1112,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
				 * Trigger mode, we need to insert back inside
				 * the ready list, so that the next call to
				 * epoll_wait() will check again the events
-				 * availability. At this point, noone can insert
+				 * availability. At this point, no one can insert
				 * into ep->rdllist besides us. The epoll_ctl()
				 * callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
@@ -1135,12 +1148,29 @@ static inline struct timespec ep_set_mstimeout(long ms)
	return timespec_add_safe(now, ts);
 }
 
+/**
+ * ep_poll - Retrieves ready events, and delivers them to the caller supplied
+ *           event buffer.
+ *
+ * @ep: Pointer to the eventpoll context.
+ * @events: Pointer to the userspace buffer where the ready events should be
+ *          stored.
+ * @maxevents: Size (in terms of number of events) of the caller event buffer.
+ * @timeout: Maximum timeout for the ready events fetch operation, in
+ *           milliseconds. If the @timeout is zero, the function will not block,
+ *           while if the @timeout is less than zero, the function will block
+ *           until at least one event has been retrieved (or an error
+ *           occurred).
+ *
+ * Returns: Returns the number of ready events which have been fetched, or an
+ *          error code, in case of error.
+ */
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
 {
-	int res, eavail, timed_out = 0;
+	int res = 0, eavail, timed_out = 0;
	unsigned long flags;
-	long slack;
+	long slack = 0;
	wait_queue_t wait;
	ktime_t expires, *to = NULL;
 
@@ -1151,14 +1181,19 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		to = &expires;
		*to = timespec_to_ktime(end_time);
	} else if (timeout == 0) {
+		/*
+		 * Avoid the unnecessary trip to the wait queue loop, if the
+		 * caller specified a non blocking operation.
+		 */
		timed_out = 1;
+		spin_lock_irqsave(&ep->lock, flags);
+		goto check_events;
	}
 
-retry:
+fetch_events:
	spin_lock_irqsave(&ep->lock, flags);
 
-	res = 0;
-	if (list_empty(&ep->rdllist)) {
+	if (!ep_events_available(ep)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be wake up by
@@ -1174,7 +1209,7 @@ retry:
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
-			if (!list_empty(&ep->rdllist) || timed_out)
+			if (ep_events_available(ep) || timed_out)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
@@ -1191,8 +1226,9 @@ retry:
 
		set_current_state(TASK_RUNNING);
	}
+check_events:
	/* Is it worth to try to dig for events ? */
-	eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
+	eavail = ep_events_available(ep);
 
	spin_unlock_irqrestore(&ep->lock, flags);
 
@@ -1203,7 +1239,7 @@ retry:
	 */
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
-		goto retry;
+		goto fetch_events;
 
	return res;
 }
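For context, the timeout semantics documented in the new ep_poll() kerneldoc are the ones userspace sees through epoll_wait(2): zero means never block (the path this patch short-circuits with the new "goto check_events"), and a negative value means block until at least one event is retrieved, a signal arrives, or an error occurs. The sketch below exercises both paths; it is not part of the patch, and the watched descriptor (stdin) and buffer size are arbitrary illustrative choices.

/* Sketch only: demonstrates the epoll_wait() timeout semantics that
 * ep_poll() implements. Watching stdin is an arbitrary example. */
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = STDIN_FILENO };
	struct epoll_event ready[8];
	int epfd = epoll_create1(0);
	int n;

	if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
		return 1;

	/* timeout == 0: returns immediately; with this patch, ep_poll()
	 * skips the wait queue loop entirely via check_events. */
	n = epoll_wait(epfd, ready, 8, 0);
	printf("non-blocking: %d ready event(s)\n", n);

	/* timeout < 0: blocks until at least one event is available,
	 * returning -1 with EINTR if interrupted by a signal. */
	n = epoll_wait(epfd, ready, 8, -1);
	printf("blocking: %d ready event(s)\n", n);

	close(epfd);
	return 0;
}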