Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 89 |
1 file changed, 46 insertions, 43 deletions
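
The patch below converts fs/eventpoll.c from the userspace POLL* constants to the kernel-internal EPOLL* equivalents and annotates the poll paths with __poll_t; the event count produced by ep_send_events_proc() moves into ep_send_events_data.res so that callback can return a __poll_t like the other ep_scan_ready_list() callbacks. The payoff is sparse checking: __poll_t is a "bitwise" type, so mixing it with plain integers, or with POLL* values that are not guaranteed to match EPOLL* bit-for-bit on every architecture, is flagged by "make C=1". A minimal, self-contained sketch of that mechanism (not kernel code: the typedef and the two defines mirror include/uapi/linux/types.h and include/uapi/linux/eventpoll.h, and demo_events() is hypothetical):

/*
 * Outside sparse, __bitwise/__force expand to nothing and this is
 * ordinary C; under sparse ("make C=1") the attributes make __poll_t
 * a distinct bitwise type.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned __bitwise __poll_t;

#define EPOLLIN		((__force __poll_t)0x00000001)
#define EPOLLOUT	((__force __poll_t)0x00000004)

static __poll_t demo_events(int ready_to_write)
{
	__poll_t mask = EPOLLIN;	/* ok: both sides are __poll_t */

	if (ready_to_write)
		mask |= EPOLLOUT;	/* ok: |, &, ^ keep the type */
	return mask;
	/* "return 1;" (or a raw POLLIN) would warn under sparse:
	 * plain integers do not convert to a bitwise type implicitly. */
}
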
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index afd548ebc328..0f3494ed3ed0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -95,9 +95,9 @@
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
-#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
 
-#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
 				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
 
 /* Maximum number of nesting allowed inside epoll sets */
@@ -260,6 +260,7 @@ struct ep_pqueue {
 struct ep_send_events_data {
 	int maxevents;
 	struct epoll_event __user *events;
+	int res;
 };
 
 /*
@@ -554,7 +555,7 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
 	wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
 
 	spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
-	wake_up_locked_poll(wqueue, POLLIN);
+	wake_up_locked_poll(wqueue, EPOLLIN);
 	spin_unlock_irqrestore(&wqueue->lock, flags);
 
 	return 0;
@@ -574,7 +575,7 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
 
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
-	wake_up_poll(wq, POLLIN);
+	wake_up_poll(wq, EPOLLIN);
 }
 
 #endif
@@ -660,12 +661,13 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
  *
  * Returns: The same integer error code returned by the @sproc callback.
  */
-static int ep_scan_ready_list(struct eventpoll *ep,
-			      int (*sproc)(struct eventpoll *,
+static __poll_t ep_scan_ready_list(struct eventpoll *ep,
+			      __poll_t (*sproc)(struct eventpoll *,
 					   struct list_head *, void *),
 			      void *priv, int depth, bool ep_locked)
 {
-	int error, pwake = 0;
+	__poll_t res;
+	int pwake = 0;
 	unsigned long flags;
 	struct epitem *epi, *nepi;
 	LIST_HEAD(txlist);
@@ -694,7 +696,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	/*
 	 * Now call the callback function.
 	 */
-	error = (*sproc)(ep, &txlist, priv);
+	res = (*sproc)(ep, &txlist, priv);
 
 	spin_lock_irqsave(&ep->lock, flags);
 	/*
@@ -747,7 +749,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	if (pwake)
 		ep_poll_safewake(&ep->poll_wait);
 
-	return error;
+	return res;
 }
 
 static void epi_rcu_free(struct rcu_head *head)
@@ -864,7 +866,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv);
 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 				 poll_table *pt);
@@ -874,7 +876,8 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
  * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
  * is correctly annotated.
  */
-static unsigned int ep_item_poll(struct epitem *epi, poll_table *pt, int depth)
+static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
+				 int depth)
 {
 	struct eventpoll *ep;
 	bool locked;
@@ -893,7 +896,7 @@ static unsigned int ep_item_poll(struct epitem *epi, poll_table *pt, int depth)
 				  locked) & epi->event.events;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv)
 {
 	struct epitem *epi, *tmp;
@@ -905,7 +908,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 
 	list_for_each_entry_safe(epi, tmp, head, rdllink) {
 		if (ep_item_poll(epi, &pt, depth)) {
-			return POLLIN | POLLRDNORM;
+			return EPOLLIN | EPOLLRDNORM;
 		} else {
 			/*
 			 * Item has been dropped into the ready list by the poll
@@ -920,7 +923,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 	return 0;
 }
 
-static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
 	struct eventpoll *ep = file->private_data;
 	int depth = 0;
@@ -1117,6 +1120,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
 	unsigned long flags;
 	struct epitem *epi = ep_item_from_wait(wait);
 	struct eventpoll *ep = epi->ep;
+	__poll_t pollflags = key_to_poll(key);
 	int ewake = 0;
 
 	spin_lock_irqsave(&ep->lock, flags);
@@ -1138,7 +1142,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
 	 * callback. We need to be able to handle both cases here, hence the
 	 * test for "key" != NULL before the event match test.
 	 */
-	if (key && !((unsigned long) key & epi->event.events))
+	if (pollflags && !(pollflags & epi->event.events))
 		goto out_unlock;
 
 	/*
@@ -1175,14 +1179,14 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
 	 */
 	if (waitqueue_active(&ep->wq)) {
 		if ((epi->event.events & EPOLLEXCLUSIVE) &&
-					!((unsigned long)key & POLLFREE)) {
-			switch ((unsigned long)key & EPOLLINOUT_BITS) {
-			case POLLIN:
-				if (epi->event.events & POLLIN)
+					!(pollflags & POLLFREE)) {
+			switch (pollflags & EPOLLINOUT_BITS) {
+			case EPOLLIN:
+				if (epi->event.events & EPOLLIN)
 					ewake = 1;
 				break;
-			case POLLOUT:
-				if (epi->event.events & POLLOUT)
+			case EPOLLOUT:
+				if (epi->event.events & EPOLLOUT)
 					ewake = 1;
 				break;
 			case 0:
@@ -1205,7 +1209,7 @@ out_unlock:
 	if (!(epi->event.events & EPOLLEXCLUSIVE))
 		ewake = 1;
 
-	if ((unsigned long)key & POLLFREE) {
+	if (pollflags & POLLFREE) {
 		/*
 		 * If we race with ep_remove_wait_queue() it can miss
 		 * ->whead = NULL and do another remove_wait_queue() after
@@ -1409,10 +1413,11 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
 /*
  * Must be called with "mtx" held.
  */
-static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 		     struct file *tfile, int fd, int full_check)
 {
-	int error, revents, pwake = 0;
+	int error, pwake = 0;
+	__poll_t revents;
 	unsigned long flags;
 	long user_watches;
 	struct epitem *epi;
@@ -1486,7 +1491,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	ep_set_busy_poll_napi_id(epi);
 
 	/* If the file is already "ready" we drop it inside the ready list */
-	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
+	if (revents && !ep_is_linked(&epi->rdllink)) {
 		list_add_tail(&epi->rdllink, &ep->rdllist);
 		ep_pm_stay_awake(epi);
 
@@ -1540,10 +1545,10 @@ error_create_wakeup_source:
  * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
-static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
+static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+		     const struct epoll_event *event)
 {
 	int pwake = 0;
-	unsigned int revents;
 	poll_table pt;
 
 	init_poll_funcptr(&pt, NULL);
@@ -1585,14 +1590,10 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	/*
 	 * Get current event bits. We can safely use the file* here because
 	 * its usage count has been increased by the caller of this function.
-	 */
-	revents = ep_item_poll(epi, &pt, 1);
-
-	/*
 	 * If the item is "hot" and it is not registered inside the ready
 	 * list, push it inside.
 	 */
-	if (revents & event->events) {
+	if (ep_item_poll(epi, &pt, 1)) {
 		spin_lock_irq(&ep->lock);
 		if (!ep_is_linked(&epi->rdllink)) {
 			list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -1614,12 +1615,11 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	return 0;
 }
 
-static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv)
 {
 	struct ep_send_events_data *esed = priv;
-	int eventcnt;
-	unsigned int revents;
+	__poll_t revents;
 	struct epitem *epi;
 	struct epoll_event __user *uevent;
 	struct wakeup_source *ws;
@@ -1632,8 +1632,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 	 * Items cannot vanish during the loop because ep_scan_ready_list() is
 	 * holding "mtx" during this call.
 	 */
-	for (eventcnt = 0, uevent = esed->events;
-	     !list_empty(head) && eventcnt < esed->maxevents;) {
+	for (esed->res = 0, uevent = esed->events;
+	     !list_empty(head) && esed->res < esed->maxevents;) {
 		epi = list_first_entry(head, struct epitem, rdllink);
 
 		/*
@@ -1667,9 +1667,11 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 		    __put_user(epi->event.data, &uevent->data)) {
 			list_add(&epi->rdllink, head);
 			ep_pm_stay_awake(epi);
-			return eventcnt ? eventcnt : -EFAULT;
+			if (!esed->res)
+				esed->res = -EFAULT;
+			return 0;
 		}
-		eventcnt++;
+		esed->res++;
 		uevent++;
 		if (epi->event.events & EPOLLONESHOT)
 			epi->event.events &= EP_PRIVATE_BITS;
@@ -1691,7 +1693,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 		}
 	}
 
-	return eventcnt;
+	return 0;
 }
 
 static int ep_send_events(struct eventpoll *ep,
@@ -1702,7 +1704,8 @@ static int ep_send_events(struct eventpoll *ep,
 	esed.maxevents = maxevents;
 	esed.events = events;
 
-	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+	ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+	return esed.res;
 }
 
 static inline struct timespec64 ep_set_mstimeout(long ms)
@@ -2102,7 +2105,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	switch (op) {
 	case EPOLL_CTL_ADD:
 		if (!epi) {
-			epds.events |= POLLERR | POLLHUP;
+			epds.events |= EPOLLERR | EPOLLHUP;
 			error = ep_insert(ep, &epds, tf.file, fd, full_check);
 		} else
 			error = -EEXIST;
@@ -2118,7 +2121,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
 	case EPOLL_CTL_MOD:
 		if (epi) {
 			if (!(epi->event.events & EPOLLEXCLUSIVE)) {
-				epds.events |= POLLERR | POLLHUP;
+				epds.events |= EPOLLERR | EPOLLHUP;
 				error = ep_modify(ep, epi, &epds);
 			}
 		} else
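
For context on the EPOLLEXCLUSIVE branch reworked in ep_poll_callback() above, a minimal userspace sketch (hypothetical loopback port, error handling trimmed): several epoll instances register the same listening socket with EPOLLEXCLUSIVE, so an incoming connection wakes only one of them instead of the whole set. Note also how the epoll_ctl hunks above OR EPOLLERR | EPOLLHUP into the requested mask; userspace never needs to ask for those.

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in sa;
	int lfd = socket(AF_INET, SOCK_STREAM, 0);
	int i;

	memset(&sa, 0, sizeof(sa));
	sa.sin_family = AF_INET;
	sa.sin_port = htons(12345);		/* hypothetical port */
	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(lfd, (struct sockaddr *)&sa, sizeof(sa));
	listen(lfd, SOMAXCONN);

	/* Two epoll instances watching the same listening socket. */
	for (i = 0; i < 2; i++) {
		struct epoll_event ev;
		int epfd = epoll_create1(0);

		/* EPOLLEXCLUSIVE (Linux >= 4.5): of all exclusive waiters
		 * on lfd, a wakeup is delivered to only one.  Per the
		 * EPOLLEXCLUSIVE_OK_BITS filtering above, only EPOLLIN
		 * and/or EPOLLOUT are meaningful to request here. */
		ev.events = EPOLLIN | EPOLLEXCLUSIVE;
		ev.data.fd = lfd;
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, lfd, &ev) == -1)
			perror("epoll_ctl(EPOLL_CTL_ADD)");
	}
	/* Each instance would now call epoll_wait() in its own thread or
	 * process; a single connection readies only one of them. */
	return 0;
}

Without EPOLLEXCLUSIVE, every instance polling lfd would be woken by each connection, which is the thundering-herd behavior the flag exists to avoid.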